diff --git a/lib/api-client.js b/lib/api-client.js
index 765b0c1da..cef48f423 100644
--- a/lib/api-client.js
+++ b/lib/api-client.js
@@ -10,7 +10,8 @@ var __importDefault = (this && this.__importDefault) || function (mod) {
return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
-const github = __importStar(require("@actions/github"));
+const githubUtils = __importStar(require("@actions/github/lib/utils"));
+const retry = __importStar(require("@octokit/plugin-retry"));
const console_log_level_1 = __importDefault(require("console-log-level"));
const path = __importStar(require("path"));
const actions_util_1 = require("./actions-util");
@@ -19,11 +20,12 @@ exports.getApiClient = function (githubAuth, githubUrl, allowLocalRun = false) {
if (util_1.isLocalRun() && !allowLocalRun) {
throw new Error("Invalid API call in local run");
}
- return github.getOctokit(githubAuth, {
+ const retryingOctokit = githubUtils.GitHub.plugin(retry.retry);
+ return new retryingOctokit(githubUtils.getOctokitOptions(githubAuth, {
baseUrl: getApiUrl(githubUrl),
userAgent: "CodeQL Action",
log: console_log_level_1.default({ level: "debug" }),
- });
+ }));
};
function getApiUrl(githubUrl) {
const url = new URL(githubUrl);
diff --git a/lib/api-client.js.map b/lib/api-client.js.map
index a41b8f192..a034ddbe6 100644
--- a/lib/api-client.js.map
+++ b/lib/api-client.js.map
@@ -1 +1 @@
-{"version":3,"file":"api-client.js","sourceRoot":"","sources":["../src/api-client.ts"],"names":[],"mappings":";;;;;;;;;;;;AAAA,wDAA0C;AAC1C,0EAAgD;AAChD,2CAA6B;AAE7B,iDAAuE;AACvE,iCAAoC;AAEvB,QAAA,YAAY,GAAG,UAC1B,UAAkB,EAClB,SAAiB,EACjB,aAAa,GAAG,KAAK;IAErB,IAAI,iBAAU,EAAE,IAAI,CAAC,aAAa,EAAE;QAClC,MAAM,IAAI,KAAK,CAAC,+BAA+B,CAAC,CAAC;KAClD;IACD,OAAO,MAAM,CAAC,UAAU,CAAC,UAAU,EAAE;QACnC,OAAO,EAAE,SAAS,CAAC,SAAS,CAAC;QAC7B,SAAS,EAAE,eAAe;QAC1B,GAAG,EAAE,2BAAe,CAAC,EAAE,KAAK,EAAE,OAAO,EAAE,CAAC;KACzC,CAAC,CAAC;AACL,CAAC,CAAC;AAEF,SAAS,SAAS,CAAC,SAAiB;IAClC,MAAM,GAAG,GAAG,IAAI,GAAG,CAAC,SAAS,CAAC,CAAC;IAE/B,kDAAkD;IAClD,0CAA0C;IAC1C,IAAI,GAAG,CAAC,QAAQ,KAAK,YAAY,IAAI,GAAG,CAAC,QAAQ,KAAK,gBAAgB,EAAE;QACtE,OAAO,wBAAwB,CAAC;KACjC;IAED,6BAA6B;IAC7B,GAAG,CAAC,QAAQ,GAAG,IAAI,CAAC,IAAI,CAAC,GAAG,CAAC,QAAQ,EAAE,KAAK,EAAE,IAAI,CAAC,CAAC;IACpD,OAAO,GAAG,CAAC,QAAQ,EAAE,CAAC;AACxB,CAAC;AAED,uFAAuF;AACvF,oFAAoF;AACpF,+CAA+C;AAC/C,SAAgB,mBAAmB,CAAC,aAAa,GAAG,KAAK;IACvD,OAAO,oBAAY,CACjB,+BAAgB,CAAC,OAAO,CAAC,EACzB,kCAAmB,CAAC,mBAAmB,CAAC,EACxC,aAAa,CACd,CAAC;AACJ,CAAC;AAND,kDAMC"}
\ No newline at end of file
+{"version":3,"file":"api-client.js","sourceRoot":"","sources":["../src/api-client.ts"],"names":[],"mappings":";;;;;;;;;;;;AAAA,uEAAyD;AACzD,6DAA+C;AAC/C,0EAAgD;AAChD,2CAA6B;AAE7B,iDAAuE;AACvE,iCAAoC;AAEvB,QAAA,YAAY,GAAG,UAC1B,UAAkB,EAClB,SAAiB,EACjB,aAAa,GAAG,KAAK;IAErB,IAAI,iBAAU,EAAE,IAAI,CAAC,aAAa,EAAE;QAClC,MAAM,IAAI,KAAK,CAAC,+BAA+B,CAAC,CAAC;KAClD;IACD,MAAM,eAAe,GAAG,WAAW,CAAC,MAAM,CAAC,MAAM,CAAC,KAAK,CAAC,KAAK,CAAC,CAAC;IAC/D,OAAO,IAAI,eAAe,CACxB,WAAW,CAAC,iBAAiB,CAAC,UAAU,EAAE;QACxC,OAAO,EAAE,SAAS,CAAC,SAAS,CAAC;QAC7B,SAAS,EAAE,eAAe;QAC1B,GAAG,EAAE,2BAAe,CAAC,EAAE,KAAK,EAAE,OAAO,EAAE,CAAC;KACzC,CAAC,CACH,CAAC;AACJ,CAAC,CAAC;AAEF,SAAS,SAAS,CAAC,SAAiB;IAClC,MAAM,GAAG,GAAG,IAAI,GAAG,CAAC,SAAS,CAAC,CAAC;IAE/B,kDAAkD;IAClD,0CAA0C;IAC1C,IAAI,GAAG,CAAC,QAAQ,KAAK,YAAY,IAAI,GAAG,CAAC,QAAQ,KAAK,gBAAgB,EAAE;QACtE,OAAO,wBAAwB,CAAC;KACjC;IAED,6BAA6B;IAC7B,GAAG,CAAC,QAAQ,GAAG,IAAI,CAAC,IAAI,CAAC,GAAG,CAAC,QAAQ,EAAE,KAAK,EAAE,IAAI,CAAC,CAAC;IACpD,OAAO,GAAG,CAAC,QAAQ,EAAE,CAAC;AACxB,CAAC;AAED,uFAAuF;AACvF,oFAAoF;AACpF,+CAA+C;AAC/C,SAAgB,mBAAmB,CAAC,aAAa,GAAG,KAAK;IACvD,OAAO,oBAAY,CACjB,+BAAgB,CAAC,OAAO,CAAC,EACzB,kCAAmB,CAAC,mBAAmB,CAAC,EACxC,aAAa,CACd,CAAC;AACJ,CAAC;AAND,kDAMC"}
\ No newline at end of file
diff --git a/lib/upload-lib.js b/lib/upload-lib.js
index 302c21f4f..1792eb039 100644
--- a/lib/upload-lib.js
+++ b/lib/upload-lib.js
@@ -50,50 +50,17 @@ async function uploadPayload(payload, repositoryNwo, githubAuth, githubUrl, mode
if (testMode) {
return;
}
- // Make up to 4 attempts to upload, and sleep for these
- // number of seconds between each attempt.
- // We don't want to backoff too much to avoid wasting action
- // minutes, but just waiting a little bit could maybe help.
- const backoffPeriods = [1, 5, 15];
const client = api.getApiClient(githubAuth, githubUrl);
- for (let attempt = 0; attempt <= backoffPeriods.length; attempt++) {
- const reqURL = mode === "actions"
- ? "PUT /repos/:owner/:repo/code-scanning/analysis"
- : "POST /repos/:owner/:repo/code-scanning/sarifs";
- const response = await client.request(reqURL, {
- owner: repositoryNwo.owner,
- repo: repositoryNwo.repo,
- data: payload,
- });
- logger.debug(`response status: ${response.status}`);
- const statusCode = response.status;
- if (statusCode === 202) {
- logger.info("Successfully uploaded results");
- return;
- }
- const requestID = response.headers["x-github-request-id"];
- // On any other status code that's not 5xx mark the upload as failed
- if (!statusCode || statusCode < 500 || statusCode >= 600) {
- throw new Error(`Upload failed (${requestID}): (${statusCode}) ${JSON.stringify(response.data)}`);
- }
- // On a 5xx status code we may retry the request
- if (attempt < backoffPeriods.length) {
- // Log the failure as a warning but don't mark the action as failed yet
- logger.warning(`Upload attempt (${attempt + 1} of ${backoffPeriods.length + 1}) failed (${requestID}). Retrying in ${backoffPeriods[attempt]} seconds: (${statusCode}) ${JSON.stringify(response.data)}`);
- // Sleep for the backoff period
- await new Promise((r) => setTimeout(r, backoffPeriods[attempt] * 1000));
- continue;
- }
- else {
- // If the upload fails with 5xx then we assume it is a temporary problem
- // and not an error that the user has caused or can fix.
- // We avoid marking the job as failed to avoid breaking CI workflows.
- throw new Error(`Upload failed (${requestID}): (${statusCode}) ${JSON.stringify(response.data)}`);
- }
- }
- // This case shouldn't ever happen as the final iteration of the loop
- // will always throw an error instead of exiting to here.
- throw new Error("Upload failed");
+ const reqURL = mode === "actions"
+ ? "PUT /repos/:owner/:repo/code-scanning/analysis"
+ : "POST /repos/:owner/:repo/code-scanning/sarifs";
+ const response = await client.request(reqURL, {
+ owner: repositoryNwo.owner,
+ repo: repositoryNwo.repo,
+ data: payload,
+ });
+ logger.debug(`response status: ${response.status}`);
+ logger.info("Successfully uploaded results");
}
// Uploads a single sarif file or a directory of sarif files
// depending on what the path happens to refer to.
diff --git a/lib/upload-lib.js.map b/lib/upload-lib.js.map
index 5f61cae81..ff438a816 100644
--- a/lib/upload-lib.js.map
+++ b/lib/upload-lib.js.map
@@ -1 +1 @@
-{"version":3,"file":"upload-lib.js","sourceRoot":"","sources":["../src/upload-lib.ts"],"names":[],"mappings":";;;;;;;;;;;;AAAA,oDAAsC;AACtC,wDAA+B;AAC/B,uCAAyB;AACzB,uDAAyC;AACzC,2CAA6B;AAC7B,gDAAwB;AAExB,kDAAoC;AACpC,6DAA+C;AAG/C,gEAAkD;AAClD,6CAA+B;AAE/B,mEAAmE;AACnE,qDAAqD;AACrD,SAAgB,iBAAiB,CAAC,UAAoB;IACpD,MAAM,aAAa,GAAG;QACpB,OAAO,EAAE,IAAI;QACb,IAAI,EAAE,EAAW;KAClB,CAAC;IAEF,KAAK,MAAM,SAAS,IAAI,UAAU,EAAE;QAClC,MAAM,WAAW,GAAG,IAAI,CAAC,KAAK,CAAC,EAAE,CAAC,YAAY,CAAC,SAAS,EAAE,MAAM,CAAC,CAAC,CAAC;QACnE,sBAAsB;QACtB,IAAI,aAAa,CAAC,OAAO,KAAK,IAAI,EAAE;YAClC,aAAa,CAAC,OAAO,GAAG,WAAW,CAAC,OAAO,CAAC;SAC7C;aAAM,IAAI,aAAa,CAAC,OAAO,KAAK,WAAW,CAAC,OAAO,EAAE;YACxD,MAAM,yCAAyC,aAAa,CAAC,OAAO,QAAQ,WAAW,CAAC,OAAO,EAAE,CAAC;SACnG;QAED,aAAa,CAAC,IAAI,CAAC,IAAI,CAAC,GAAG,WAAW,CAAC,IAAI,CAAC,CAAC;KAC9C;IAED,OAAO,IAAI,CAAC,SAAS,CAAC,aAAa,CAAC,CAAC;AACvC,CAAC;AAnBD,8CAmBC;AAED,4BAA4B;AAC5B,qEAAqE;AACrE,KAAK,UAAU,aAAa,CAC1B,OAAY,EACZ,aAA4B,EAC5B,UAAkB,EAClB,SAAiB,EACjB,IAAe,EACf,MAAc;IAEd,MAAM,CAAC,IAAI,CAAC,mBAAmB,CAAC,CAAC;IAEjC,sDAAsD;IACtD,MAAM,QAAQ,GAAG,OAAO,CAAC,GAAG,CAAC,WAAW,CAAC,KAAK,MAAM,IAAI,KAAK,CAAC;IAC9D,IAAI,QAAQ,EAAE;QACZ,OAAO;KACR;IAED,uDAAuD;IACvD,0CAA0C;IAC1C,4DAA4D;IAC5D,2DAA2D;IAC3D,MAAM,cAAc,GAAG,CAAC,CAAC,EAAE,CAAC,EAAE,EAAE,CAAC,CAAC;IAElC,MAAM,MAAM,GAAG,GAAG,CAAC,YAAY,CAAC,UAAU,EAAE,SAAS,CAAC,CAAC;IAEvD,KAAK,IAAI,OAAO,GAAG,CAAC,EAAE,OAAO,IAAI,cAAc,CAAC,MAAM,EAAE,OAAO,EAAE,EAAE;QACjE,MAAM,MAAM,GACV,IAAI,KAAK,SAAS;YAChB,CAAC,CAAC,gDAAgD;YAClD,CAAC,CAAC,+CAA+C,CAAC;QACtD,MAAM,QAAQ,GAAG,MAAM,MAAM,CAAC,OAAO,CAAC,MAAM,EAAE;YAC5C,KAAK,EAAE,aAAa,CAAC,KAAK;YAC1B,IAAI,EAAE,aAAa,CAAC,IAAI;YACxB,IAAI,EAAE,OAAO;SACd,CAAC,CAAC;QAEH,MAAM,CAAC,KAAK,CAAC,oBAAoB,QAAQ,CAAC,MAAM,EAAE,CAAC,CAAC;QAEpD,MAAM,UAAU,GAAG,QAAQ,CAAC,MAAM,CAAC;QACnC,IAAI,UAAU,KAAK,GAAG,EAAE;YACtB,MAAM,CAAC,IAAI,CAAC,+BAA+B,CAAC,CAAC;YAC7C,OAAO;SACR;QAED,MAAM,SAAS,GAAG,QAAQ,CAAC,OAAO,CAAC,qBAAqB,CAAC,CAAC;QAE1D,oEAAoE;QACpE,IAAI,CAAC,UAAU,IAAI,UAAU,GAAG,GAAG,IAAI,UAAU,IAAI,GAAG,EAAE;YACxD,MAAM,IAAI,KAA
K,CACb,kBAAkB,SAAS,OAAO,UAAU,KAAK,IAAI,CAAC,SAAS,CAC7D,QAAQ,CAAC,IAAI,CACd,EAAE,CACJ,CAAC;SACH;QAED,gDAAgD;QAChD,IAAI,OAAO,GAAG,cAAc,CAAC,MAAM,EAAE;YACnC,uEAAuE;YACvE,MAAM,CAAC,OAAO,CACZ,mBAAmB,OAAO,GAAG,CAAC,OAC5B,cAAc,CAAC,MAAM,GAAG,CAC1B,aAAa,SAAS,kBACpB,cAAc,CAAC,OAAO,CACxB,cAAc,UAAU,KAAK,IAAI,CAAC,SAAS,CAAC,QAAQ,CAAC,IAAI,CAAC,EAAE,CAC7D,CAAC;YACF,+BAA+B;YAC/B,MAAM,IAAI,OAAO,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,UAAU,CAAC,CAAC,EAAE,cAAc,CAAC,OAAO,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC;YACxE,SAAS;SACV;aAAM;YACL,wEAAwE;YACxE,wDAAwD;YACxD,qEAAqE;YACrE,MAAM,IAAI,KAAK,CACb,kBAAkB,SAAS,OAAO,UAAU,KAAK,IAAI,CAAC,SAAS,CAC7D,QAAQ,CAAC,IAAI,CACd,EAAE,CACJ,CAAC;SACH;KACF;IAED,qEAAqE;IACrE,yDAAyD;IACzD,MAAM,IAAI,KAAK,CAAC,eAAe,CAAC,CAAC;AACnC,CAAC;AAWD,4DAA4D;AAC5D,kDAAkD;AAClD,qDAAqD;AAC9C,KAAK,UAAU,MAAM,CAC1B,SAAiB,EACjB,aAA4B,EAC5B,SAAiB,EACjB,GAAW,EACX,WAA+B,EAC/B,YAAgC,EAChC,aAAiC,EACjC,YAAoB,EACpB,WAA+B,EAC/B,UAAkB,EAClB,SAAiB,EACjB,IAAe,EACf,MAAc;IAEd,MAAM,UAAU,GAAa,EAAE,CAAC;IAChC,IAAI,CAAC,EAAE,CAAC,UAAU,CAAC,SAAS,CAAC,EAAE;QAC7B,MAAM,IAAI,KAAK,CAAC,wBAAwB,SAAS,EAAE,CAAC,CAAC;KACtD;IACD,IAAI,EAAE,CAAC,SAAS,CAAC,SAAS,CAAC,CAAC,WAAW,EAAE,EAAE;QACzC,MAAM,KAAK,GAAG,EAAE;aACb,WAAW,CAAC,SAAS,CAAC;aACtB,MAAM,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,QAAQ,CAAC,QAAQ,CAAC,CAAC;aACnC,GAAG,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,IAAI,CAAC,OAAO,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,CAAC;QAC1C,KAAK,MAAM,IAAI,IAAI,KAAK,EAAE;YACxB,UAAU,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC;SACvB;QACD,IAAI,UAAU,CAAC,MAAM,KAAK,CAAC,EAAE;YAC3B,MAAM,IAAI,KAAK,CAAC,sCAAsC,SAAS,IAAI,CAAC,CAAC;SACtE;KACF;SAAM;QACL,UAAU,CAAC,IAAI,CAAC,SAAS,CAAC,CAAC;KAC5B;IAED,OAAO,MAAM,WAAW,CACtB,UAAU,EACV,aAAa,EACb,SAAS,EACT,GAAG,EACH,WAAW,EACX,YAAY,EACZ,aAAa,EACb,YAAY,EACZ,WAAW,EACX,UAAU,EACV,SAAS,EACT,IAAI,EACJ,MAAM,CACP,CAAC;AACJ,CAAC;AAjDD,wBAiDC;AAED,uDAAuD;AACvD,SAAgB,mBAAmB,CAAC,KAAa;IAC/C,IAAI,UAAU,GAAG,CAAC,CAAC;IACnB,KAAK,MAAM,GAAG,IAAI,IAAI,CAAC,KAAK,CAAC,KAAK,CAAC,CAAC,IAAI,EAAE;QACxC,UAAU,IAAI,GAAG,CAAC,OAAO,CAAC,MAAM,CAAC;KAClC;IACD,OAAO,UAAU,CAAC;AA
CpB,CAAC;AAND,kDAMC;AAED,mEAAmE;AACnE,0CAA0C;AAC1C,SAAgB,uBAAuB,CAAC,aAAqB,EAAE,MAAc;IAC3E,MAAM,KAAK,GAAG,IAAI,CAAC,KAAK,CAAC,EAAE,CAAC,YAAY,CAAC,aAAa,EAAE,MAAM,CAAC,CAAC,CAAC;IACjE,MAAM,MAAM,GAAG,OAAO,CAAC,iCAAiC,CAAC,CAAC;IAE1D,MAAM,MAAM,GAAG,IAAI,UAAU,CAAC,SAAS,EAAE,CAAC,QAAQ,CAAC,KAAK,EAAE,MAAM,CAAC,CAAC;IAClE,IAAI,CAAC,MAAM,CAAC,KAAK,EAAE;QACjB,+EAA+E;QAC/E,KAAK,MAAM,KAAK,IAAI,MAAM,CAAC,MAAM,EAAE;YACjC,MAAM,CAAC,UAAU,CAAC,kBAAkB,KAAK,CAAC,KAAK,EAAE,CAAC,CAAC;YACnD,MAAM,CAAC,IAAI,CAAC,IAAI,CAAC,SAAS,CAAC,KAAK,EAAE,IAAI,EAAE,CAAC,CAAC,CAAC,CAAC;YAC5C,MAAM,CAAC,QAAQ,EAAE,CAAC;SACnB;QAED,8DAA8D;QAC9D,iFAAiF;QACjF,MAAM,WAAW,GAAG,MAAM,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,KAAK,CAAC,CAAC,KAAK,EAAE,CAAC,CAAC;QAC7D,MAAM,IAAI,KAAK,CACb,qBAAqB,aAAa,gCAAgC,WAAW,CAAC,IAAI,CAChF,IAAI,CACL,EAAE,CACJ,CAAC;KACH;AACH,CAAC;AAtBD,0DAsBC;AAED,wCAAwC;AACxC,qDAAqD;AACrD,KAAK,UAAU,WAAW,CACxB,UAAoB,EACpB,aAA4B,EAC5B,SAAiB,EACjB,GAAW,EACX,WAA+B,EAC/B,YAAgC,EAChC,aAAiC,EACjC,YAAoB,EACpB,WAA+B,EAC/B,UAAkB,EAClB,SAAiB,EACjB,IAAe,EACf,MAAc;IAEd,MAAM,CAAC,IAAI,CAAC,0BAA0B,IAAI,CAAC,SAAS,CAAC,UAAU,CAAC,EAAE,CAAC,CAAC;IAEpE,IAAI,IAAI,KAAK,SAAS,EAAE;QACtB,yFAAyF;QACzF,MAAM,cAAc,GAAG,qBAAqB,CAAC;QAC7C,IAAI,OAAO,CAAC,GAAG,CAAC,cAAc,CAAC,EAAE;YAC/B,MAAM,IAAI,KAAK,CACb,uGAAuG,CACxG,CAAC;SACH;QACD,IAAI,CAAC,cAAc,CAAC,cAAc,EAAE,cAAc,CAAC,CAAC;KACrD;IAED,4EAA4E;IAC5E,KAAK,MAAM,IAAI,IAAI,UAAU,EAAE;QAC7B,uBAAuB,CAAC,IAAI,EAAE,MAAM,CAAC,CAAC;KACvC;IAED,IAAI,YAAY,GAAG,iBAAiB,CAAC,UAAU,CAAC,CAAC;IACjD,YAAY,GAAG,YAAY,CAAC,eAAe,CACzC,YAAY,EACZ,YAAY,EACZ,MAAM,CACP,CAAC;IAEF,MAAM,YAAY,GAAG,cAAI,CAAC,QAAQ,CAAC,YAAY,CAAC,CAAC,QAAQ,CAAC,QAAQ,CAAC,CAAC;IACpE,MAAM,WAAW,GAAG,kBAAO,CAAC,YAAY,CAAC,CAAC;IAE1C,MAAM,SAAS,GAAG,IAAI,CAAC,YAAY,CAAC,YAAY,CAAC,CAAC;IAElD,IAAI,OAAe,CAAC;IACpB,IAAI,IAAI,KAAK,SAAS,EAAE;QACtB,OAAO,GAAG,IAAI,CAAC,SAAS,CAAC;YACvB,UAAU,EAAE,SAAS;YACrB,GAAG;YACH,YAAY,EAAE,WAAW;YACzB,aAAa,EAAE,YAAY;YAC3B,KAAK,EAAE,YAAY;YACnB,eAAe,EAAE,aAAa;YAC9B,YAAY,EAAE,WAAW;YACzB,WAAW;YACX,UAAU,EAAE,OAA
O,CAAC,GAAG,CAAC,SAAS,CAAC,0BAA0B,CAAC;YAC7D,UAAU,EAAE,SAAS;SACtB,CAAC,CAAC;KACJ;SAAM;QACL,OAAO,GAAG,IAAI,CAAC,SAAS,CAAC;YACvB,UAAU,EAAE,SAAS;YACrB,GAAG;YACH,KAAK,EAAE,YAAY;YACnB,YAAY,EAAE,WAAW;YACzB,SAAS,EAAE,SAAS,CAAC,CAAC,CAAC;SACxB,CAAC,CAAC;KACJ;IAED,4CAA4C;IAC5C,MAAM,kBAAkB,GAAG,YAAY,CAAC,MAAM,CAAC;IAC/C,MAAM,CAAC,KAAK,CAAC,oBAAoB,kBAAkB,QAAQ,CAAC,CAAC;IAC7D,MAAM,qBAAqB,GAAG,YAAY,CAAC,MAAM,CAAC;IAClD,MAAM,CAAC,KAAK,CAAC,8BAA8B,qBAAqB,QAAQ,CAAC,CAAC;IAC1E,MAAM,gBAAgB,GAAG,mBAAmB,CAAC,YAAY,CAAC,CAAC;IAC3D,MAAM,CAAC,KAAK,CAAC,gCAAgC,gBAAgB,EAAE,CAAC,CAAC;IAEjE,kBAAkB;IAClB,MAAM,aAAa,CACjB,OAAO,EACP,aAAa,EACb,UAAU,EACV,SAAS,EACT,IAAI,EACJ,MAAM,CACP,CAAC;IAEF,OAAO;QACL,qBAAqB,EAAE,kBAAkB;QACzC,wBAAwB,EAAE,qBAAqB;QAC/C,oBAAoB,EAAE,gBAAgB;KACvC,CAAC;AACJ,CAAC"}
\ No newline at end of file
+{"version":3,"file":"upload-lib.js","sourceRoot":"","sources":["../src/upload-lib.ts"],"names":[],"mappings":";;;;;;;;;;;;AAAA,oDAAsC;AACtC,wDAA+B;AAC/B,uCAAyB;AACzB,uDAAyC;AACzC,2CAA6B;AAC7B,gDAAwB;AAExB,kDAAoC;AACpC,6DAA+C;AAG/C,gEAAkD;AAClD,6CAA+B;AAE/B,mEAAmE;AACnE,qDAAqD;AACrD,SAAgB,iBAAiB,CAAC,UAAoB;IACpD,MAAM,aAAa,GAAG;QACpB,OAAO,EAAE,IAAI;QACb,IAAI,EAAE,EAAW;KAClB,CAAC;IAEF,KAAK,MAAM,SAAS,IAAI,UAAU,EAAE;QAClC,MAAM,WAAW,GAAG,IAAI,CAAC,KAAK,CAAC,EAAE,CAAC,YAAY,CAAC,SAAS,EAAE,MAAM,CAAC,CAAC,CAAC;QACnE,sBAAsB;QACtB,IAAI,aAAa,CAAC,OAAO,KAAK,IAAI,EAAE;YAClC,aAAa,CAAC,OAAO,GAAG,WAAW,CAAC,OAAO,CAAC;SAC7C;aAAM,IAAI,aAAa,CAAC,OAAO,KAAK,WAAW,CAAC,OAAO,EAAE;YACxD,MAAM,yCAAyC,aAAa,CAAC,OAAO,QAAQ,WAAW,CAAC,OAAO,EAAE,CAAC;SACnG;QAED,aAAa,CAAC,IAAI,CAAC,IAAI,CAAC,GAAG,WAAW,CAAC,IAAI,CAAC,CAAC;KAC9C;IAED,OAAO,IAAI,CAAC,SAAS,CAAC,aAAa,CAAC,CAAC;AACvC,CAAC;AAnBD,8CAmBC;AAED,4BAA4B;AAC5B,qEAAqE;AACrE,KAAK,UAAU,aAAa,CAC1B,OAAY,EACZ,aAA4B,EAC5B,UAAkB,EAClB,SAAiB,EACjB,IAAe,EACf,MAAc;IAEd,MAAM,CAAC,IAAI,CAAC,mBAAmB,CAAC,CAAC;IAEjC,sDAAsD;IACtD,MAAM,QAAQ,GAAG,OAAO,CAAC,GAAG,CAAC,WAAW,CAAC,KAAK,MAAM,IAAI,KAAK,CAAC;IAC9D,IAAI,QAAQ,EAAE;QACZ,OAAO;KACR;IAED,MAAM,MAAM,GAAG,GAAG,CAAC,YAAY,CAAC,UAAU,EAAE,SAAS,CAAC,CAAC;IAEvD,MAAM,MAAM,GACV,IAAI,KAAK,SAAS;QAChB,CAAC,CAAC,gDAAgD;QAClD,CAAC,CAAC,+CAA+C,CAAC;IACtD,MAAM,QAAQ,GAAG,MAAM,MAAM,CAAC,OAAO,CAAC,MAAM,EAAE;QAC5C,KAAK,EAAE,aAAa,CAAC,KAAK;QAC1B,IAAI,EAAE,aAAa,CAAC,IAAI;QACxB,IAAI,EAAE,OAAO;KACd,CAAC,CAAC;IAEH,MAAM,CAAC,KAAK,CAAC,oBAAoB,QAAQ,CAAC,MAAM,EAAE,CAAC,CAAC;IACpD,MAAM,CAAC,IAAI,CAAC,+BAA+B,CAAC,CAAC;AAC/C,CAAC;AAWD,4DAA4D;AAC5D,kDAAkD;AAClD,qDAAqD;AAC9C,KAAK,UAAU,MAAM,CAC1B,SAAiB,EACjB,aAA4B,EAC5B,SAAiB,EACjB,GAAW,EACX,WAA+B,EAC/B,YAAgC,EAChC,aAAiC,EACjC,YAAoB,EACpB,WAA+B,EAC/B,UAAkB,EAClB,SAAiB,EACjB,IAAe,EACf,MAAc;IAEd,MAAM,UAAU,GAAa,EAAE,CAAC;IAChC,IAAI,CAAC,EAAE,CAAC,UAAU,CAAC,SAAS,CAAC,EAAE;QAC7B,MAAM,IAAI,KAAK,CAAC,wBAAwB,SAAS,EAAE,CAAC,CAAC;KACtD;IACD,IAAI,EAAE,CAAC,SAAS,CAAC,SAAS,CAAC,CAAC,WAAW,EAAE,EAAE;QACzC,MAAM,KAAK,
GAAG,EAAE;aACb,WAAW,CAAC,SAAS,CAAC;aACtB,MAAM,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,QAAQ,CAAC,QAAQ,CAAC,CAAC;aACnC,GAAG,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,IAAI,CAAC,OAAO,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,CAAC;QAC1C,KAAK,MAAM,IAAI,IAAI,KAAK,EAAE;YACxB,UAAU,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC;SACvB;QACD,IAAI,UAAU,CAAC,MAAM,KAAK,CAAC,EAAE;YAC3B,MAAM,IAAI,KAAK,CAAC,sCAAsC,SAAS,IAAI,CAAC,CAAC;SACtE;KACF;SAAM;QACL,UAAU,CAAC,IAAI,CAAC,SAAS,CAAC,CAAC;KAC5B;IAED,OAAO,MAAM,WAAW,CACtB,UAAU,EACV,aAAa,EACb,SAAS,EACT,GAAG,EACH,WAAW,EACX,YAAY,EACZ,aAAa,EACb,YAAY,EACZ,WAAW,EACX,UAAU,EACV,SAAS,EACT,IAAI,EACJ,MAAM,CACP,CAAC;AACJ,CAAC;AAjDD,wBAiDC;AAED,uDAAuD;AACvD,SAAgB,mBAAmB,CAAC,KAAa;IAC/C,IAAI,UAAU,GAAG,CAAC,CAAC;IACnB,KAAK,MAAM,GAAG,IAAI,IAAI,CAAC,KAAK,CAAC,KAAK,CAAC,CAAC,IAAI,EAAE;QACxC,UAAU,IAAI,GAAG,CAAC,OAAO,CAAC,MAAM,CAAC;KAClC;IACD,OAAO,UAAU,CAAC;AACpB,CAAC;AAND,kDAMC;AAED,mEAAmE;AACnE,0CAA0C;AAC1C,SAAgB,uBAAuB,CAAC,aAAqB,EAAE,MAAc;IAC3E,MAAM,KAAK,GAAG,IAAI,CAAC,KAAK,CAAC,EAAE,CAAC,YAAY,CAAC,aAAa,EAAE,MAAM,CAAC,CAAC,CAAC;IACjE,MAAM,MAAM,GAAG,OAAO,CAAC,iCAAiC,CAAC,CAAC;IAE1D,MAAM,MAAM,GAAG,IAAI,UAAU,CAAC,SAAS,EAAE,CAAC,QAAQ,CAAC,KAAK,EAAE,MAAM,CAAC,CAAC;IAClE,IAAI,CAAC,MAAM,CAAC,KAAK,EAAE;QACjB,+EAA+E;QAC/E,KAAK,MAAM,KAAK,IAAI,MAAM,CAAC,MAAM,EAAE;YACjC,MAAM,CAAC,UAAU,CAAC,kBAAkB,KAAK,CAAC,KAAK,EAAE,CAAC,CAAC;YACnD,MAAM,CAAC,IAAI,CAAC,IAAI,CAAC,SAAS,CAAC,KAAK,EAAE,IAAI,EAAE,CAAC,CAAC,CAAC,CAAC;YAC5C,MAAM,CAAC,QAAQ,EAAE,CAAC;SACnB;QAED,8DAA8D;QAC9D,iFAAiF;QACjF,MAAM,WAAW,GAAG,MAAM,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,KAAK,CAAC,CAAC,KAAK,EAAE,CAAC,CAAC;QAC7D,MAAM,IAAI,KAAK,CACb,qBAAqB,aAAa,gCAAgC,WAAW,CAAC,IAAI,CAChF,IAAI,CACL,EAAE,CACJ,CAAC;KACH;AACH,CAAC;AAtBD,0DAsBC;AAED,wCAAwC;AACxC,qDAAqD;AACrD,KAAK,UAAU,WAAW,CACxB,UAAoB,EACpB,aAA4B,EAC5B,SAAiB,EACjB,GAAW,EACX,WAA+B,EAC/B,YAAgC,EAChC,aAAiC,EACjC,YAAoB,EACpB,WAA+B,EAC/B,UAAkB,EAClB,SAAiB,EACjB,IAAe,EACf,MAAc;IAEd,MAAM,CAAC,IAAI,CAAC,0BAA0B,IAAI,CAAC,SAAS,CAAC,UAAU,CAAC,EAAE,CAAC,CAAC;IAEpE,IAAI,IAAI,KAAK,SAAS,EAAE;QACtB,yF
AAyF;QACzF,MAAM,cAAc,GAAG,qBAAqB,CAAC;QAC7C,IAAI,OAAO,CAAC,GAAG,CAAC,cAAc,CAAC,EAAE;YAC/B,MAAM,IAAI,KAAK,CACb,uGAAuG,CACxG,CAAC;SACH;QACD,IAAI,CAAC,cAAc,CAAC,cAAc,EAAE,cAAc,CAAC,CAAC;KACrD;IAED,4EAA4E;IAC5E,KAAK,MAAM,IAAI,IAAI,UAAU,EAAE;QAC7B,uBAAuB,CAAC,IAAI,EAAE,MAAM,CAAC,CAAC;KACvC;IAED,IAAI,YAAY,GAAG,iBAAiB,CAAC,UAAU,CAAC,CAAC;IACjD,YAAY,GAAG,YAAY,CAAC,eAAe,CACzC,YAAY,EACZ,YAAY,EACZ,MAAM,CACP,CAAC;IAEF,MAAM,YAAY,GAAG,cAAI,CAAC,QAAQ,CAAC,YAAY,CAAC,CAAC,QAAQ,CAAC,QAAQ,CAAC,CAAC;IACpE,MAAM,WAAW,GAAG,kBAAO,CAAC,YAAY,CAAC,CAAC;IAE1C,MAAM,SAAS,GAAG,IAAI,CAAC,YAAY,CAAC,YAAY,CAAC,CAAC;IAElD,IAAI,OAAe,CAAC;IACpB,IAAI,IAAI,KAAK,SAAS,EAAE;QACtB,OAAO,GAAG,IAAI,CAAC,SAAS,CAAC;YACvB,UAAU,EAAE,SAAS;YACrB,GAAG;YACH,YAAY,EAAE,WAAW;YACzB,aAAa,EAAE,YAAY;YAC3B,KAAK,EAAE,YAAY;YACnB,eAAe,EAAE,aAAa;YAC9B,YAAY,EAAE,WAAW;YACzB,WAAW;YACX,UAAU,EAAE,OAAO,CAAC,GAAG,CAAC,SAAS,CAAC,0BAA0B,CAAC;YAC7D,UAAU,EAAE,SAAS;SACtB,CAAC,CAAC;KACJ;SAAM;QACL,OAAO,GAAG,IAAI,CAAC,SAAS,CAAC;YACvB,UAAU,EAAE,SAAS;YACrB,GAAG;YACH,KAAK,EAAE,YAAY;YACnB,YAAY,EAAE,WAAW;YACzB,SAAS,EAAE,SAAS,CAAC,CAAC,CAAC;SACxB,CAAC,CAAC;KACJ;IAED,4CAA4C;IAC5C,MAAM,kBAAkB,GAAG,YAAY,CAAC,MAAM,CAAC;IAC/C,MAAM,CAAC,KAAK,CAAC,oBAAoB,kBAAkB,QAAQ,CAAC,CAAC;IAC7D,MAAM,qBAAqB,GAAG,YAAY,CAAC,MAAM,CAAC;IAClD,MAAM,CAAC,KAAK,CAAC,8BAA8B,qBAAqB,QAAQ,CAAC,CAAC;IAC1E,MAAM,gBAAgB,GAAG,mBAAmB,CAAC,YAAY,CAAC,CAAC;IAC3D,MAAM,CAAC,KAAK,CAAC,gCAAgC,gBAAgB,EAAE,CAAC,CAAC;IAEjE,kBAAkB;IAClB,MAAM,aAAa,CACjB,OAAO,EACP,aAAa,EACb,UAAU,EACV,SAAS,EACT,IAAI,EACJ,MAAM,CACP,CAAC;IAEF,OAAO;QACL,qBAAqB,EAAE,kBAAkB;QACzC,wBAAwB,EAAE,qBAAqB;QAC/C,oBAAoB,EAAE,gBAAgB;KACvC,CAAC;AACJ,CAAC"}
\ No newline at end of file
diff --git a/node_modules/@octokit/plugin-retry/LICENSE b/node_modules/@octokit/plugin-retry/LICENSE
new file mode 100644
index 000000000..12d45d98c
--- /dev/null
+++ b/node_modules/@octokit/plugin-retry/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2018 Octokit contributors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/node_modules/@octokit/plugin-retry/README.md b/node_modules/@octokit/plugin-retry/README.md
new file mode 100644
index 000000000..46ec98982
--- /dev/null
+++ b/node_modules/@octokit/plugin-retry/README.md
@@ -0,0 +1,105 @@
+# plugin-retry.js
+
+> Retries requests for server 4xx/5xx responses except `400`, `401`, `403`, `404` and `422`.
+
+[](https://www.npmjs.com/package/@octokit/plugin-retry)
+[](https://github.com/octokit/plugin-retry.js/actions?workflow=Test)
+
+## Usage
+
+
+
+
+Browsers
+ |
+
+Load `@octokit/plugin-retry` and [`@octokit/core`](https://github.com/octokit/core.js) (or core-compatible module) directly from [cdn.pika.dev](https://cdn.pika.dev)
+
+```html
+
+```
+
+ |
+
+Node
+ |
+
+Install with `npm install @octokit/core @octokit/plugin-retry`. Optionally replace `@octokit/core` with a core-compatible module
+
+```js
+const { Octokit } = require("@octokit/core");
+const { retry } = require("@octokit/plugin-retry");
+```
+
+ |
+
+
+
+**Note**: If you use it with `@octokit/rest` v16, install `@octokit/core` as a devDependency. This is only temporary and will no longer be necessary with `@octokit/rest` v17.
+
+```js
+const MyOctokit = Octokit.plugin(retry);
+const octokit = new MyOctokit({ auth: "secret123" });
+
+// retries request up to 3 times in case of a 500 response
+octokit.request("/").catch((error) => {
+ if (error.request.request.retryCount) {
+ console.log(
+ `request failed after ${error.request.request.retryCount} retries`
+ );
+ }
+
+ console.error(error);
+});
+```
+
+To override the default `doNotRetry` list:
+
+```js
+const octokit = new MyOctokit({
+ auth: "secret123",
+ retry: {
+ doNotRetry: [
+ /* List of HTTP 4xx/5xx status codes */
+ ],
+ },
+});
+```
+
+To override the number of retries:
+
+```js
+const octokit = new MyOctokit({
+ auth: "secret123",
+ request: { retries: 1 },
+});
+```
+
+You can manually ask for retries for any request by passing `{ request: { retries: numRetries, retryAfter: delayInSeconds }}`
+
+```js
+octokit
+ .request("/", { request: { retries: 1, retryAfter: 1 } })
+ .catch((error) => {
+ if (error.request.request.retryCount) {
+ console.log(
+ `request failed after ${error.request.request.retryCount} retries`
+ );
+ }
+
+ console.error(error);
+ });
+```
+
+Pass `{ retry: { enabled: false } }` to disable this plugin.
+
+## Contributing
+
+See [CONTRIBUTING.md](CONTRIBUTING.md)
+
+## License
+
+[MIT](LICENSE)
diff --git a/node_modules/@octokit/plugin-retry/dist-node/index.js b/node_modules/@octokit/plugin-retry/dist-node/index.js
new file mode 100644
index 000000000..674e2c328
--- /dev/null
+++ b/node_modules/@octokit/plugin-retry/dist-node/index.js
@@ -0,0 +1,75 @@
+'use strict';
+
+Object.defineProperty(exports, '__esModule', { value: true });
+
+function _interopDefault (ex) { return (ex && (typeof ex === 'object') && 'default' in ex) ? ex['default'] : ex; }
+
+var Bottleneck = _interopDefault(require('bottleneck/light'));
+
+// @ts-ignore
+async function errorRequest(octokit, state, error, options) {
+ if (!error.request || !error.request.request) {
+ // address https://github.com/octokit/plugin-retry.js/issues/8
+ throw error;
+ } // retry all >= 400 && not doNotRetry
+
+
+ if (error.status >= 400 && !state.doNotRetry.includes(error.status)) {
+ const retries = options.request.retries != null ? options.request.retries : state.retries;
+ const retryAfter = Math.pow((options.request.retryCount || 0) + 1, 2);
+ throw octokit.retry.retryRequest(error, retries, retryAfter);
+ } // Maybe eventually there will be more cases here
+
+
+ throw error;
+}
+
+// @ts-ignore
+
+async function wrapRequest(state, request, options) {
+ const limiter = new Bottleneck(); // @ts-ignore
+
+ limiter.on("failed", function (error, info) {
+ const maxRetries = ~~error.request.request.retries;
+ const after = ~~error.request.request.retryAfter;
+ options.request.retryCount = info.retryCount + 1;
+
+ if (maxRetries > info.retryCount) {
+ // Returning a number instructs the limiter to retry
+ // the request after that number of milliseconds have passed
+ return after * state.retryAfterBaseValue;
+ }
+ });
+ return limiter.schedule(request, options);
+}
+
+const VERSION = "3.0.3";
+function retry(octokit, octokitOptions = {}) {
+ const state = Object.assign({
+ enabled: true,
+ retryAfterBaseValue: 1000,
+ doNotRetry: [400, 401, 403, 404, 422],
+ retries: 3
+ }, octokitOptions.retry);
+ octokit.retry = {
+ retryRequest: (error, retries, retryAfter) => {
+ error.request.request = Object.assign({}, error.request.request, {
+ retries: retries,
+ retryAfter: retryAfter
+ });
+ return error;
+ }
+ };
+
+ if (!state.enabled) {
+ return;
+ }
+
+ octokit.hook.error("request", errorRequest.bind(null, octokit, state));
+ octokit.hook.wrap("request", wrapRequest.bind(null, state));
+}
+retry.VERSION = VERSION;
+
+exports.VERSION = VERSION;
+exports.retry = retry;
+//# sourceMappingURL=index.js.map
diff --git a/node_modules/@octokit/plugin-retry/dist-node/index.js.map b/node_modules/@octokit/plugin-retry/dist-node/index.js.map
new file mode 100644
index 000000000..e0c9041f2
--- /dev/null
+++ b/node_modules/@octokit/plugin-retry/dist-node/index.js.map
@@ -0,0 +1 @@
+{"version":3,"file":"index.js","sources":["../dist-src/error-request.js","../dist-src/wrap-request.js","../dist-src/index.js"],"sourcesContent":["// @ts-ignore\nexport async function errorRequest(octokit, state, error, options) {\n if (!error.request || !error.request.request) {\n // address https://github.com/octokit/plugin-retry.js/issues/8\n throw error;\n }\n // retry all >= 400 && not doNotRetry\n if (error.status >= 400 && !state.doNotRetry.includes(error.status)) {\n const retries = options.request.retries != null ? options.request.retries : state.retries;\n const retryAfter = Math.pow((options.request.retryCount || 0) + 1, 2);\n throw octokit.retry.retryRequest(error, retries, retryAfter);\n }\n // Maybe eventually there will be more cases here\n throw error;\n}\n","// @ts-ignore\nimport Bottleneck from \"bottleneck/light\";\n// @ts-ignore\nexport async function wrapRequest(state, request, options) {\n const limiter = new Bottleneck();\n // @ts-ignore\n limiter.on(\"failed\", function (error, info) {\n const maxRetries = ~~error.request.request.retries;\n const after = ~~error.request.request.retryAfter;\n options.request.retryCount = info.retryCount + 1;\n if (maxRetries > info.retryCount) {\n // Returning a number instructs the limiter to retry\n // the request after that number of milliseconds have passed\n return after * state.retryAfterBaseValue;\n }\n });\n return limiter.schedule(request, options);\n}\n","import { errorRequest } from \"./error-request\";\nimport { wrapRequest } from \"./wrap-request\";\nexport const VERSION = \"3.0.3\";\nexport function retry(octokit, octokitOptions = {}) {\n const state = Object.assign({\n enabled: true,\n retryAfterBaseValue: 1000,\n doNotRetry: [400, 401, 403, 404, 422],\n retries: 3,\n }, octokitOptions.retry);\n octokit.retry = {\n retryRequest: (error, retries, retryAfter) => {\n error.request.request = Object.assign({}, error.request.request, {\n retries: retries,\n retryAfter: retryAfter,\n });\n return 
error;\n },\n };\n if (!state.enabled) {\n return;\n }\n octokit.hook.error(\"request\", errorRequest.bind(null, octokit, state));\n octokit.hook.wrap(\"request\", wrapRequest.bind(null, state));\n}\nretry.VERSION = VERSION;\n"],"names":["errorRequest","octokit","state","error","options","request","status","doNotRetry","includes","retries","retryAfter","Math","pow","retryCount","retry","retryRequest","wrapRequest","limiter","Bottleneck","on","info","maxRetries","after","retryAfterBaseValue","schedule","VERSION","octokitOptions","Object","assign","enabled","hook","bind","wrap"],"mappings":";;;;;;;;AAAA;AACO,eAAeA,YAAf,CAA4BC,OAA5B,EAAqCC,KAArC,EAA4CC,KAA5C,EAAmDC,OAAnD,EAA4D;AAC/D,MAAI,CAACD,KAAK,CAACE,OAAP,IAAkB,CAACF,KAAK,CAACE,OAAN,CAAcA,OAArC,EAA8C;AAC1C;AACA,UAAMF,KAAN;AACH,GAJ8D;;;AAM/D,MAAIA,KAAK,CAACG,MAAN,IAAgB,GAAhB,IAAuB,CAACJ,KAAK,CAACK,UAAN,CAAiBC,QAAjB,CAA0BL,KAAK,CAACG,MAAhC,CAA5B,EAAqE;AACjE,UAAMG,OAAO,GAAGL,OAAO,CAACC,OAAR,CAAgBI,OAAhB,IAA2B,IAA3B,GAAkCL,OAAO,CAACC,OAAR,CAAgBI,OAAlD,GAA4DP,KAAK,CAACO,OAAlF;AACA,UAAMC,UAAU,GAAGC,IAAI,CAACC,GAAL,CAAS,CAACR,OAAO,CAACC,OAAR,CAAgBQ,UAAhB,IAA8B,CAA/B,IAAoC,CAA7C,EAAgD,CAAhD,CAAnB;AACA,UAAMZ,OAAO,CAACa,KAAR,CAAcC,YAAd,CAA2BZ,KAA3B,EAAkCM,OAAlC,EAA2CC,UAA3C,CAAN;AACH,GAV8D;;;AAY/D,QAAMP,KAAN;AACH;;ACdD;AACA;AAEA,AAAO,eAAea,WAAf,CAA2Bd,KAA3B,EAAkCG,OAAlC,EAA2CD,OAA3C,EAAoD;AACvD,QAAMa,OAAO,GAAG,IAAIC,UAAJ,EAAhB,CADuD;;AAGvDD,EAAAA,OAAO,CAACE,EAAR,CAAW,QAAX,EAAqB,UAAUhB,KAAV,EAAiBiB,IAAjB,EAAuB;AACxC,UAAMC,UAAU,GAAG,CAAC,CAAClB,KAAK,CAACE,OAAN,CAAcA,OAAd,CAAsBI,OAA3C;AACA,UAAMa,KAAK,GAAG,CAAC,CAACnB,KAAK,CAACE,OAAN,CAAcA,OAAd,CAAsBK,UAAtC;AACAN,IAAAA,OAAO,CAACC,OAAR,CAAgBQ,UAAhB,GAA6BO,IAAI,CAACP,UAAL,GAAkB,CAA/C;;AACA,QAAIQ,UAAU,GAAGD,IAAI,CAACP,UAAtB,EAAkC;AAC9B;AACA;AACA,aAAOS,KAAK,GAAGpB,KAAK,CAACqB,mBAArB;AACH;AACJ,GATD;AAUA,SAAON,OAAO,CAACO,QAAR,CAAiBnB,OAAjB,EAA0BD,OAA1B,CAAP;AACH;;MCfYqB,OAAO,GAAG,mBAAhB;AACP,AAAO,SAASX,KAAT,CAAeb,OAAf,EAAwByB,cAAc,GAAG,EAAzC,EAA6C;AAChD,QAAMxB,KAAK,GAAGyB,MAAM,CAACC,M
AAP,CAAc;AACxBC,IAAAA,OAAO,EAAE,IADe;AAExBN,IAAAA,mBAAmB,EAAE,IAFG;AAGxBhB,IAAAA,UAAU,EAAE,CAAC,GAAD,EAAM,GAAN,EAAW,GAAX,EAAgB,GAAhB,EAAqB,GAArB,CAHY;AAIxBE,IAAAA,OAAO,EAAE;AAJe,GAAd,EAKXiB,cAAc,CAACZ,KALJ,CAAd;AAMAb,EAAAA,OAAO,CAACa,KAAR,GAAgB;AACZC,IAAAA,YAAY,EAAE,CAACZ,KAAD,EAAQM,OAAR,EAAiBC,UAAjB,KAAgC;AAC1CP,MAAAA,KAAK,CAACE,OAAN,CAAcA,OAAd,GAAwBsB,MAAM,CAACC,MAAP,CAAc,EAAd,EAAkBzB,KAAK,CAACE,OAAN,CAAcA,OAAhC,EAAyC;AAC7DI,QAAAA,OAAO,EAAEA,OADoD;AAE7DC,QAAAA,UAAU,EAAEA;AAFiD,OAAzC,CAAxB;AAIA,aAAOP,KAAP;AACH;AAPW,GAAhB;;AASA,MAAI,CAACD,KAAK,CAAC2B,OAAX,EAAoB;AAChB;AACH;;AACD5B,EAAAA,OAAO,CAAC6B,IAAR,CAAa3B,KAAb,CAAmB,SAAnB,EAA8BH,YAAY,CAAC+B,IAAb,CAAkB,IAAlB,EAAwB9B,OAAxB,EAAiCC,KAAjC,CAA9B;AACAD,EAAAA,OAAO,CAAC6B,IAAR,CAAaE,IAAb,CAAkB,SAAlB,EAA6BhB,WAAW,CAACe,IAAZ,CAAiB,IAAjB,EAAuB7B,KAAvB,CAA7B;AACH;AACDY,KAAK,CAACW,OAAN,GAAgBA,OAAhB;;;;;"}
\ No newline at end of file
diff --git a/node_modules/@octokit/plugin-retry/dist-src/error-request.js b/node_modules/@octokit/plugin-retry/dist-src/error-request.js
new file mode 100644
index 000000000..b79b01cb0
--- /dev/null
+++ b/node_modules/@octokit/plugin-retry/dist-src/error-request.js
@@ -0,0 +1,15 @@
+// @ts-ignore
+export async function errorRequest(octokit, state, error, options) {
+ if (!error.request || !error.request.request) {
+ // address https://github.com/octokit/plugin-retry.js/issues/8
+ throw error;
+ }
+ // retry all >= 400 && not doNotRetry
+ if (error.status >= 400 && !state.doNotRetry.includes(error.status)) {
+ const retries = options.request.retries != null ? options.request.retries : state.retries;
+ const retryAfter = Math.pow((options.request.retryCount || 0) + 1, 2);
+ throw octokit.retry.retryRequest(error, retries, retryAfter);
+ }
+ // Maybe eventually there will be more cases here
+ throw error;
+}
diff --git a/node_modules/@octokit/plugin-retry/dist-src/index.js b/node_modules/@octokit/plugin-retry/dist-src/index.js
new file mode 100644
index 000000000..7849095ef
--- /dev/null
+++ b/node_modules/@octokit/plugin-retry/dist-src/index.js
@@ -0,0 +1,26 @@
+import { errorRequest } from "./error-request";
+import { wrapRequest } from "./wrap-request";
+export const VERSION = "0.0.0-development";
+export function retry(octokit, octokitOptions = {}) {
+ const state = Object.assign({
+ enabled: true,
+ retryAfterBaseValue: 1000,
+ doNotRetry: [400, 401, 403, 404, 422],
+ retries: 3,
+ }, octokitOptions.retry);
+ octokit.retry = {
+ retryRequest: (error, retries, retryAfter) => {
+ error.request.request = Object.assign({}, error.request.request, {
+ retries: retries,
+ retryAfter: retryAfter,
+ });
+ return error;
+ },
+ };
+ if (!state.enabled) {
+ return;
+ }
+ octokit.hook.error("request", errorRequest.bind(null, octokit, state));
+ octokit.hook.wrap("request", wrapRequest.bind(null, state));
+}
+retry.VERSION = VERSION;
diff --git a/node_modules/@octokit/plugin-retry/dist-src/version.js b/node_modules/@octokit/plugin-retry/dist-src/version.js
new file mode 100644
index 000000000..9350c15e0
--- /dev/null
+++ b/node_modules/@octokit/plugin-retry/dist-src/version.js
@@ -0,0 +1 @@
+export const VERSION = "3.0.3";
diff --git a/node_modules/@octokit/plugin-retry/dist-src/wrap-request.js b/node_modules/@octokit/plugin-retry/dist-src/wrap-request.js
new file mode 100644
index 000000000..1454e2fb5
--- /dev/null
+++ b/node_modules/@octokit/plugin-retry/dist-src/wrap-request.js
@@ -0,0 +1,18 @@
+// @ts-ignore
+import Bottleneck from "bottleneck/light";
+// @ts-ignore
+export async function wrapRequest(state, request, options) {
+ const limiter = new Bottleneck();
+ // @ts-ignore
+ limiter.on("failed", function (error, info) {
+ const maxRetries = ~~error.request.request.retries;
+ const after = ~~error.request.request.retryAfter;
+ options.request.retryCount = info.retryCount + 1;
+ if (maxRetries > info.retryCount) {
+ // Returning a number instructs the limiter to retry
+ // the request after that number of milliseconds have passed
+ return after * state.retryAfterBaseValue;
+ }
+ });
+ return limiter.schedule(request, options);
+}
diff --git a/node_modules/@octokit/plugin-retry/dist-types/error-request.d.ts b/node_modules/@octokit/plugin-retry/dist-types/error-request.d.ts
new file mode 100644
index 000000000..220f3ca6e
--- /dev/null
+++ b/node_modules/@octokit/plugin-retry/dist-types/error-request.d.ts
@@ -0,0 +1 @@
+export declare function errorRequest(octokit: any, state: any, error: any, options: any): Promise;
diff --git a/node_modules/@octokit/plugin-retry/dist-types/index.d.ts b/node_modules/@octokit/plugin-retry/dist-types/index.d.ts
new file mode 100644
index 000000000..92d5e4f31
--- /dev/null
+++ b/node_modules/@octokit/plugin-retry/dist-types/index.d.ts
@@ -0,0 +1,6 @@
+import { Octokit } from "@octokit/core";
+export declare const VERSION = "0.0.0-development";
+export declare function retry(octokit: Octokit, octokitOptions?: ConstructorParameters[0]): void;
+export declare namespace retry {
+ var VERSION: string;
+}
diff --git a/node_modules/@octokit/plugin-retry/dist-types/version.d.ts b/node_modules/@octokit/plugin-retry/dist-types/version.d.ts
new file mode 100644
index 000000000..09c2448d3
--- /dev/null
+++ b/node_modules/@octokit/plugin-retry/dist-types/version.d.ts
@@ -0,0 +1 @@
+export declare const VERSION = "3.0.3";
diff --git a/node_modules/@octokit/plugin-retry/dist-types/wrap-request.d.ts b/node_modules/@octokit/plugin-retry/dist-types/wrap-request.d.ts
new file mode 100644
index 000000000..9333a888b
--- /dev/null
+++ b/node_modules/@octokit/plugin-retry/dist-types/wrap-request.d.ts
@@ -0,0 +1 @@
+export declare function wrapRequest(state: any, request: any, options: any): Promise;
diff --git a/node_modules/@octokit/plugin-retry/dist-web/index.js b/node_modules/@octokit/plugin-retry/dist-web/index.js
new file mode 100644
index 000000000..bbeea9bf8
--- /dev/null
+++ b/node_modules/@octokit/plugin-retry/dist-web/index.js
@@ -0,0 +1,63 @@
+import Bottleneck from 'bottleneck/light';
+
+// @ts-ignore
+async function errorRequest(octokit, state, error, options) {
+ if (!error.request || !error.request.request) {
+ // address https://github.com/octokit/plugin-retry.js/issues/8
+ throw error;
+ }
+ // retry all >= 400 && not doNotRetry
+ if (error.status >= 400 && !state.doNotRetry.includes(error.status)) {
+ const retries = options.request.retries != null ? options.request.retries : state.retries;
+ const retryAfter = Math.pow((options.request.retryCount || 0) + 1, 2);
+ throw octokit.retry.retryRequest(error, retries, retryAfter);
+ }
+ // Maybe eventually there will be more cases here
+ throw error;
+}
+
+// @ts-ignore
+// @ts-ignore
+async function wrapRequest(state, request, options) {
+ const limiter = new Bottleneck();
+ // @ts-ignore
+ limiter.on("failed", function (error, info) {
+ const maxRetries = ~~error.request.request.retries;
+ const after = ~~error.request.request.retryAfter;
+ options.request.retryCount = info.retryCount + 1;
+ if (maxRetries > info.retryCount) {
+ // Returning a number instructs the limiter to retry
+ // the request after that number of milliseconds have passed
+ return after * state.retryAfterBaseValue;
+ }
+ });
+ return limiter.schedule(request, options);
+}
+
+const VERSION = "3.0.3";
+function retry(octokit, octokitOptions = {}) {
+ const state = Object.assign({
+ enabled: true,
+ retryAfterBaseValue: 1000,
+ doNotRetry: [400, 401, 403, 404, 422],
+ retries: 3,
+ }, octokitOptions.retry);
+ octokit.retry = {
+ retryRequest: (error, retries, retryAfter) => {
+ error.request.request = Object.assign({}, error.request.request, {
+ retries: retries,
+ retryAfter: retryAfter,
+ });
+ return error;
+ },
+ };
+ if (!state.enabled) {
+ return;
+ }
+ octokit.hook.error("request", errorRequest.bind(null, octokit, state));
+ octokit.hook.wrap("request", wrapRequest.bind(null, state));
+}
+retry.VERSION = VERSION;
+
+export { VERSION, retry };
+//# sourceMappingURL=index.js.map
diff --git a/node_modules/@octokit/plugin-retry/dist-web/index.js.map b/node_modules/@octokit/plugin-retry/dist-web/index.js.map
new file mode 100644
index 000000000..94a012fa6
--- /dev/null
+++ b/node_modules/@octokit/plugin-retry/dist-web/index.js.map
@@ -0,0 +1 @@
+{"version":3,"file":"index.js","sources":["../dist-src/error-request.js","../dist-src/wrap-request.js","../dist-src/index.js"],"sourcesContent":["// @ts-ignore\nexport async function errorRequest(octokit, state, error, options) {\n if (!error.request || !error.request.request) {\n // address https://github.com/octokit/plugin-retry.js/issues/8\n throw error;\n }\n // retry all >= 400 && not doNotRetry\n if (error.status >= 400 && !state.doNotRetry.includes(error.status)) {\n const retries = options.request.retries != null ? options.request.retries : state.retries;\n const retryAfter = Math.pow((options.request.retryCount || 0) + 1, 2);\n throw octokit.retry.retryRequest(error, retries, retryAfter);\n }\n // Maybe eventually there will be more cases here\n throw error;\n}\n","// @ts-ignore\nimport Bottleneck from \"bottleneck/light\";\n// @ts-ignore\nexport async function wrapRequest(state, request, options) {\n const limiter = new Bottleneck();\n // @ts-ignore\n limiter.on(\"failed\", function (error, info) {\n const maxRetries = ~~error.request.request.retries;\n const after = ~~error.request.request.retryAfter;\n options.request.retryCount = info.retryCount + 1;\n if (maxRetries > info.retryCount) {\n // Returning a number instructs the limiter to retry\n // the request after that number of milliseconds have passed\n return after * state.retryAfterBaseValue;\n }\n });\n return limiter.schedule(request, options);\n}\n","import { errorRequest } from \"./error-request\";\nimport { wrapRequest } from \"./wrap-request\";\nexport const VERSION = \"3.0.3\";\nexport function retry(octokit, octokitOptions = {}) {\n const state = Object.assign({\n enabled: true,\n retryAfterBaseValue: 1000,\n doNotRetry: [400, 401, 403, 404, 422],\n retries: 3,\n }, octokitOptions.retry);\n octokit.retry = {\n retryRequest: (error, retries, retryAfter) => {\n error.request.request = Object.assign({}, error.request.request, {\n retries: retries,\n retryAfter: retryAfter,\n });\n return 
error;\n },\n };\n if (!state.enabled) {\n return;\n }\n octokit.hook.error(\"request\", errorRequest.bind(null, octokit, state));\n octokit.hook.wrap(\"request\", wrapRequest.bind(null, state));\n}\nretry.VERSION = VERSION;\n"],"names":[],"mappings":";;AAAA;AACO,eAAe,YAAY,CAAC,OAAO,EAAE,KAAK,EAAE,KAAK,EAAE,OAAO,EAAE;AACnE,IAAI,IAAI,CAAC,KAAK,CAAC,OAAO,IAAI,CAAC,KAAK,CAAC,OAAO,CAAC,OAAO,EAAE;AAClD;AACA,QAAQ,MAAM,KAAK,CAAC;AACpB,KAAK;AACL;AACA,IAAI,IAAI,KAAK,CAAC,MAAM,IAAI,GAAG,IAAI,CAAC,KAAK,CAAC,UAAU,CAAC,QAAQ,CAAC,KAAK,CAAC,MAAM,CAAC,EAAE;AACzE,QAAQ,MAAM,OAAO,GAAG,OAAO,CAAC,OAAO,CAAC,OAAO,IAAI,IAAI,GAAG,OAAO,CAAC,OAAO,CAAC,OAAO,GAAG,KAAK,CAAC,OAAO,CAAC;AAClG,QAAQ,MAAM,UAAU,GAAG,IAAI,CAAC,GAAG,CAAC,CAAC,OAAO,CAAC,OAAO,CAAC,UAAU,IAAI,CAAC,IAAI,CAAC,EAAE,CAAC,CAAC,CAAC;AAC9E,QAAQ,MAAM,OAAO,CAAC,KAAK,CAAC,YAAY,CAAC,KAAK,EAAE,OAAO,EAAE,UAAU,CAAC,CAAC;AACrE,KAAK;AACL;AACA,IAAI,MAAM,KAAK,CAAC;AAChB;;ACdA;AACA,AACA;AACA,AAAO,eAAe,WAAW,CAAC,KAAK,EAAE,OAAO,EAAE,OAAO,EAAE;AAC3D,IAAI,MAAM,OAAO,GAAG,IAAI,UAAU,EAAE,CAAC;AACrC;AACA,IAAI,OAAO,CAAC,EAAE,CAAC,QAAQ,EAAE,UAAU,KAAK,EAAE,IAAI,EAAE;AAChD,QAAQ,MAAM,UAAU,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,CAAC,OAAO,CAAC,OAAO,CAAC;AAC3D,QAAQ,MAAM,KAAK,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,CAAC,OAAO,CAAC,UAAU,CAAC;AACzD,QAAQ,OAAO,CAAC,OAAO,CAAC,UAAU,GAAG,IAAI,CAAC,UAAU,GAAG,CAAC,CAAC;AACzD,QAAQ,IAAI,UAAU,GAAG,IAAI,CAAC,UAAU,EAAE;AAC1C;AACA;AACA,YAAY,OAAO,KAAK,GAAG,KAAK,CAAC,mBAAmB,CAAC;AACrD,SAAS;AACT,KAAK,CAAC,CAAC;AACP,IAAI,OAAO,OAAO,CAAC,QAAQ,CAAC,OAAO,EAAE,OAAO,CAAC,CAAC;AAC9C,CAAC;;ACfW,MAAC,OAAO,GAAG,mBAAmB,CAAC;AAC3C,AAAO,SAAS,KAAK,CAAC,OAAO,EAAE,cAAc,GAAG,EAAE,EAAE;AACpD,IAAI,MAAM,KAAK,GAAG,MAAM,CAAC,MAAM,CAAC;AAChC,QAAQ,OAAO,EAAE,IAAI;AACrB,QAAQ,mBAAmB,EAAE,IAAI;AACjC,QAAQ,UAAU,EAAE,CAAC,GAAG,EAAE,GAAG,EAAE,GAAG,EAAE,GAAG,EAAE,GAAG,CAAC;AAC7C,QAAQ,OAAO,EAAE,CAAC;AAClB,KAAK,EAAE,cAAc,CAAC,KAAK,CAAC,CAAC;AAC7B,IAAI,OAAO,CAAC,KAAK,GAAG;AACpB,QAAQ,YAAY,EAAE,CAAC,KAAK,EAAE,OAAO,EAAE,UAAU,KAAK;AACtD,YAAY,KAAK,CAAC,OAAO,CAAC,OAAO,GAAG,MAAM,CAAC,MAAM,C
AAC,EAAE,EAAE,KAAK,CAAC,OAAO,CAAC,OAAO,EAAE;AAC7E,gBAAgB,OAAO,EAAE,OAAO;AAChC,gBAAgB,UAAU,EAAE,UAAU;AACtC,aAAa,CAAC,CAAC;AACf,YAAY,OAAO,KAAK,CAAC;AACzB,SAAS;AACT,KAAK,CAAC;AACN,IAAI,IAAI,CAAC,KAAK,CAAC,OAAO,EAAE;AACxB,QAAQ,OAAO;AACf,KAAK;AACL,IAAI,OAAO,CAAC,IAAI,CAAC,KAAK,CAAC,SAAS,EAAE,YAAY,CAAC,IAAI,CAAC,IAAI,EAAE,OAAO,EAAE,KAAK,CAAC,CAAC,CAAC;AAC3E,IAAI,OAAO,CAAC,IAAI,CAAC,IAAI,CAAC,SAAS,EAAE,WAAW,CAAC,IAAI,CAAC,IAAI,EAAE,KAAK,CAAC,CAAC,CAAC;AAChE,CAAC;AACD,KAAK,CAAC,OAAO,GAAG,OAAO,CAAC;;;;"}
\ No newline at end of file
diff --git a/node_modules/@octokit/plugin-retry/package.json b/node_modules/@octokit/plugin-retry/package.json
new file mode 100644
index 000000000..8915a50df
--- /dev/null
+++ b/node_modules/@octokit/plugin-retry/package.json
@@ -0,0 +1,49 @@
+{
+ "name": "@octokit/plugin-retry",
+ "description": "Automatic retry plugin for octokit",
+ "version": "3.0.3",
+ "license": "MIT",
+ "files": [
+ "dist-*/",
+ "bin/"
+ ],
+ "pika": true,
+ "sideEffects": false,
+ "homepage": "https://github.com/octokit/plugin-retry.js#readme",
+ "bugs": {
+ "url": "https://github.com/octokit/plugin-retry.js/issues"
+ },
+ "repository": {
+ "type": "git",
+ "url": "https://github.com/octokit/plugin-retry.js.git"
+ },
+ "dependencies": {
+ "@octokit/types": "^5.0.0",
+ "bottleneck": "^2.15.3"
+ },
+ "devDependencies": {
+ "@octokit/core": "^2.0.0",
+ "@octokit/request-error": "^2.0.0",
+ "@pika/pack": "^0.5.0",
+ "@pika/plugin-build-node": "^0.9.0",
+ "@pika/plugin-build-web": "^0.9.0",
+ "@pika/plugin-ts-standard-pkg": "^0.9.0",
+ "@types/fetch-mock": "^7.3.1",
+ "@types/jest": "^26.0.0",
+ "@types/node": "^14.0.0",
+ "fetch-mock": "^9.0.0",
+ "jest": "^26.0.1",
+ "prettier": "^2.0.1",
+ "semantic-release": "^17.0.0",
+ "semantic-release-plugin-update-version-in-files": "^1.0.0",
+ "ts-jest": "^26.0.0",
+ "typescript": "^3.7.2"
+ },
+ "publishConfig": {
+ "access": "public"
+ },
+ "source": "dist-src/index.js",
+ "types": "dist-types/index.d.ts",
+ "main": "dist-node/index.js",
+ "module": "dist-web/index.js"
+}
\ No newline at end of file
diff --git a/node_modules/bottleneck/.babelrc.es5 b/node_modules/bottleneck/.babelrc.es5
new file mode 100644
index 000000000..e7120e3d0
--- /dev/null
+++ b/node_modules/bottleneck/.babelrc.es5
@@ -0,0 +1,5 @@
+{
+ "presets": [
+ ["@babel/preset-env", {}]
+ ]
+}
\ No newline at end of file
diff --git a/node_modules/bottleneck/.babelrc.lib b/node_modules/bottleneck/.babelrc.lib
new file mode 100644
index 000000000..de9dbbad9
--- /dev/null
+++ b/node_modules/bottleneck/.babelrc.lib
@@ -0,0 +1,9 @@
+{
+ "presets": [
+ ["@babel/preset-env", {
+ "targets": {
+ "node": "6.0"
+ }
+ }]
+ ]
+}
\ No newline at end of file
diff --git a/node_modules/bottleneck/.env b/node_modules/bottleneck/.env
new file mode 100644
index 000000000..7afc96eec
--- /dev/null
+++ b/node_modules/bottleneck/.env
@@ -0,0 +1,2 @@
+REDIS_HOST=127.0.0.1
+REDIS_PORT=6379
diff --git a/node_modules/bottleneck/.travis.yml b/node_modules/bottleneck/.travis.yml
new file mode 100644
index 000000000..8204ece5c
--- /dev/null
+++ b/node_modules/bottleneck/.travis.yml
@@ -0,0 +1,25 @@
+language: node_js
+node_js:
+ - 8
+branches:
+ only:
+ - master
+ - next
+services:
+ - redis-server
+env:
+ global:
+ - "REDIS_HOST=127.0.0.1"
+ - "REDIS_PORT=6379"
+cache:
+ directories:
+ - $HOME/.npm
+install:
+- npm i
+sudo: required
+after_success: npx codecov --file=./coverage/lcov.info
+script: npm run test-all
+
+before_install:
+ - npm i -g npm@5.10
+ - npm --version
\ No newline at end of file
diff --git a/node_modules/bottleneck/LICENSE b/node_modules/bottleneck/LICENSE
new file mode 100644
index 000000000..835fc3145
--- /dev/null
+++ b/node_modules/bottleneck/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Simon Grondin
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/node_modules/bottleneck/README.md b/node_modules/bottleneck/README.md
new file mode 100644
index 000000000..bb8d52f84
--- /dev/null
+++ b/node_modules/bottleneck/README.md
@@ -0,0 +1,1027 @@
+# bottleneck
+
+[![Downloads][npm-downloads]][npm-url]
+[![version][npm-version]][npm-url]
+[![License][npm-license]][license-url]
+
+
+Bottleneck is a lightweight and zero-dependency Task Scheduler and Rate Limiter for Node.js and the browser.
+
+Bottleneck is an easy solution as it adds very little complexity to your code. It is battle-hardened, reliable and production-ready and used on a large scale in private companies and open source software.
+
+It supports **Clustering**: it can rate limit jobs across multiple Node.js instances. It uses Redis and strictly atomic operations to stay reliable in the presence of unreliable clients and networks. It also supports *Redis Cluster* and *Redis Sentinel*.
+
+**[Upgrading from version 1?](#upgrading-to-v2)**
+
+
+
+- [Install](#install)
+- [Quick Start](#quick-start)
+ * [Gotchas & Common Mistakes](#gotchas--common-mistakes)
+- [Constructor](#constructor)
+- [Reservoir Intervals](#reservoir-intervals)
+- [`submit()`](#submit)
+- [`schedule()`](#schedule)
+- [`wrap()`](#wrap)
+- [Job Options](#job-options)
+- [Jobs Lifecycle](#jobs-lifecycle)
+- [Events](#events)
+- [Retries](#retries)
+- [`updateSettings()`](#updatesettings)
+- [`incrementReservoir()`](#incrementreservoir)
+- [`currentReservoir()`](#currentreservoir)
+- [`stop()`](#stop)
+- [`chain()`](#chain)
+- [Group](#group)
+- [Batching](#batching)
+- [Clustering](#clustering)
+- [Debugging Your Application](#debugging-your-application)
+- [Upgrading To v2](#upgrading-to-v2)
+- [Contributing](#contributing)
+
+
+
+## Install
+
+```
+npm install --save bottleneck
+```
+
+```js
+import Bottleneck from "bottleneck";
+
+// Note: To support older browsers and Node <6.0, you must import the ES5 bundle instead.
+var Bottleneck = require("bottleneck/es5");
+```
+
+## Quick Start
+
+### Step 1 of 3
+
+Most APIs have a rate limit. For example, to execute 3 requests per second:
+```js
+const limiter = new Bottleneck({
+ minTime: 333
+});
+```
+
+If there's a chance some requests might take longer than 333ms and you want to prevent more than 1 request from running at a time, add `maxConcurrent: 1`:
+```js
+const limiter = new Bottleneck({
+ maxConcurrent: 1,
+ minTime: 333
+});
+```
+
+`minTime` and `maxConcurrent` are enough for the majority of use cases. They work well together to ensure a smooth rate of requests. If your use case requires executing requests in **bursts** or every time a quota resets, look into [Reservoir Intervals](#reservoir-intervals).
+
+### Step 2 of 3
+
+#### ➤ Using promises?
+
+Instead of this:
+```js
+myFunction(arg1, arg2)
+.then((result) => {
+ /* handle result */
+});
+```
+Do this:
+```js
+limiter.schedule(() => myFunction(arg1, arg2))
+.then((result) => {
+ /* handle result */
+});
+```
+Or this:
+```js
+const wrapped = limiter.wrap(myFunction);
+
+wrapped(arg1, arg2)
+.then((result) => {
+ /* handle result */
+});
+```
+
+#### ➤ Using async/await?
+
+Instead of this:
+```js
+const result = await myFunction(arg1, arg2);
+```
+Do this:
+```js
+const result = await limiter.schedule(() => myFunction(arg1, arg2));
+```
+Or this:
+```js
+const wrapped = limiter.wrap(myFunction);
+
+const result = await wrapped(arg1, arg2);
+```
+
+#### ➤ Using callbacks?
+
+Instead of this:
+```js
+someAsyncCall(arg1, arg2, callback);
+```
+Do this:
+```js
+limiter.submit(someAsyncCall, arg1, arg2, callback);
+```
+
+### Step 3 of 3
+
+Remember...
+
+Bottleneck builds a queue of jobs and executes them as soon as possible. By default, the jobs will be executed in the order they were received.
+
+**Read the 'Gotchas' and you're good to go**. Or keep reading to learn about all the fine tuning and advanced options available. If your rate limits need to be enforced across a cluster of computers, read the [Clustering](#clustering) docs.
+
+[Need help debugging your application?](#debugging-your-application)
+
+Instead of throttling maybe [you want to batch up requests](#batching) into fewer calls?
+
+### Gotchas & Common Mistakes
+
+* Make sure the function you pass to `schedule()` or `wrap()` only returns once **all the work it does** has completed.
+
+Instead of this:
+```js
+limiter.schedule(() => {
+ tasksArray.forEach(x => processTask(x));
+ // BAD, we return before our processTask() functions are finished processing!
+});
+```
+Do this:
+```js
+limiter.schedule(() => {
+ const allTasks = tasksArray.map(x => processTask(x));
+ // GOOD, we wait until all tasks are done.
+ return Promise.all(allTasks);
+});
+```
+
+* If you're passing an object's method as a job, you'll probably need to `bind()` the object:
+```js
+// instead of this:
+limiter.schedule(object.doSomething);
+// do this:
+limiter.schedule(object.doSomething.bind(object));
+// or, wrap it in an arrow function instead:
+limiter.schedule(() => object.doSomething());
+```
+
+* Bottleneck requires Node 6+ to function. However, an ES5 build is included: `var Bottleneck = require("bottleneck/es5");`.
+
+* Make sure you're catching `"error"` events emitted by your limiters!
+
+* Consider setting a `maxConcurrent` value instead of leaving it `null`. This can help your application's performance, especially if you think the limiter's queue might become very long.
+
+* If you plan on using `priorities`, make sure to set a `maxConcurrent` value.
+
+* **When using `submit()`**, if a callback isn't necessary, you must pass `null` or an empty function instead. It will not work otherwise.
+
+* **When using `submit()`**, make sure all the jobs will eventually complete by calling their callback, or set an [`expiration`](#job-options). Even if you submitted your job with a `null` callback , it still needs to call its callback. This is particularly important if you are using a `maxConcurrent` value that isn't `null` (unlimited), otherwise those not completed jobs will be clogging up the limiter and no new jobs will be allowed to run. It's safe to call the callback more than once, subsequent calls are ignored.
+
+## Docs
+
+### Constructor
+
+```js
+const limiter = new Bottleneck({/* options */});
+```
+
+Basic options:
+
+| Option | Default | Description |
+|--------|---------|-------------|
+| `maxConcurrent` | `null` (unlimited) | How many jobs can be executing at the same time. Consider setting a value instead of leaving it `null`, it can help your application's performance, especially if you think the limiter's queue might get very long. |
+| `minTime` | `0` ms | How long to wait after launching a job before launching another one. |
+| `highWater` | `null` (unlimited) | How long can the queue be? When the queue length exceeds that value, the selected `strategy` is executed to shed the load. |
+| `strategy` | `Bottleneck.strategy.LEAK` | Which strategy to use when the queue gets longer than the high water mark. [Read about strategies](#strategies). Strategies are never executed if `highWater` is `null`. |
+| `penalty` | `15 * minTime`, or `5000` when `minTime` is `0` | The `penalty` value used by the `BLOCK` strategy. |
+| `reservoir` | `null` (unlimited) | How many jobs can be executed before the limiter stops executing jobs. If `reservoir` reaches `0`, no jobs will be executed until it is no longer `0`. New jobs will still be queued up. |
+| `reservoirRefreshInterval` | `null` (disabled) | Every `reservoirRefreshInterval` milliseconds, the `reservoir` value will be automatically updated to the value of `reservoirRefreshAmount`. The `reservoirRefreshInterval` value should be a [multiple of 250 (5000 for Clustering)](https://github.com/SGrondin/bottleneck/issues/88). |
+| `reservoirRefreshAmount` | `null` (disabled) | The value to set `reservoir` to when `reservoirRefreshInterval` is in use. |
+| `reservoirIncreaseInterval` | `null` (disabled) | Every `reservoirIncreaseInterval` milliseconds, the `reservoir` value will be automatically incremented by `reservoirIncreaseAmount`. The `reservoirIncreaseInterval` value should be a [multiple of 250 (5000 for Clustering)](https://github.com/SGrondin/bottleneck/issues/88). |
+| `reservoirIncreaseAmount` | `null` (disabled) | The increment applied to `reservoir` when `reservoirIncreaseInterval` is in use. |
+| `reservoirIncreaseMaximum` | `null` (disabled) | The maximum value that `reservoir` can reach when `reservoirIncreaseInterval` is in use. |
+| `Promise` | `Promise` (built-in) | This lets you override the Promise library used by Bottleneck. |
+
+
+### Reservoir Intervals
+
+Reservoir Intervals let you execute requests in bursts, by automatically controlling the limiter's `reservoir` value. The `reservoir` is simply the number of jobs the limiter is allowed to execute. Once the value reaches 0, it stops starting new jobs.
+
+There are 2 types of Reservoir Intervals: Refresh Intervals and Increase Intervals.
+
+#### Refresh Interval
+
+In this example, we throttle to 100 requests every 60 seconds:
+
+```js
+const limiter = new Bottleneck({
+ reservoir: 100, // initial value
+ reservoirRefreshAmount: 100,
+ reservoirRefreshInterval: 60 * 1000, // must be divisible by 250
+
+ // also use maxConcurrent and/or minTime for safety
+ maxConcurrent: 1,
+ minTime: 333 // pick a value that makes sense for your use case
+});
+```
+`reservoir` is a counter decremented every time a job is launched, we set its initial value to 100. Then, every `reservoirRefreshInterval` (60000 ms), `reservoir` is automatically updated to be equal to the `reservoirRefreshAmount` (100).
+
+#### Increase Interval
+
+In this example, we throttle jobs to meet the Shopify API Rate Limits. Users are allowed to send 40 requests initially, then every second grants 2 more requests up to a maximum of 40.
+
+```js
+const limiter = new Bottleneck({
+ reservoir: 40, // initial value
+ reservoirIncreaseAmount: 2,
+ reservoirIncreaseInterval: 1000, // must be divisible by 250
+ reservoirIncreaseMaximum: 40,
+
+ // also use maxConcurrent and/or minTime for safety
+ maxConcurrent: 5,
+ minTime: 250 // pick a value that makes sense for your use case
+});
+```
+
+#### Warnings
+
+Reservoir Intervals are an advanced feature, please take the time to read and understand the following warnings.
+
+- **Reservoir Intervals are not a replacement for `minTime` and `maxConcurrent`.** It's strongly recommended to also use `minTime` and/or `maxConcurrent` to spread out the load. For example, suppose a lot of jobs are queued up because the `reservoir` is 0. Every time the Refresh Interval is triggered, a number of jobs equal to `reservoirRefreshAmount` will automatically be launched, all at the same time! To prevent this flooding effect and keep your application running smoothly, use `minTime` and `maxConcurrent` to **stagger** the jobs.
+
+- **The Reservoir Interval starts from the moment the limiter is created**. Let's suppose we're using `reservoirRefreshAmount: 5`. If you happen to add 10 jobs just 1ms before the refresh is triggered, the first 5 will run immediately, then 1ms later it will refresh the reservoir value and that will make the last 5 also run right away. It will have run 10 jobs in just over 1ms no matter what your reservoir interval was!
+
+- **Reservoir Intervals prevent a limiter from being garbage collected.** Call `limiter.disconnect()` to clear the interval and allow the memory to be freed. However, it's not necessary to call `.disconnect()` to allow the Node.js process to exit.
+
+### submit()
+
+Adds a job to the queue. This is the callback version of `schedule()`.
+```js
+limiter.submit(someAsyncCall, arg1, arg2, callback);
+```
+You can pass `null` instead of an empty function if there is no callback, but `someAsyncCall` still needs to call **its** callback to let the limiter know it has completed its work.
+
+`submit()` can also accept [advanced options](#job-options).
+
+### schedule()
+
+Adds a job to the queue. This is the Promise and async/await version of `submit()`.
+```js
+const fn = function(arg1, arg2) {
+ return httpGet(arg1, arg2); // Here httpGet() returns a promise
+};
+
+limiter.schedule(fn, arg1, arg2)
+.then((result) => {
+ /* ... */
+});
+```
+In other words, `schedule()` takes a function **fn** and a list of arguments. `schedule()` returns a promise that will be executed according to the rate limits.
+
+`schedule()` can also accept [advanced options](#job-options).
+
+Here's another example:
+```js
+// suppose that `client.get(url)` returns a promise
+
+const url = "https://wikipedia.org";
+
+limiter.schedule(() => client.get(url))
+.then(response => console.log(response.body));
+```
+
+### wrap()
+
+Takes a function that returns a promise. Returns a function identical to the original, but rate limited.
+```js
+const wrapped = limiter.wrap(fn);
+
+wrapped()
+.then(function (result) {
+ /* ... */
+})
+.catch(function (error) {
+ // Bottleneck might need to fail the job even if the original function can never fail.
+ // For example, your job is taking longer than the `expiration` time you've set.
+});
+```
+
+### Job Options
+
+`submit()`, `schedule()`, and `wrap()` all accept advanced options.
+```js
+// Submit
+limiter.submit({/* options */}, someAsyncCall, arg1, arg2, callback);
+
+// Schedule
+limiter.schedule({/* options */}, fn, arg1, arg2);
+
+// Wrap
+const wrapped = limiter.wrap(fn);
+wrapped.withOptions({/* options */}, arg1, arg2);
+```
+
+| Option | Default | Description |
+|--------|---------|-------------|
+| `priority` | `5` | A priority between `0` and `9`. A job with a priority of `4` will be queued ahead of a job with a priority of `5`. **Important:** You must set a low `maxConcurrent` value for priorities to work, otherwise there is nothing to queue because jobs will be be scheduled immediately! |
+| `weight` | `1` | Must be an integer equal to or higher than `0`. The `weight` is what increases the number of running jobs (up to `maxConcurrent`) and decreases the `reservoir` value. |
+| `expiration` | `null` (unlimited) | The number of milliseconds a job is given to complete. Jobs that execute for longer than `expiration` ms will be failed with a `BottleneckError`. |
+| `id` | `` | You should give an ID to your jobs, it helps with [debugging](#debugging-your-application). |
+
+### Strategies
+
+A strategy is a simple algorithm that is executed every time adding a job would cause the number of queued jobs to exceed `highWater`. Strategies are never executed if `highWater` is `null`.
+
+#### Bottleneck.strategy.LEAK
+When adding a new job to a limiter, if the queue length reaches `highWater`, drop the oldest job with the lowest priority. This is useful when jobs that have been waiting for too long are not important anymore. If all the queued jobs are more important (based on their `priority` value) than the one being added, it will not be added.
+
+#### Bottleneck.strategy.OVERFLOW_PRIORITY
+Same as `LEAK`, except it will only drop jobs that are *less important* than the one being added. If all the queued jobs are as or more important than the new one, it will not be added.
+
+#### Bottleneck.strategy.OVERFLOW
+When adding a new job to a limiter, if the queue length reaches `highWater`, do not add the new job. This strategy totally ignores priority levels.
+
+#### Bottleneck.strategy.BLOCK
+When adding a new job to a limiter, if the queue length reaches `highWater`, the limiter falls into "blocked mode". All queued jobs are dropped and no new jobs will be accepted until the limiter unblocks. It will unblock after `penalty` milliseconds have passed without receiving a new job. `penalty` is equal to `15 * minTime` (or `5000` if `minTime` is `0`) by default. This strategy is ideal when bruteforce attacks are to be expected. This strategy totally ignores priority levels.
+
+
+### Jobs lifecycle
+
+1. **Received**. Your new job has been added to the limiter. Bottleneck needs to check whether it can be accepted into the queue.
+2. **Queued**. Bottleneck has accepted your job, but it can not tell at what exact timestamp it will run yet, because it is dependent on previous jobs.
+3. **Running**. Your job is not in the queue anymore, it will be executed after a delay that was computed according to your `minTime` setting.
+4. **Executing**. Your job is executing its code.
+5. **Done**. Your job has completed.
+
+**Note:** By default, Bottleneck does not keep track of DONE jobs, to save memory. You can enable this feature by passing `trackDoneStatus: true` as an option when creating a limiter.
+
+#### counts()
+
+```js
+const counts = limiter.counts();
+
+console.log(counts);
+/*
+{
+ RECEIVED: 0,
+ QUEUED: 0,
+ RUNNING: 0,
+ EXECUTING: 0,
+ DONE: 0
+}
+*/
+```
+
+Returns an object with the current number of jobs per status in the limiter.
+
+#### jobStatus()
+
+```js
+console.log(limiter.jobStatus("some-job-id"));
+// Example: QUEUED
+```
+
+Returns the status of the job with the provided job id **in the limiter**. Returns `null` if no job with that id exist.
+
+#### jobs()
+
+```js
+console.log(limiter.jobs("RUNNING"));
+// Example: ['id1', 'id2']
+```
+
+Returns an array of all the job ids with the specified status **in the limiter**. Not passing a status string returns all the known ids.
+
+#### queued()
+
+```js
+const count = limiter.queued(priority);
+
+console.log(count);
+```
+
+`priority` is optional. Returns the number of `QUEUED` jobs with the given `priority` level. Omitting the `priority` argument returns the total number of queued jobs **in the limiter**.
+
+#### clusterQueued()
+
+```js
+const count = await limiter.clusterQueued();
+
+console.log(count);
+```
+
+Returns the number of `QUEUED` jobs **in the Cluster**.
+
+#### empty()
+
+```js
+if (limiter.empty()) {
+ // do something...
+}
+```
+
+Returns a boolean which indicates whether there are any `RECEIVED` or `QUEUED` jobs **in the limiter**.
+
+#### running()
+
+```js
+limiter.running()
+.then((count) => console.log(count));
+```
+
+Returns a promise that returns the **total weight** of the `RUNNING` and `EXECUTING` jobs **in the Cluster**.
+
+#### done()
+
+```js
+limiter.done()
+.then((count) => console.log(count));
+```
+
+Returns a promise that returns the **total weight** of `DONE` jobs **in the Cluster**. Does not require passing the `trackDoneStatus: true` option.
+
+#### check()
+
+```js
+limiter.check()
+.then((wouldRunNow) => console.log(wouldRunNow));
+```
+Checks if a new job would be executed immediately if it was submitted now. Returns a promise that returns a boolean.
+
+
+### Events
+
+__'error'__
+```js
+limiter.on("error", function (error) {
+ /* handle errors here */
+});
+```
+
+The two main causes of error events are: uncaught exceptions in your event handlers, and network errors when Clustering is enabled.
+
+__'failed'__
+```js
+limiter.on("failed", function (error, jobInfo) {
+ // This will be called every time a job fails.
+});
+```
+
+__'retry'__
+
+See [Retries](#retries) to learn how to automatically retry jobs.
+```js
+limiter.on("retry", function (message, jobInfo) {
+ // This will be called every time a job is retried.
+});
+```
+
+__'empty'__
+```js
+limiter.on("empty", function () {
+ // This will be called when `limiter.empty()` becomes true.
+});
+```
+
+__'idle'__
+```js
+limiter.on("idle", function () {
+ // This will be called when `limiter.empty()` is `true` and `limiter.running()` is `0`.
+});
+```
+
+__'dropped'__
+```js
+limiter.on("dropped", function (dropped) {
+ // This will be called when a strategy was triggered.
+ // The dropped request is passed to this event listener.
+});
+```
+
+__'depleted'__
+```js
+limiter.on("depleted", function (empty) {
+ // This will be called every time the reservoir drops to 0.
+ // The `empty` (boolean) argument indicates whether `limiter.empty()` is currently true.
+});
+```
+
+__'debug'__
+```js
+limiter.on("debug", function (message, data) {
+ // Useful to figure out what the limiter is doing in real time
+ // and to help debug your application
+});
+```
+
+__'received'__
+__'queued'__
+__'scheduled'__
+__'executing'__
+__'done'__
+```js
+limiter.on("queued", function (info) {
+ // This event is triggered when a job transitions from one Lifecycle stage to another
+});
+```
+
+See [Jobs Lifecycle](#jobs-lifecycle) for more information.
+
+These Lifecycle events are not triggered for jobs located on another limiter in a Cluster, for performance reasons.
+
+#### Other event methods
+
+Use `removeAllListeners()` with an optional event name as first argument to remove listeners.
+
+Use `.once()` instead of `.on()` to only receive a single event.
+
+
+### Retries
+
+The following example:
+```js
+const limiter = new Bottleneck();
+
+// Listen to the "failed" event
+limiter.on("failed", async (error, jobInfo) => {
+ const id = jobInfo.options.id;
+ console.warn(`Job ${id} failed: ${error}`);
+
+ if (jobInfo.retryCount === 0) { // Here we only retry once
+ console.log(`Retrying job ${id} in 25ms!`);
+ return 25;
+ }
+});
+
+// Listen to the "retry" event
+limiter.on("retry", (error, jobInfo) => console.log(`Now retrying ${jobInfo.options.id}`));
+
+const main = async function () {
+ let executions = 0;
+
+ // Schedule one job
+ const result = await limiter.schedule({ id: 'ABC123' }, async () => {
+ executions++;
+ if (executions === 1) {
+ throw new Error("Boom!");
+ } else {
+ return "Success!";
+ }
+ });
+
+ console.log(`Result: ${result}`);
+}
+
+main();
+```
+will output
+```
+Job ABC123 failed: Error: Boom!
+Retrying job ABC123 in 25ms!
+Now retrying ABC123
+Result: Success!
+```
+To re-run your job, simply return an integer from the `'failed'` event handler. The number returned is how many milliseconds to wait before retrying it. Return `0` to retry it immediately.
+
+**IMPORTANT:** When you ask the limiter to retry a job it will not send it back into the queue. It will stay in the `EXECUTING` [state](#jobs-lifecycle) until it succeeds or until you stop retrying it. **This means that it counts as a concurrent job for `maxConcurrent` even while it's just waiting to be retried.** The number of milliseconds to wait ignores your `minTime` settings.
+
+
+### updateSettings()
+
+```js
+limiter.updateSettings(options);
+```
+The options are the same as the [limiter constructor](#constructor).
+
+**Note:** Changes don't affect `SCHEDULED` jobs.
+
+### incrementReservoir()
+
+```js
+limiter.incrementReservoir(incrementBy);
+```
+Returns a promise that returns the new reservoir value.
+
+### currentReservoir()
+
+```js
+limiter.currentReservoir()
+.then((reservoir) => console.log(reservoir));
+```
+Returns a promise that returns the current reservoir value.
+
+### stop()
+
+The `stop()` method is used to safely shutdown a limiter. It prevents any new jobs from being added to the limiter and waits for all `EXECUTING` jobs to complete.
+
+```js
+limiter.stop(options)
+.then(() => {
+ console.log("Shutdown completed!")
+});
+```
+
+`stop()` returns a promise that resolves once all the `EXECUTING` jobs have completed and, if desired, once all non-`EXECUTING` jobs have been dropped.
+
+| Option | Default | Description |
+|--------|---------|-------------|
+| `dropWaitingJobs` | `true` | When `true`, drop all the `RECEIVED`, `QUEUED` and `RUNNING` jobs. When `false`, allow those jobs to complete before resolving the Promise returned by this method. |
+| `dropErrorMessage` | `This limiter has been stopped.` | The error message used to drop jobs when `dropWaitingJobs` is `true`. |
+| `enqueueErrorMessage` | `This limiter has been stopped and cannot accept new jobs.` | The error message used to reject a job added to the limiter after `stop()` has been called. |
+
+### chain()
+
+Tasks that are ready to be executed will be added to that other limiter. Suppose you have 2 types of tasks, A and B. They both have their own limiter with their own settings, but both must also follow a global limiter G:
+```js
+const limiterA = new Bottleneck( /* some settings */ );
+const limiterB = new Bottleneck( /* some different settings */ );
+const limiterG = new Bottleneck( /* some global settings */ );
+
+limiterA.chain(limiterG);
+limiterB.chain(limiterG);
+
+// Requests added to limiterA must follow the A and G rate limits.
+// Requests added to limiterB must follow the B and G rate limits.
+// Requests added to limiterG must follow the G rate limits.
+```
+
+To unchain, call `limiter.chain(null);`.
+
+## Group
+
+The `Group` feature of Bottleneck manages many limiters automatically for you. It creates limiters dynamically and transparently.
+
+Let's take a DNS server as an example of how Bottleneck can be used. It's a service that sees a lot of abuse and where incoming DNS requests need to be rate limited. Bottleneck is so tiny, it's acceptable to create one limiter for each origin IP, even if it means creating thousands of limiters. The `Group` feature is perfect for this use case. Create one Group and use the origin IP to rate limit each IP independently. Each call with the same key (IP) will be routed to the same underlying limiter. A Group is created like a limiter:
+
+
+```js
+const group = new Bottleneck.Group(options);
+```
+
+The `options` object will be used for every limiter created by the Group.
+
+The Group is then used with the `.key(str)` method:
+
+```js
+// In this example, the key is an IP
+group.key("77.66.54.32").schedule(() => {
+ /* process the request */
+});
+```
+
+#### key()
+
+* `str` : The key to use. All jobs added with the same key will use the same underlying limiter. *Default: `""`*
+
+The return value of `.key(str)` is a limiter. If it doesn't already exist, it is generated for you. Calling `key()` is how limiters are created inside a Group.
+
+Limiters that have been idle for longer than 5 minutes are deleted to avoid memory leaks, this value can be changed by passing a different `timeout` option, in milliseconds.
+
+#### on("created")
+
+```js
+group.on("created", (limiter, key) => {
+ console.log("A new limiter was created for key: " + key)
+
+ // Prepare the limiter, for example we'll want to listen to its "error" events!
+ limiter.on("error", (err) => {
+ // Handle errors here
+ })
+});
+```
+
+Listening for the `"created"` event is the recommended way to set up a new limiter. Your event handler is executed before `key()` returns the newly created limiter.
+
+#### updateSettings()
+
+```js
+const group = new Bottleneck.Group({ maxConcurrent: 2, minTime: 250 });
+group.updateSettings({ minTime: 500 });
+```
+After executing the above commands, **new limiters** will be created with `{ maxConcurrent: 2, minTime: 500 }`.
+
+
+#### deleteKey()
+
+* `str`: The key for the limiter to delete.
+
+Manually deletes the limiter at the specified key. When using Clustering, the Redis data is immediately deleted and the other Groups in the Cluster will eventually delete their local key automatically, unless it is still being used.
+
+#### keys()
+
+Returns an array containing all the keys in the Group.
+
+#### clusterKeys()
+
+Same as `group.keys()`, but returns all keys in this Group ID across the Cluster.
+
+#### limiters()
+
+```js
+const limiters = group.limiters();
+
+console.log(limiters);
+// [ { key: "some key", limiter: }, { key: "some other key", limiter: } ]
+```
+
+## Batching
+
+Some APIs can accept multiple operations in a single call. Bottleneck's Batching feature helps you take advantage of those APIs:
+```js
+const batcher = new Bottleneck.Batcher({
+ maxTime: 1000,
+ maxSize: 10
+});
+
+batcher.on("batch", (batch) => {
+ console.log(batch); // ["some-data", "some-other-data"]
+
+ // Handle batch here
+});
+
+batcher.add("some-data");
+batcher.add("some-other-data");
+```
+
+`batcher.add()` returns a Promise that resolves once the request has been flushed to a `"batch"` event.
+
+| Option | Default | Description |
+|--------|---------|-------------|
+| `maxTime` | `null` (unlimited) | Maximum acceptable time (in milliseconds) a request can have to wait before being flushed to the `"batch"` event. |
+| `maxSize` | `null` (unlimited) | Maximum number of requests in a batch. |
+
+Batching doesn't throttle requests, it only groups them up optimally according to your `maxTime` and `maxSize` settings.
+
+## Clustering
+
+Clustering lets many limiters access the same shared state, stored in Redis. Changes to the state are Atomic, Consistent and Isolated (and fully [ACID](https://en.wikipedia.org/wiki/ACID) with the right [Durability](https://redis.io/topics/persistence) configuration), to eliminate any chances of race conditions or state corruption. Your settings, such as `maxConcurrent`, `minTime`, etc., are shared across the whole cluster, which means —for example— that `{ maxConcurrent: 5 }` guarantees no more than 5 jobs can ever run at a time in the entire cluster of limiters. 100% of Bottleneck's features are supported in Clustering mode. Enabling Clustering is as simple as changing a few settings. It's also a convenient way to store or export state for later use.
+
+Bottleneck will attempt to spread load evenly across limiters.
+
+### Enabling Clustering
+
+First, add `redis` or `ioredis` to your application's dependencies:
+```bash
+# NodeRedis (https://github.com/NodeRedis/node_redis)
+npm install --save redis
+
+# or ioredis (https://github.com/luin/ioredis)
+npm install --save ioredis
+```
+Then create a limiter or a Group:
+```js
+const limiter = new Bottleneck({
+ /* Some basic options */
+ maxConcurrent: 5,
+ minTime: 500,
+ id: "my-super-app", // All limiters with the same id will be clustered together
+
+ /* Clustering options */
+ datastore: "redis", // or "ioredis"
+ clearDatastore: false,
+ clientOptions: {
+ host: "127.0.0.1",
+ port: 6379
+
+ // Redis client options
+ // Using NodeRedis? See https://github.com/NodeRedis/node_redis#options-object-properties
+ // Using ioredis? See https://github.com/luin/ioredis/blob/master/API.md#new-redisport-host-options
+ }
+});
+```
+
+| Option | Default | Description |
+|--------|---------|-------------|
+| `datastore` | `"local"` | Where the limiter stores its internal state. The default (`"local"`) keeps the state in the limiter itself. Set it to `"redis"` or `"ioredis"` to enable Clustering. |
+| `clearDatastore` | `false` | When set to `true`, on initial startup, the limiter will wipe any existing Bottleneck state data on the Redis db. |
+| `clientOptions` | `{}` | This object is passed directly to the redis client library you've selected. |
+| `clusterNodes` | `null` | **ioredis only.** When `clusterNodes` is not null, the client will be instantiated by calling `new Redis.Cluster(clusterNodes, clientOptions)` instead of `new Redis(clientOptions)`. |
+| `timeout` | `null` (no TTL) | The Redis TTL in milliseconds ([TTL](https://redis.io/commands/ttl)) for the keys created by the limiter. When `timeout` is set, the limiter's state will be automatically removed from Redis after `timeout` milliseconds of inactivity. |
+| `Redis` | `null` | Overrides the import/require of the redis/ioredis library. You shouldn't need to set this option unless your application is failing to start due to a failure to require/import the client library. |
+
+**Note: When using Groups**, the `timeout` option has a default of `300000` milliseconds and the generated limiters automatically receive an `id` with the pattern `${group.id}-${KEY}`.
+
+**Note:** If you are seeing a runtime error due to the `require()` function not being able to load `redis`/`ioredis`, then directly pass the module as the `Redis` option. Example:
+```js
+import Redis from "ioredis"
+
+const limiter = new Bottleneck({
+ id: "my-super-app",
+ datastore: "ioredis",
+ clientOptions: { host: '12.34.56.78', port: 6379 },
+ Redis
+});
+```
+Unfortunately, this is a side effect of having to disable inlining, which is necessary to make Bottleneck easy to use in the browser.
+
+### Important considerations when Clustering
+
+The first limiter connecting to Redis will store its [constructor options](#constructor) on Redis and all subsequent limiters will be using those settings. You can alter the constructor options used by all the connected limiters by calling `updateSettings()`. The `clearDatastore` option instructs a new limiter to wipe any previous Bottleneck data (for that `id`), including previously stored settings.
+
+Queued jobs are **NOT** stored on Redis. They are local to each limiter. Exiting the Node.js process will lose those jobs. This is because Bottleneck has no way to propagate the JS code to run a job across a different Node.js process than the one it originated on. Bottleneck doesn't keep track of the queue contents of the limiters on a cluster for performance and reliability reasons. You can use something like [`BeeQueue`](https://github.com/bee-queue/bee-queue) in addition to Bottleneck to get around this limitation.
+
+Due to the above, functionality relying on the queue length happens purely locally:
+- Priorities are local. A higher priority job will run before a lower priority job **on the same limiter**. Another limiter on the cluster might run a lower priority job before our higher priority one.
+- Assuming constant priority levels, Bottleneck guarantees that jobs will be run in the order they were received **on the same limiter**. Another limiter on the cluster might run a job received later before ours runs.
+- `highWater` and load shedding ([strategies](#strategies)) are per limiter. However, one limiter entering Blocked mode will put the entire cluster in Blocked mode until `penalty` milliseconds have passed. See [Strategies](#strategies).
+- The `"empty"` event is triggered when the (local) queue is empty.
+- The `"idle"` event is triggered when the (local) queue is empty *and* no jobs are currently running anywhere in the cluster.
+
+You must work around these limitations in your application code if they are an issue to you. The `publish()` method could be useful here.
+
+The current design guarantees reliability, is highly performant and lets limiters come and go. Your application can scale up or down, and clients can be disconnected at any time without issues.
+
+It is **strongly recommended** that you give an `id` to every limiter and Group since it is used to build the name of your limiter's Redis keys! Limiters with the same `id` inside the same Redis db will be sharing the same datastore.
+
+It is **strongly recommended** that you set an `expiration` (See [Job Options](#job-options)) *on every job*, since that lets the cluster recover from crashed or disconnected clients. Otherwise, a client crashing while executing a job would not be able to tell the cluster to decrease its number of "running" jobs. By using expirations, those lost jobs are automatically cleared after the specified time has passed. Using expirations is essential to keeping a cluster reliable in the face of unpredictable application bugs, network hiccups, and so on.
+
+Network latency between Node.js and Redis is not taken into account when calculating timings (such as `minTime`). To minimize the impact of latency, Bottleneck only performs a single Redis call per [lifecycle transition](#jobs-lifecycle). Keeping the Redis server close to your limiters will help you get a more consistent experience. Keeping the system time consistent across all clients will also help.
+
+It is **strongly recommended** to [set up an `"error"` listener](#events) on all your limiters and on your Groups.
+
+### Clustering Methods
+
+The `ready()`, `publish()` and `clients()` methods also exist when using the `local` datastore, for code compatibility reasons: code written for `redis`/`ioredis` won't break with `local`.
+
+#### ready()
+
+This method returns a promise that resolves once the limiter is connected to Redis.
+
+As of v2.9.0, it's no longer necessary to wait for `.ready()` to resolve before issuing commands to a limiter. The commands will be queued until the limiter successfully connects. Make sure to listen to the `"error"` event to handle connection errors.
+
+```js
+const limiter = new Bottleneck({/* options */});
+
+limiter.on("error", (err) => {
+ // handle network errors
+});
+
+limiter.ready()
+.then(() => {
+ // The limiter is ready
+});
+```
+
+#### publish(message)
+
+This method broadcasts the `message` string to every limiter in the Cluster. It returns a promise.
+```js
+const limiter = new Bottleneck({/* options */});
+
+limiter.on("message", (msg) => {
+ console.log(msg); // prints "this is a string"
+});
+
+limiter.publish("this is a string");
+```
+
+To send objects, stringify them first:
+```js
+limiter.on("message", (msg) => {
+ console.log(JSON.parse(msg).hello) // prints "world"
+});
+
+limiter.publish(JSON.stringify({ hello: "world" }));
+```
+
+#### clients()
+
+If you need direct access to the redis clients, use `.clients()`:
+```js
+console.log(limiter.clients());
+// { client: , subscriber: }
+```
+
+### Additional Clustering information
+
+- Bottleneck is compatible with [Redis Clusters](https://redis.io/topics/cluster-tutorial), but you must use the `ioredis` datastore and the `clusterNodes` option.
+- Bottleneck is compatible with Redis Sentinel, but you must use the `ioredis` datastore.
+- Bottleneck's data is stored in Redis keys starting with `b_`. It also uses pubsub channels starting with `b_`. It will not interfere with any other data stored on the server.
+- Bottleneck loads a few Lua scripts on the Redis server using the `SCRIPT LOAD` command. These scripts only take up a few Kb of memory. Running the `SCRIPT FLUSH` command will cause any connected limiters to experience critical errors until a new limiter connects to Redis and loads the scripts again.
+- The Lua scripts are highly optimized and designed to use as few resources as possible.
+
+### Managing Redis Connections
+
+Bottleneck needs to create 2 Redis Clients to function, one for normal operations and one for pubsub subscriptions. These 2 clients are kept in a `Bottleneck.RedisConnection` (NodeRedis) or a `Bottleneck.IORedisConnection` (ioredis) object, referred to as the Connection object.
+
+By default, every Group and every standalone limiter (a limiter not created by a Group) will create their own Connection object, but it is possible to manually control this behavior. In this example, every Group and limiter is sharing the same Connection object and therefore the same 2 clients:
+```js
+const connection = new Bottleneck.RedisConnection({
+ clientOptions: {/* NodeRedis/ioredis options */}
+ // ioredis also accepts `clusterNodes` here
+});
+
+
+const limiter = new Bottleneck({ connection: connection });
+const group = new Bottleneck.Group({ connection: connection });
+```
+You can access and reuse the Connection object of any Group or limiter:
+```js
+const group = new Bottleneck.Group({ connection: limiter.connection });
+```
+When a Connection object is created manually, the connectivity `"error"` events are emitted on the Connection itself.
+```js
+connection.on("error", (err) => { /* handle connectivity errors here */ });
+```
+If you already have a NodeRedis/ioredis client, you can ask Bottleneck to reuse it, although currently the Connection object will still create a second client for pubsub operations:
+```js
+import Redis from "redis";
+const client = Redis.createClient({/* options */});
+
+const connection = new Bottleneck.RedisConnection({
+ // `clientOptions` and `clusterNodes` will be ignored since we're passing a raw client
+ client: client
+});
+
+const limiter = new Bottleneck({ connection: connection });
+const group = new Bottleneck.Group({ connection: connection });
+```
+Depending on your application, using more clients can improve performance.
+
+Use the `disconnect(flush)` method to close the Redis clients.
+```js
+limiter.disconnect();
+group.disconnect();
+```
+If you created the Connection object manually, you need to call `connection.disconnect()` instead, for safety reasons.
+
+## Debugging your application
+
+Debugging complex scheduling logic can be difficult, especially when priorities, weights, and network latency all interact with one another.
+
+If your application is not behaving as expected, start by making sure you're catching `"error"` [events emitted](#events) by your limiters and your Groups. Those errors are most likely uncaught exceptions from your application code.
+
+Make sure you've read the ['Gotchas'](#gotchas) section.
+
+To see exactly what a limiter is doing in real time, listen to the `"debug"` event. It contains detailed information about how the limiter is executing your code. Adding [job IDs](#job-options) to all your jobs makes the debug output more readable.
+
+When Bottleneck has to fail one of your jobs, it does so by using `BottleneckError` objects. This lets you tell those errors apart from your own code's errors:
+```js
+limiter.schedule(fn)
+.then((result) => { /* ... */ } )
+.catch((error) => {
+ if (error instanceof Bottleneck.BottleneckError) {
+ /* ... */
+ }
+});
+```
+
+## Upgrading to v2
+
+The internal algorithms essentially haven't changed from v1, but many small changes to the interface were made to introduce new features.
+
+All the breaking changes:
+- Bottleneck v2 requires Node 6+ or a modern browser. Use `require("bottleneck/es5")` if you need ES5 support in v2. Bottleneck v1 will continue to use ES5 only.
+- The Bottleneck constructor now takes an options object. See [Constructor](#constructor).
+- The `Cluster` feature is now called `Group`. This is to distinguish it from the new v2 [Clustering](#clustering) feature.
+- The `Group` constructor takes an options object to match the limiter constructor.
+- Jobs take an optional options object. See [Job options](#job-options).
+- Removed `submitPriority()`, use `submit()` with an options object instead.
+- Removed `schedulePriority()`, use `schedule()` with an options object instead.
+- The `rejectOnDrop` option is now `true` by default. It can be set to `false` if you wish to retain v1 behavior. However this option is left undocumented as enabling it is considered to be a poor practice.
+- Use `null` instead of `0` to indicate an unlimited `maxConcurrent` value.
+- Use `null` instead of `-1` to indicate an unlimited `highWater` value.
+- Renamed `changeSettings()` to `updateSettings()`, it now returns a promise to indicate completion. It takes the same options object as the constructor.
+- Renamed `nbQueued()` to `queued()`.
+- Renamed `nbRunning` to `running()`, it now returns its result using a promise.
+- Removed `isBlocked()`.
+- Changing the Promise library is now done through the options object like any other limiter setting.
+- Removed `changePenalty()`, it is now done through the options object like any other limiter setting.
+- Removed `changeReservoir()`, it is now done through the options object like any other limiter setting.
+- Removed `stopAll()`. Use the new `stop()` method.
+- `check()` now accepts an optional `weight` argument, and returns its result using a promise.
+- Removed the `Group` `changeTimeout()` method. Instead, pass a `timeout` option when creating a Group.
+
+Version 2 is more user-friendly and powerful.
+
+After upgrading your code, please take a minute to read the [Debugging your application](#debugging-your-application) chapter.
+
+
+## Contributing
+
+This README is always in need of improvements. If wording can be clearer and simpler, please consider forking this repo and submitting a Pull Request, or simply opening an issue.
+
+Suggestions and bug reports are also welcome.
+
+To work on the Bottleneck code, simply clone the repo, make your changes to the files located in `src/` only, then run `./scripts/build.sh && npm test` to ensure that everything is set up correctly.
+
+To speed up compilation time during development, run `./scripts/build.sh dev` instead. Make sure to build and test without `dev` before submitting a PR.
+
+The tests must also pass in Clustering mode and using the ES5 bundle. You'll need a Redis server running locally (latency needs to be minimal to run the tests). If the server isn't using the default hostname and port, you can set those in the `.env` file. Then run `./scripts/build.sh && npm run test-all`.
+
+All contributions are appreciated and will be considered.
+
+[license-url]: https://github.com/SGrondin/bottleneck/blob/master/LICENSE
+
+[npm-url]: https://www.npmjs.com/package/bottleneck
+[npm-license]: https://img.shields.io/npm/l/bottleneck.svg?style=flat
+[npm-version]: https://img.shields.io/npm/v/bottleneck.svg?style=flat
+[npm-downloads]: https://img.shields.io/npm/dm/bottleneck.svg?style=flat
diff --git a/node_modules/bottleneck/bottleneck.d.ts b/node_modules/bottleneck/bottleneck.d.ts
new file mode 100644
index 000000000..3ad20c128
--- /dev/null
+++ b/node_modules/bottleneck/bottleneck.d.ts
@@ -0,0 +1,629 @@
+declare module "bottleneck" {
+ namespace Bottleneck {
+ type ConstructorOptions = {
+ /**
+ * How many jobs can be running at the same time.
+ */
+ readonly maxConcurrent?: number | null;
+ /**
+ * How long to wait after launching a job before launching another one.
+ */
+ readonly minTime?: number | null;
+ /**
+ * How long can the queue get? When the queue length exceeds that value, the selected `strategy` is executed to shed the load.
+ */
+ readonly highWater?: number | null;
+ /**
+ * Which strategy to use if the queue gets longer than the high water mark.
+ */
+ readonly strategy?: Bottleneck.Strategy | null;
+ /**
+ * The `penalty` value used by the `Bottleneck.strategy.BLOCK` strategy.
+ */
+ readonly penalty?: number | null;
+ /**
+ * How many jobs can be executed before the limiter stops executing jobs. If `reservoir` reaches `0`, no jobs will be executed until it is no longer `0`.
+ */
+ readonly reservoir?: number | null;
+ /**
+ * Every `reservoirRefreshInterval` milliseconds, the `reservoir` value will be automatically reset to `reservoirRefreshAmount`.
+ */
+ readonly reservoirRefreshInterval?: number | null;
+ /**
+ * The value to reset `reservoir` to when `reservoirRefreshInterval` is in use.
+ */
+ readonly reservoirRefreshAmount?: number | null;
+ /**
+ * The increment applied to `reservoir` when `reservoirIncreaseInterval` is in use.
+ */
+ readonly reservoirIncreaseAmount?: number | null;
+ /**
+ * Every `reservoirIncreaseInterval` milliseconds, the `reservoir` value will be automatically incremented by `reservoirIncreaseAmount`.
+ */
+ readonly reservoirIncreaseInterval?: number | null;
+ /**
+ * The maximum value that `reservoir` can reach when `reservoirIncreaseInterval` is in use.
+ */
+ readonly reservoirIncreaseMaximum?: number | null;
+ /**
+ * Optional identifier
+ */
+ readonly id?: string | null;
+ /**
+ * Set to true to leave your failed jobs hanging instead of failing them.
+ */
+ readonly rejectOnDrop?: boolean | null;
+ /**
+ * Set to true to keep track of done jobs with counts() and jobStatus(). Uses more memory.
+ */
+ readonly trackDoneStatus?: boolean | null;
+ /**
+ * Where the limiter stores its internal state. The default (`local`) keeps the state in the limiter itself. Set it to `redis` to enable Clustering.
+ */
+ readonly datastore?: string | null;
+ /**
+ * Override the Promise library used by Bottleneck.
+ */
+ readonly Promise?: any;
+ /**
+ * This object is passed directly to the redis client library you've selected.
+ */
+ readonly clientOptions?: any;
+ /**
+ * **ioredis only.** When `clusterNodes` is not null, the client will be instantiated by calling `new Redis.Cluster(clusterNodes, clientOptions)`.
+ */
+ readonly clusterNodes?: any;
+ /**
+ * An existing Bottleneck.RedisConnection or Bottleneck.IORedisConnection object to use.
+ * If using, `datastore`, `clientOptions` and `clusterNodes` will be ignored.
+ */
+ /**
+ * Optional Redis/IORedis library from `require('ioredis')` or equivalent. If not, Bottleneck will attempt to require Redis/IORedis at runtime.
+ */
+ readonly Redis?: any;
+ /**
+ * Bottleneck connection object created from `new Bottleneck.RedisConnection` or `new Bottleneck.IORedisConnection`.
+ */
+ readonly connection?: Bottleneck.RedisConnection | Bottleneck.IORedisConnection | null;
+ /**
+ * When set to `true`, on initial startup, the limiter will wipe any existing Bottleneck state data on the Redis db.
+ */
+ readonly clearDatastore?: boolean | null;
+ /**
+ * The Redis TTL in milliseconds for the keys created by the limiter. When `timeout` is set, the limiter's state will be automatically removed from Redis after timeout milliseconds of inactivity. Note: timeout is 300000 (5 minutes) by default when using a Group.
+ */
+ readonly timeout?: number | null;
+
+ [propName: string]: any;
+ };
+ type JobOptions = {
+ /**
+ * A priority between `0` and `9`. A job with a priority of `4` will _always_ be executed before a job with a priority of `5`.
+ */
+ readonly priority?: number | null;
+ /**
+ * Must be an integer equal to or higher than `0`. The `weight` is what increases the number of running jobs (up to `maxConcurrent`, if using) and decreases the `reservoir` value (if using).
+ */
+ readonly weight?: number | null;
+ /**
+     * The number of milliseconds a job has to finish. Jobs that take longer than their `expiration` will be failed with a `BottleneckError`.
+ */
+ readonly expiration?: number | null;
+ /**
+ * Optional identifier, helps with debug output.
+ */
+ readonly id?: string | null;
+ };
+ type StopOptions = {
+ /**
+ * When `true`, drop all the RECEIVED, QUEUED and RUNNING jobs. When `false`, allow those jobs to complete before resolving the Promise returned by this method.
+ */
+ readonly dropWaitingJobs?: boolean | null;
+ /**
+ * The error message used to drop jobs when `dropWaitingJobs` is `true`.
+ */
+ readonly dropErrorMessage?: string | null;
+ /**
+ * The error message used to reject a job added to the limiter after `stop()` has been called.
+ */
+ readonly enqueueErrorMessage?: string | null;
+ };
+ type Callback = (err: any, result: T) => void;
+ type ClientsList = { client?: any; subscriber?: any };
+ type GroupLimiterPair = { key: string; limiter: Bottleneck };
+ interface Strategy {}
+
+ type EventInfo = {
+ readonly args: any[];
+ readonly options: {
+ readonly id: string;
+ readonly priority: number;
+ readonly weight: number;
+ readonly expiration?: number;
+ };
+ };
+ type EventInfoDropped = EventInfo & {
+ readonly task: Function;
+ readonly promise: Promise;
+ };
+ type EventInfoQueued = EventInfo & {
+ readonly reachedHWM: boolean;
+ readonly blocked: boolean;
+ };
+ type EventInfoRetryable = EventInfo & { readonly retryCount: number; };
+
+ enum Status {
+ RECEIVED = "RECEIVED",
+ QUEUED = "QUEUED",
+ RUNNING = "RUNNING",
+ EXECUTING = "EXECUTING",
+ DONE = "DONE"
+ }
+ type Counts = {
+ RECEIVED: number,
+ QUEUED: number,
+ RUNNING: number,
+ EXECUTING: number,
+ DONE?: number
+ };
+
+ type RedisConnectionOptions = {
+ /**
+ * This object is passed directly to NodeRedis' createClient() method.
+ */
+ readonly clientOptions?: any;
+ /**
+ * An existing NodeRedis client to use. If using, `clientOptions` will be ignored.
+ */
+ readonly client?: any;
+ /**
+ * Optional Redis library from `require('redis')` or equivalent. If not, Bottleneck will attempt to require Redis at runtime.
+ */
+ readonly Redis?: any;
+ };
+
+ type IORedisConnectionOptions = {
+ /**
+ * This object is passed directly to ioredis' constructor method.
+ */
+ readonly clientOptions?: any;
+ /**
+ * When `clusterNodes` is not null, the client will be instantiated by calling `new Redis.Cluster(clusterNodes, clientOptions)`.
+ */
+ readonly clusterNodes?: any;
+ /**
+ * An existing ioredis client to use. If using, `clientOptions` and `clusterNodes` will be ignored.
+ */
+ readonly client?: any;
+ /**
+ * Optional IORedis library from `require('ioredis')` or equivalent. If not, Bottleneck will attempt to require IORedis at runtime.
+ */
+ readonly Redis?: any;
+ };
+
+ type BatcherOptions = {
+ /**
+ * Maximum acceptable time (in milliseconds) a request can have to wait before being flushed to the `"batch"` event.
+ */
+ readonly maxTime?: number | null;
+ /**
+ * Maximum number of requests in a batch.
+ */
+ readonly maxSize?: number | null;
+ };
+
+ class BottleneckError extends Error {
+ }
+
+ class RedisConnection {
+ constructor(options?: Bottleneck.RedisConnectionOptions);
+
+ /**
+ * Register an event listener.
+ * @param name - The event name.
+ * @param fn - The callback function.
+ */
+ on(name: "error", fn: (error: any) => void): void;
+
+ /**
+ * Register an event listener for one event only.
+ * @param name - The event name.
+ * @param fn - The callback function.
+ */
+ once(name: "error", fn: (error: any) => void): void;
+
+ /**
+ * Waits until the connection is ready and returns the raw Node_Redis clients.
+ */
+ ready(): Promise;
+
+ /**
+ * Close the redis clients.
+ * @param flush - Write transient data before closing.
+ */
+ disconnect(flush?: boolean): Promise;
+ }
+
+ class IORedisConnection {
+ constructor(options?: Bottleneck.IORedisConnectionOptions);
+
+ /**
+ * Register an event listener.
+ * @param name - The event name.
+ * @param fn - The callback function.
+ */
+ on(name: "error", fn: (error: any) => void): void;
+
+ /**
+ * Register an event listener for one event only.
+ * @param name - The event name.
+ * @param fn - The callback function.
+ */
+ once(name: "error", fn: (error: any) => void): void;
+
+ /**
+ * Waits until the connection is ready and returns the raw ioredis clients.
+ */
+ ready(): Promise;
+
+ /**
+ * Close the redis clients.
+ * @param flush - Write transient data before closing.
+ */
+ disconnect(flush?: boolean): Promise;
+ }
+
+ class Batcher {
+ constructor(options?: Bottleneck.BatcherOptions);
+
+ /**
+ * Register an event listener.
+ * @param name - The event name.
+ * @param fn - The callback function.
+ */
+ on(name: string, fn: Function): void;
+ on(name: "error", fn: (error: any) => void): void;
+ on(name: "batch", fn: (batch: any[]) => void): void;
+
+ /**
+ * Register an event listener for one event only.
+ * @param name - The event name.
+ * @param fn - The callback function.
+ */
+ once(name: string, fn: Function): void;
+ once(name: "error", fn: (error: any) => void): void;
+ once(name: "batch", fn: (batch: any[]) => void): void;
+
+ /**
+ * Add a request to the Batcher. Batches are flushed to the "batch" event.
+ */
+ add(data: any): Promise;
+ }
+
+ class Group {
+ constructor(options?: Bottleneck.ConstructorOptions);
+
+ id: string;
+ datastore: string;
+ connection?: Bottleneck.RedisConnection | Bottleneck.IORedisConnection;
+
+ /**
+ * Returns the limiter for the specified key.
+ * @param str - The limiter key.
+ */
+ key(str: string): Bottleneck;
+
+ /**
+ * Register an event listener.
+ * @param name - The event name.
+ * @param fn - The callback function.
+ */
+ on(name: string, fn: Function): void;
+ on(name: "error", fn: (error: any) => void): void;
+ on(name: "created", fn: (limiter: Bottleneck, key: string) => void): void;
+
+ /**
+ * Register an event listener for one event only.
+ * @param name - The event name.
+ * @param fn - The callback function.
+ */
+ once(name: string, fn: Function): void;
+ once(name: "error", fn: (error: any) => void): void;
+ once(name: "created", fn: (limiter: Bottleneck, key: string) => void): void;
+
+ /**
+ * Removes all registered event listeners.
+ * @param name - The optional event name to remove listeners from.
+ */
+ removeAllListeners(name?: string): void;
+
+ /**
+ * Updates the group settings.
+ * @param options - The new settings.
+ */
+ updateSettings(options: Bottleneck.ConstructorOptions): void;
+
+ /**
+ * Deletes the limiter for the given key.
+ * Returns true if a key was deleted.
+ * @param str - The key
+ */
+ deleteKey(str: string): Promise;
+
+ /**
+ * Disconnects the underlying redis clients, unless the Group was created with the `connection` option.
+ * @param flush - Write transient data before closing.
+ */
+ disconnect(flush?: boolean): Promise;
+
+ /**
+ * Returns all the key-limiter pairs.
+ */
+ limiters(): Bottleneck.GroupLimiterPair[];
+
+ /**
+ * Returns all Group keys in the local instance
+ */
+ keys(): string[];
+
+ /**
+ * Returns all Group keys in the Cluster
+ */
+ clusterKeys(): Promise;
+ }
+
+ class Events {
+ constructor(object: Object);
+
+ /**
+ * Returns the number of limiters for the event name
+ * @param name - The event name.
+ */
+ listenerCount(name: string): number;
+
+ /**
+ * Returns a promise with the first non-null/non-undefined result from a listener
+ * @param name - The event name.
+ * @param args - The arguments to pass to the event listeners.
+ */
+ trigger(name: string, ...args: any[]): Promise;
+ }
+ }
+
+ class Bottleneck {
+ public static readonly strategy: {
+ /**
+ * When adding a new job to a limiter, if the queue length reaches `highWater`, drop the oldest job with the lowest priority. This is useful when jobs that have been waiting for too long are not important anymore. If all the queued jobs are more important (based on their `priority` value) than the one being added, it will not be added.
+ */
+ readonly LEAK: Bottleneck.Strategy;
+ /**
+ * Same as `LEAK`, except it will only drop jobs that are less important than the one being added. If all the queued jobs are as or more important than the new one, it will not be added.
+ */
+ readonly OVERFLOW_PRIORITY: Bottleneck.Strategy;
+ /**
+ * When adding a new job to a limiter, if the queue length reaches `highWater`, do not add the new job. This strategy totally ignores priority levels.
+ */
+ readonly OVERFLOW: Bottleneck.Strategy;
+ /**
+ * When adding a new job to a limiter, if the queue length reaches `highWater`, the limiter falls into "blocked mode". All queued jobs are dropped and no new jobs will be accepted until the limiter unblocks. It will unblock after `penalty` milliseconds have passed without receiving a new job. `penalty` is equal to `15 * minTime` (or `5000` if `minTime` is `0`) by default and can be changed by calling `changePenalty()`. This strategy is ideal when bruteforce attacks are to be expected. This strategy totally ignores priority levels.
+ */
+ readonly BLOCK: Bottleneck.Strategy;
+ };
+
+ constructor(options?: Bottleneck.ConstructorOptions);
+
+ id: string;
+ datastore: string;
+ connection?: Bottleneck.RedisConnection | Bottleneck.IORedisConnection;
+
+ /**
+ * Returns a promise which will be resolved once the limiter is ready to accept jobs
+ * or rejected if it fails to start up.
+ */
+ ready(): Promise;
+
+ /**
+ * Returns a datastore-specific object of redis clients.
+ */
+ clients(): Bottleneck.ClientsList;
+
+ /**
+ * Returns the name of the Redis pubsub channel used for this limiter
+ */
+ channel(): string;
+
+ /**
+ * Disconnects the underlying redis clients, unless the limiter was created with the `connection` option.
+ * @param flush - Write transient data before closing.
+ */
+ disconnect(flush?: boolean): Promise;
+
+ /**
+ * Broadcast a string to every limiter in the Cluster.
+ */
+ publish(message: string): Promise;
+
+ /**
+ * Returns an object with the current number of jobs per status.
+ */
+ counts(): Bottleneck.Counts;
+
+ /**
+ * Returns the status of the job with the provided job id.
+ */
+ jobStatus(id: string): Bottleneck.Status;
+
+ /**
+     * Returns the ids of all jobs with the specified status. Defaults to all statuses if none is provided.
+ */
+ jobs(status?: Bottleneck.Status): string[];
+
+ /**
+ * Returns the number of requests queued.
+ * @param priority - Returns the number of requests queued with the specified priority.
+ */
+ queued(priority?: number): number;
+
+ /**
+ * Returns the number of requests queued across the Cluster.
+ */
+ clusterQueued(): Promise;
+
+ /**
+ * Returns whether there are any jobs currently in the queue or in the process of being added to the queue.
+ */
+ empty(): boolean;
+
+ /**
+ * Returns the total weight of jobs in a RUNNING or EXECUTING state in the Cluster.
+ */
+ running(): Promise;
+
+ /**
+ * Returns the total weight of jobs in a DONE state in the Cluster.
+ */
+ done(): Promise;
+
+ /**
+ * If a request was added right now, would it be run immediately?
+ * @param weight - The weight of the request
+ */
+ check(weight?: number): Promise;
+
+ /**
+ * Register an event listener.
+ * @param name - The event name.
+ * @param fn - The callback function.
+ */
+ on(name: "error", fn: (error: any) => void): void;
+ on(name: "empty", fn: () => void): void;
+ on(name: "idle", fn: () => void): void;
+ on(name: "depleted", fn: (empty: boolean) => void): void;
+ on(name: "message", fn: (message: string) => void): void;
+ on(name: "debug", fn: (message: string, info: any) => void): void;
+ on(name: "dropped", fn: (dropped: Bottleneck.EventInfoDropped) => void): void;
+ on(name: "received", fn: (info: Bottleneck.EventInfo) => void): void;
+ on(name: "queued", fn: (info: Bottleneck.EventInfoQueued) => void): void;
+ on(name: "scheduled", fn: (info: Bottleneck.EventInfo) => void): void;
+ on(name: "executing", fn: (info: Bottleneck.EventInfoRetryable) => void): void;
+ on(name: "failed", fn: (error: any, info: Bottleneck.EventInfoRetryable) => Promise | number | void | null): void;
+ on(name: "retry", fn: (message: string, info: Bottleneck.EventInfoRetryable) => void): void;
+ on(name: "done", fn: (info: Bottleneck.EventInfoRetryable) => void): void;
+
+ /**
+ * Register an event listener for one event only.
+ * @param name - The event name.
+ * @param fn - The callback function.
+ */
+ once(name: "error", fn: (error: any) => void): void;
+ once(name: "empty", fn: () => void): void;
+ once(name: "idle", fn: () => void): void;
+ once(name: "depleted", fn: (empty: boolean) => void): void;
+ once(name: "message", fn: (message: string) => void): void;
+ once(name: "debug", fn: (message: string, info: any) => void): void;
+ once(name: "dropped", fn: (dropped: Bottleneck.EventInfoDropped) => void): void;
+ once(name: "received", fn: (info: Bottleneck.EventInfo) => void): void;
+ once(name: "queued", fn: (info: Bottleneck.EventInfoQueued) => void): void;
+ once(name: "scheduled", fn: (info: Bottleneck.EventInfo) => void): void;
+ once(name: "executing", fn: (info: Bottleneck.EventInfoRetryable) => void): void;
+ once(name: "failed", fn: (error: any, info: Bottleneck.EventInfoRetryable) => Promise | number | void | null): void;
+ once(name: "retry", fn: (message: string, info: Bottleneck.EventInfoRetryable) => void): void;
+ once(name: "done", fn: (info: Bottleneck.EventInfoRetryable) => void): void;
+
+ /**
+ * Removes all registered event listeners.
+ * @param name - The optional event name to remove listeners from.
+ */
+ removeAllListeners(name?: string): void;
+
+ /**
+ * Changes the settings for future requests.
+ * @param options - The new settings.
+ */
+ updateSettings(options?: Bottleneck.ConstructorOptions): Bottleneck;
+
+ /**
+ * Adds to the reservoir count and returns the new value.
+ */
+ incrementReservoir(incrementBy: number): Promise;
+
+ /**
+ * The `stop()` method is used to safely shutdown a limiter. It prevents any new jobs from being added to the limiter and waits for all Executing jobs to complete.
+ */
+ stop(options?: Bottleneck.StopOptions): Promise;
+
+ /**
+ * Returns the current reservoir count, if any.
+ */
+ currentReservoir(): Promise;
+
+ /**
+ * Chain this limiter to another.
+ * @param limiter - The limiter that requests to this limiter must also follow.
+ */
+ chain(limiter?: Bottleneck): Bottleneck;
+
+ wrap(fn: () => PromiseLike): (() => Promise) & { withOptions: (options: Bottleneck.JobOptions) => Promise; };
+ wrap(fn: (arg1: A1) => PromiseLike): ((arg1: A1) => Promise) & { withOptions: (options: Bottleneck.JobOptions, arg1: A1) => Promise; };
+ wrap(fn: (arg1: A1, arg2: A2) => PromiseLike): ((arg1: A1, arg2: A2) => Promise) & { withOptions: (options: Bottleneck.JobOptions, arg1: A1, arg2: A2) => Promise; };
+ wrap(fn: (arg1: A1, arg2: A2, arg3: A3) => PromiseLike): ((arg1: A1, arg2: A2, arg3: A3) => Promise) & { withOptions: (options: Bottleneck.JobOptions, arg1: A1, arg2: A2, arg3: A3) => Promise; };
+ wrap(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4) => PromiseLike): ((arg1: A1, arg2: A2, arg3: A3, arg4: A4) => Promise) & { withOptions: (options: Bottleneck.JobOptions, arg1: A1, arg2: A2, arg3: A3, arg4: A4) => Promise; };
+ wrap(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5) => PromiseLike): ((arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5) => Promise) & { withOptions: (options: Bottleneck.JobOptions, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5) => Promise; };
+ wrap(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6) => PromiseLike): ((arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6) => Promise) & { withOptions: (options: Bottleneck.JobOptions, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6) => Promise; };
+ wrap(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7) => PromiseLike): ((arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7) => Promise) & { withOptions: (options: Bottleneck.JobOptions, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7) => Promise; };
+ wrap(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8) => PromiseLike): ((arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8) => Promise) & { withOptions: (options: Bottleneck.JobOptions, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8) => Promise; };
+ wrap(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9) => PromiseLike): ((arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9) => Promise) & { withOptions: (options: Bottleneck.JobOptions, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9) => Promise; };
+ wrap(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, arg10: A10) => PromiseLike): ((arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, arg10: A10) => Promise) & { withOptions: (options: Bottleneck.JobOptions, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, arg10: A10) => Promise; };
+
+ submit(fn: (callback: Bottleneck.Callback) => void, callback: Bottleneck.Callback): void;
+ submit(fn: (arg1: A1, callback: Bottleneck.Callback) => void, arg1: A1, callback: Bottleneck.Callback): void;
+ submit(fn: (arg1: A1, arg2: A2, callback: Bottleneck.Callback) => void, arg1: A1, arg2: A2, callback: Bottleneck.Callback): void;
+ submit(fn: (arg1: A1, arg2: A2, arg3: A3, callback: Bottleneck.Callback) => void, arg1: A1, arg2: A2, arg3: A3, callback: Bottleneck.Callback): void;
+ submit(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, callback: Bottleneck.Callback) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, callback: Bottleneck.Callback): void;
+ submit(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, callback: Bottleneck.Callback) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, callback: Bottleneck.Callback): void;
+ submit(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, callback: Bottleneck.Callback) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, callback: Bottleneck.Callback): void;
+ submit(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, callback: Bottleneck.Callback) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, callback: Bottleneck.Callback): void;
+ submit(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, callback: Bottleneck.Callback) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, callback: Bottleneck.Callback): void;
+ submit(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, callback: Bottleneck.Callback) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, callback: Bottleneck.Callback): void;
+ submit(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, arg10: A10, callback: Bottleneck.Callback) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, arg10: A10, callback: Bottleneck.Callback): void;
+
+ submit(options: Bottleneck.JobOptions, fn: (callback: Bottleneck.Callback) => void, callback: Bottleneck.Callback): void;
+ submit(options: Bottleneck.JobOptions, fn: (arg1: A1, callback: Bottleneck.Callback) => void, arg1: A1, callback: Bottleneck.Callback): void;
+ submit(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, callback: Bottleneck.Callback) => void, arg1: A1, arg2: A2, callback: Bottleneck.Callback): void;
+ submit(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, callback: Bottleneck.Callback) => void, arg1: A1, arg2: A2, arg3: A3, callback: Bottleneck.Callback): void;
+ submit(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, callback: Bottleneck.Callback) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, callback: Bottleneck.Callback): void;
+ submit(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, callback: Bottleneck.Callback) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, callback: Bottleneck.Callback): void;
+ submit(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, callback: Bottleneck.Callback) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, callback: Bottleneck.Callback): void;
+ submit(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, callback: Bottleneck.Callback) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, callback: Bottleneck.Callback): void;
+ submit(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, callback: Bottleneck.Callback) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, callback: Bottleneck.Callback): void;
+ submit(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, callback: Bottleneck.Callback) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, callback: Bottleneck.Callback): void;
+ submit(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, arg10: A10, callback: Bottleneck.Callback) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, arg10: A10, callback: Bottleneck.Callback): void;
+
+ schedule(fn: () => PromiseLike): Promise;
+ schedule(fn: (arg1: A1) => PromiseLike, arg1: A1): Promise;
+ schedule(fn: (arg1: A1, arg2: A2) => PromiseLike, arg1: A1, arg2: A2): Promise;
+ schedule(fn: (arg1: A1, arg2: A2, arg3: A3) => PromiseLike, arg1: A1, arg2: A2, arg3: A3): Promise;
+ schedule(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4) => PromiseLike, arg1: A1, arg2: A2, arg3: A3, arg4: A4): Promise;
+ schedule(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5) => PromiseLike, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5): Promise;
+ schedule(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6) => PromiseLike, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6): Promise;
+ schedule(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7) => PromiseLike, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7): Promise;
+ schedule(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8) => PromiseLike, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8): Promise;
+ schedule(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9) => PromiseLike, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9): Promise;
+ schedule(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, arg10: A10) => PromiseLike, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, arg10: A10): Promise;
+
+ schedule(options: Bottleneck.JobOptions, fn: () => PromiseLike): Promise;
+ schedule(options: Bottleneck.JobOptions, fn: (arg1: A1) => PromiseLike, arg1: A1): Promise;
+ schedule(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2) => PromiseLike, arg1: A1, arg2: A2): Promise;
+ schedule(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3) => PromiseLike, arg1: A1, arg2: A2, arg3: A3): Promise;
+ schedule(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4) => PromiseLike, arg1: A1, arg2: A2, arg3: A3, arg4: A4): Promise;
+ schedule(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5) => PromiseLike, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5): Promise;
+ schedule(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6) => PromiseLike, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6): Promise;
+ schedule(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7) => PromiseLike, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7): Promise;
+ schedule(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8) => PromiseLike, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8): Promise;
+ schedule(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9) => PromiseLike, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9): Promise;
+ schedule(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, arg10: A10) => PromiseLike, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, arg10: A10): Promise;
+ }
+
+ export default Bottleneck;
+}
+
diff --git a/node_modules/bottleneck/bottleneck.d.ts.ejs b/node_modules/bottleneck/bottleneck.d.ts.ejs
new file mode 100644
index 000000000..18f19ad8a
--- /dev/null
+++ b/node_modules/bottleneck/bottleneck.d.ts.ejs
@@ -0,0 +1,588 @@
+declare module "bottleneck" {
+ namespace Bottleneck {
+ type ConstructorOptions = {
+ /**
+ * How many jobs can be running at the same time.
+ */
+ readonly maxConcurrent?: number | null;
+ /**
+ * How long to wait after launching a job before launching another one.
+ */
+ readonly minTime?: number | null;
+ /**
+ * How long can the queue get? When the queue length exceeds that value, the selected `strategy` is executed to shed the load.
+ */
+ readonly highWater?: number | null;
+ /**
+ * Which strategy to use if the queue gets longer than the high water mark.
+ */
+ readonly strategy?: Bottleneck.Strategy | null;
+ /**
+ * The `penalty` value used by the `Bottleneck.strategy.BLOCK` strategy.
+ */
+ readonly penalty?: number | null;
+ /**
+ * How many jobs can be executed before the limiter stops executing jobs. If `reservoir` reaches `0`, no jobs will be executed until it is no longer `0`.
+ */
+ readonly reservoir?: number | null;
+ /**
+ * Every `reservoirRefreshInterval` milliseconds, the `reservoir` value will be automatically reset to `reservoirRefreshAmount`.
+ */
+ readonly reservoirRefreshInterval?: number | null;
+ /**
+ * The value to reset `reservoir` to when `reservoirRefreshInterval` is in use.
+ */
+ readonly reservoirRefreshAmount?: number | null;
+ /**
+ * The increment applied to `reservoir` when `reservoirIncreaseInterval` is in use.
+ */
+ readonly reservoirIncreaseAmount?: number | null;
+ /**
+ * Every `reservoirIncreaseInterval` milliseconds, the `reservoir` value will be automatically incremented by `reservoirIncreaseAmount`.
+ */
+ readonly reservoirIncreaseInterval?: number | null;
+ /**
+ * The maximum value that `reservoir` can reach when `reservoirIncreaseInterval` is in use.
+ */
+ readonly reservoirIncreaseMaximum?: number | null;
+ /**
+ * Optional identifier
+ */
+ readonly id?: string | null;
+ /**
+ * Set to true to leave your failed jobs hanging instead of failing them.
+ */
+ readonly rejectOnDrop?: boolean | null;
+ /**
+ * Set to true to keep track of done jobs with counts() and jobStatus(). Uses more memory.
+ */
+ readonly trackDoneStatus?: boolean | null;
+ /**
+ * Where the limiter stores its internal state. The default (`local`) keeps the state in the limiter itself. Set it to `redis` to enable Clustering.
+ */
+ readonly datastore?: string | null;
+ /**
+ * Override the Promise library used by Bottleneck.
+ */
+ readonly Promise?: any;
+ /**
+ * This object is passed directly to the redis client library you've selected.
+ */
+ readonly clientOptions?: any;
+ /**
+ * **ioredis only.** When `clusterNodes` is not null, the client will be instantiated by calling `new Redis.Cluster(clusterNodes, clientOptions)`.
+ */
+ readonly clusterNodes?: any;
+ /**
+ * An existing Bottleneck.RedisConnection or Bottleneck.IORedisConnection object to use.
+ * If using, `datastore`, `clientOptions` and `clusterNodes` will be ignored.
+ */
+ /**
+ * Optional Redis/IORedis library from `require('ioredis')` or equivalent. If not, Bottleneck will attempt to require Redis/IORedis at runtime.
+ */
+ readonly Redis?: any;
+ /**
+ * Bottleneck connection object created from `new Bottleneck.RedisConnection` or `new Bottleneck.IORedisConnection`.
+ */
+ readonly connection?: Bottleneck.RedisConnection | Bottleneck.IORedisConnection | null;
+ /**
+ * When set to `true`, on initial startup, the limiter will wipe any existing Bottleneck state data on the Redis db.
+ */
+ readonly clearDatastore?: boolean | null;
+ /**
+ * The Redis TTL in milliseconds for the keys created by the limiter. When `timeout` is set, the limiter's state will be automatically removed from Redis after timeout milliseconds of inactivity. Note: timeout is 300000 (5 minutes) by default when using a Group.
+ */
+ readonly timeout?: number | null;
+
+ [propName: string]: any;
+ };
+ type JobOptions = {
+ /**
+ * A priority between `0` and `9`. A job with a priority of `4` will _always_ be executed before a job with a priority of `5`.
+ */
+ readonly priority?: number | null;
+ /**
+ * Must be an integer equal to or higher than `0`. The `weight` is what increases the number of running jobs (up to `maxConcurrent`, if using) and decreases the `reservoir` value (if using).
+ */
+ readonly weight?: number | null;
+ /**
+ * The number milliseconds a job has to finish. Jobs that take longer than their `expiration` will be failed with a `BottleneckError`.
+ */
+ readonly expiration?: number | null;
+ /**
+ * Optional identifier, helps with debug output.
+ */
+ readonly id?: string | null;
+ };
+ type StopOptions = {
+ /**
+ * When `true`, drop all the RECEIVED, QUEUED and RUNNING jobs. When `false`, allow those jobs to complete before resolving the Promise returned by this method.
+ */
+ readonly dropWaitingJobs?: boolean | null;
+ /**
+ * The error message used to drop jobs when `dropWaitingJobs` is `true`.
+ */
+ readonly dropErrorMessage?: string | null;
+ /**
+ * The error message used to reject a job added to the limiter after `stop()` has been called.
+ */
+ readonly enqueueErrorMessage?: string | null;
+ };
+ type Callback = (err: any, result: T) => void;
+ type ClientsList = { client?: any; subscriber?: any };
+ type GroupLimiterPair = { key: string; limiter: Bottleneck };
+ interface Strategy {}
+
+ type EventInfo = {
+ readonly args: any[];
+ readonly options: {
+ readonly id: string;
+ readonly priority: number;
+ readonly weight: number;
+ readonly expiration?: number;
+ };
+ };
+ type EventInfoDropped = EventInfo & {
+ readonly task: Function;
+ readonly promise: Promise;
+ };
+ type EventInfoQueued = EventInfo & {
+ readonly reachedHWM: boolean;
+ readonly blocked: boolean;
+ };
+ type EventInfoRetryable = EventInfo & { readonly retryCount: number; };
+
+ enum Status {
+ RECEIVED = "RECEIVED",
+ QUEUED = "QUEUED",
+ RUNNING = "RUNNING",
+ EXECUTING = "EXECUTING",
+ DONE = "DONE"
+ }
+ type Counts = {
+ RECEIVED: number,
+ QUEUED: number,
+ RUNNING: number,
+ EXECUTING: number,
+ DONE?: number
+ };
+
+ type RedisConnectionOptions = {
+ /**
+ * This object is passed directly to NodeRedis' createClient() method.
+ */
+ readonly clientOptions?: any;
+ /**
+ * An existing NodeRedis client to use. If using, `clientOptions` will be ignored.
+ */
+ readonly client?: any;
+ /**
+ * Optional Redis library from `require('redis')` or equivalent. If not, Bottleneck will attempt to require Redis at runtime.
+ */
+ readonly Redis?: any;
+ };
+
+ type IORedisConnectionOptions = {
+ /**
+ * This object is passed directly to ioredis' constructor method.
+ */
+ readonly clientOptions?: any;
+ /**
+ * When `clusterNodes` is not null, the client will be instantiated by calling `new Redis.Cluster(clusterNodes, clientOptions)`.
+ */
+ readonly clusterNodes?: any;
+ /**
+ * An existing ioredis client to use. If using, `clientOptions` and `clusterNodes` will be ignored.
+ */
+ readonly client?: any;
+ /**
+ * Optional IORedis library from `require('ioredis')` or equivalent. If not, Bottleneck will attempt to require IORedis at runtime.
+ */
+ readonly Redis?: any;
+ };
+
+ type BatcherOptions = {
+ /**
+ * Maximum acceptable time (in milliseconds) a request can have to wait before being flushed to the `"batch"` event.
+ */
+ readonly maxTime?: number | null;
+ /**
+ * Maximum number of requests in a batch.
+ */
+ readonly maxSize?: number | null;
+ };
+
+ class BottleneckError extends Error {
+ }
+
+ class RedisConnection {
+ constructor(options?: Bottleneck.RedisConnectionOptions);
+
+ /**
+ * Register an event listener.
+ * @param name - The event name.
+ * @param fn - The callback function.
+ */
+ on(name: "error", fn: (error: any) => void): void;
+
+ /**
+ * Register an event listener for one event only.
+ * @param name - The event name.
+ * @param fn - The callback function.
+ */
+ once(name: "error", fn: (error: any) => void): void;
+
+ /**
+ * Waits until the connection is ready and returns the raw Node_Redis clients.
+ */
+ ready(): Promise;
+
+ /**
+ * Close the redis clients.
+ * @param flush - Write transient data before closing.
+ */
+ disconnect(flush?: boolean): Promise;
+ }
+
+ class IORedisConnection {
+ constructor(options?: Bottleneck.IORedisConnectionOptions);
+
+ /**
+ * Register an event listener.
+ * @param name - The event name.
+ * @param fn - The callback function.
+ */
+ on(name: "error", fn: (error: any) => void): void;
+
+ /**
+ * Register an event listener for one event only.
+ * @param name - The event name.
+ * @param fn - The callback function.
+ */
+ once(name: "error", fn: (error: any) => void): void;
+
+ /**
+ * Waits until the connection is ready and returns the raw ioredis clients.
+ */
+ ready(): Promise;
+
+ /**
+ * Close the redis clients.
+ * @param flush - Write transient data before closing.
+ */
+ disconnect(flush?: boolean): Promise;
+ }
+
+ class Batcher {
+ constructor(options?: Bottleneck.BatcherOptions);
+
+ /**
+ * Register an event listener.
+ * @param name - The event name.
+ * @param fn - The callback function.
+ */
+ on(name: string, fn: Function): void;
+ on(name: "error", fn: (error: any) => void): void;
+ on(name: "batch", fn: (batch: any[]) => void): void;
+
+ /**
+ * Register an event listener for one event only.
+ * @param name - The event name.
+ * @param fn - The callback function.
+ */
+ once(name: string, fn: Function): void;
+ once(name: "error", fn: (error: any) => void): void;
+ once(name: "batch", fn: (batch: any[]) => void): void;
+
+ /**
+ * Add a request to the Batcher. Batches are flushed to the "batch" event.
+ */
+ add(data: any): Promise;
+ }
+
+ class Group {
+ constructor(options?: Bottleneck.ConstructorOptions);
+
+ id: string;
+ datastore: string;
+ connection?: Bottleneck.RedisConnection | Bottleneck.IORedisConnection;
+
+ /**
+ * Returns the limiter for the specified key.
+ * @param str - The limiter key.
+ */
+ key(str: string): Bottleneck;
+
+ /**
+ * Register an event listener.
+ * @param name - The event name.
+ * @param fn - The callback function.
+ */
+ on(name: string, fn: Function): void;
+ on(name: "error", fn: (error: any) => void): void;
+ on(name: "created", fn: (limiter: Bottleneck, key: string) => void): void;
+
+ /**
+ * Register an event listener for one event only.
+ * @param name - The event name.
+ * @param fn - The callback function.
+ */
+ once(name: string, fn: Function): void;
+ once(name: "error", fn: (error: any) => void): void;
+ once(name: "created", fn: (limiter: Bottleneck, key: string) => void): void;
+
+ /**
+ * Removes all registered event listeners.
+ * @param name - The optional event name to remove listeners from.
+ */
+ removeAllListeners(name?: string): void;
+
+ /**
+ * Updates the group settings.
+ * @param options - The new settings.
+ */
+ updateSettings(options: Bottleneck.ConstructorOptions): void;
+
+ /**
+ * Deletes the limiter for the given key.
+ * Returns true if a key was deleted.
+ * @param str - The key
+ */
+ deleteKey(str: string): Promise;
+
+ /**
+ * Disconnects the underlying redis clients, unless the Group was created with the `connection` option.
+ * @param flush - Write transient data before closing.
+ */
+ disconnect(flush?: boolean): Promise;
+
+ /**
+ * Returns all the key-limiter pairs.
+ */
+ limiters(): Bottleneck.GroupLimiterPair[];
+
+ /**
+ * Returns all Group keys in the local instance
+ */
+ keys(): string[];
+
+ /**
+ * Returns all Group keys in the Cluster
+ */
+ clusterKeys(): Promise;
+ }
+
+ class Events {
+ constructor(object: Object);
+
+ /**
+ * Returns the number of limiters for the event name
+ * @param name - The event name.
+ */
+ listenerCount(name: string): number;
+
+ /**
+ * Returns a promise with the first non-null/non-undefined result from a listener
+ * @param name - The event name.
+ * @param args - The arguments to pass to the event listeners.
+ */
+ trigger(name: string, ...args: any[]): Promise;
+ }
+ }
+
+ class Bottleneck {
+ public static readonly strategy: {
+ /**
+ * When adding a new job to a limiter, if the queue length reaches `highWater`, drop the oldest job with the lowest priority. This is useful when jobs that have been waiting for too long are not important anymore. If all the queued jobs are more important (based on their `priority` value) than the one being added, it will not be added.
+ */
+ readonly LEAK: Bottleneck.Strategy;
+ /**
+ * Same as `LEAK`, except it will only drop jobs that are less important than the one being added. If all the queued jobs are as or more important than the new one, it will not be added.
+ */
+ readonly OVERFLOW_PRIORITY: Bottleneck.Strategy;
+ /**
+ * When adding a new job to a limiter, if the queue length reaches `highWater`, do not add the new job. This strategy totally ignores priority levels.
+ */
+ readonly OVERFLOW: Bottleneck.Strategy;
+ /**
+ * When adding a new job to a limiter, if the queue length reaches `highWater`, the limiter falls into "blocked mode". All queued jobs are dropped and no new jobs will be accepted until the limiter unblocks. It will unblock after `penalty` milliseconds have passed without receiving a new job. `penalty` is equal to `15 * minTime` (or `5000` if `minTime` is `0`) by default and can be changed by calling `changePenalty()`. This strategy is ideal when bruteforce attacks are to be expected. This strategy totally ignores priority levels.
+ */
+ readonly BLOCK: Bottleneck.Strategy;
+ };
+
+ constructor(options?: Bottleneck.ConstructorOptions);
+
+ id: string;
+ datastore: string;
+ connection?: Bottleneck.RedisConnection | Bottleneck.IORedisConnection;
+
+ /**
+ * Returns a promise which will be resolved once the limiter is ready to accept jobs
+ * or rejected if it fails to start up.
+ */
+ ready(): Promise;
+
+ /**
+ * Returns a datastore-specific object of redis clients.
+ */
+ clients(): Bottleneck.ClientsList;
+
+ /**
+ * Returns the name of the Redis pubsub channel used for this limiter
+ */
+ channel(): string;
+
+ /**
+ * Disconnects the underlying redis clients, unless the limiter was created with the `connection` option.
+ * @param flush - Write transient data before closing.
+ */
+ disconnect(flush?: boolean): Promise;
+
+ /**
+ * Broadcast a string to every limiter in the Cluster.
+ */
+ publish(message: string): Promise;
+
+ /**
+ * Returns an object with the current number of jobs per status.
+ */
+ counts(): Bottleneck.Counts;
+
+ /**
+ * Returns the status of the job with the provided job id.
+ */
+ jobStatus(id: string): Bottleneck.Status;
+
+ /**
+ * Returns the status of the job with the provided job id.
+ */
+ jobs(status?: Bottleneck.Status): string[];
+
+ /**
+ * Returns the number of requests queued.
+ * @param priority - Returns the number of requests queued with the specified priority.
+ */
+ queued(priority?: number): number;
+
+ /**
+ * Returns the number of requests queued across the Cluster.
+ */
+ clusterQueued(): Promise;
+
+ /**
+ * Returns whether there are any jobs currently in the queue or in the process of being added to the queue.
+ */
+ empty(): boolean;
+
+ /**
+ * Returns the total weight of jobs in a RUNNING or EXECUTING state in the Cluster.
+ */
+ running(): Promise;
+
+ /**
+ * Returns the total weight of jobs in a DONE state in the Cluster.
+ */
+ done(): Promise;
+
+ /**
+ * If a request was added right now, would it be run immediately?
+ * @param weight - The weight of the request
+ */
+ check(weight?: number): Promise;
+
+ /**
+ * Register an event listener.
+ * @param name - The event name.
+ * @param fn - The callback function.
+ */
+ on(name: "error", fn: (error: any) => void): void;
+ on(name: "empty", fn: () => void): void;
+ on(name: "idle", fn: () => void): void;
+ on(name: "depleted", fn: (empty: boolean) => void): void;
+ on(name: "message", fn: (message: string) => void): void;
+ on(name: "debug", fn: (message: string, info: any) => void): void;
+ on(name: "dropped", fn: (dropped: Bottleneck.EventInfoDropped) => void): void;
+ on(name: "received", fn: (info: Bottleneck.EventInfo) => void): void;
+ on(name: "queued", fn: (info: Bottleneck.EventInfoQueued) => void): void;
+ on(name: "scheduled", fn: (info: Bottleneck.EventInfo) => void): void;
+ on(name: "executing", fn: (info: Bottleneck.EventInfoRetryable) => void): void;
+ on(name: "failed", fn: (error: any, info: Bottleneck.EventInfoRetryable) => Promise | number | void | null): void;
+ on(name: "retry", fn: (message: string, info: Bottleneck.EventInfoRetryable) => void): void;
+ on(name: "done", fn: (info: Bottleneck.EventInfoRetryable) => void): void;
+
+ /**
+ * Register an event listener for one event only.
+ * @param name - The event name.
+ * @param fn - The callback function.
+ */
+ once(name: "error", fn: (error: any) => void): void;
+ once(name: "empty", fn: () => void): void;
+ once(name: "idle", fn: () => void): void;
+ once(name: "depleted", fn: (empty: boolean) => void): void;
+ once(name: "message", fn: (message: string) => void): void;
+ once(name: "debug", fn: (message: string, info: any) => void): void;
+ once(name: "dropped", fn: (dropped: Bottleneck.EventInfoDropped) => void): void;
+ once(name: "received", fn: (info: Bottleneck.EventInfo) => void): void;
+ once(name: "queued", fn: (info: Bottleneck.EventInfoQueued) => void): void;
+ once(name: "scheduled", fn: (info: Bottleneck.EventInfo) => void): void;
+ once(name: "executing", fn: (info: Bottleneck.EventInfoRetryable) => void): void;
+ once(name: "failed", fn: (error: any, info: Bottleneck.EventInfoRetryable) => Promise | number | void | null): void;
+ once(name: "retry", fn: (message: string, info: Bottleneck.EventInfoRetryable) => void): void;
+ once(name: "done", fn: (info: Bottleneck.EventInfoRetryable) => void): void;
+
+ /**
+ * Removes all registered event listeners.
+ * @param name - The optional event name to remove listeners from.
+ */
+ removeAllListeners(name?: string): void;
+
+ /**
+ * Changes the settings for future requests.
+ * @param options - The new settings.
+ */
+ updateSettings(options?: Bottleneck.ConstructorOptions): Bottleneck;
+
+ /**
+ * Adds to the reservoir count and returns the new value.
+ */
+ incrementReservoir(incrementBy: number): Promise;
+
+ /**
+ * The `stop()` method is used to safely shutdown a limiter. It prevents any new jobs from being added to the limiter and waits for all Executing jobs to complete.
+ */
+ stop(options?: Bottleneck.StopOptions): Promise;
+
+ /**
+ * Returns the current reservoir count, if any.
+ */
+ currentReservoir(): Promise;
+
+ /**
+ * Chain this limiter to another.
+ * @param limiter - The limiter that requests to this limiter must also follow.
+ */
+ chain(limiter?: Bottleneck): Bottleneck;
+
+ <%_ for (var count of [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) { _%>
+ wrap, A<%= idx %><%_ } _%>>(fn: (<%= Array.apply(null, Array(count)).map((e, i) => i+1).map(i => `arg${i}: A${i}`).join(", ") %>) => PromiseLike): ((<%_ for (var idx = 1; idx <= count; idx++) { _%><%_ if (idx > 1) { %>, <% } %>arg<%= idx %>: A<%= idx %><%_ } _%>) => Promise) & { withOptions: (options: Bottleneck.JobOptions<%_ for (var idx = 1; idx <= count; idx++) { _%>, arg<%= idx %>: A<%= idx %><%_ } _%>) => Promise; };
+ <%_ } _%>
+
+ <%_ for (var count of [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) { _%>
+ submit, A<%= idx %><%_ } _%>>(fn: (<%_ for (var idx = 1; idx <= count; idx++) { _%>arg<%= idx %>: A<%= idx %>, <% } _%>callback: Bottleneck.Callback) => void<%_ for (var idx = 1; idx <= count; idx++) { _%>, arg<%= idx %>: A<%= idx %><%_ } _%>, callback: Bottleneck.Callback): void;
+ <%_ } _%>
+
+ <%_ for (var count of [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) { _%>
+ submit, A<%= idx %><%_ } _%>>(options: Bottleneck.JobOptions, fn: (<%_ for (var idx = 1; idx <= count; idx++) { _%>arg<%= idx %>: A<%= idx %>, <% } _%>callback: Bottleneck.Callback) => void<%_ for (var idx = 1; idx <= count; idx++) { _%>, arg<%= idx %>: A<%= idx %><%_ } _%>, callback: Bottleneck.Callback): void;
+ <%_ } _%>
+
+ <%_ for (var count of [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) { _%>
+ schedule, A<%= idx %><%_ } _%>>(fn: (<%= Array.apply(null, Array(count)).map((e, i) => i+1).map(i => `arg${i}: A${i}`).join(", ") %>) => PromiseLike<%_ for (var idx = 1; idx <= count; idx++) { _%>, arg<%= idx %>: A<%= idx %><%_ } _%>): Promise;
+ <%_ } _%>
+
+ <%_ for (var count of [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) { _%>
+ schedule, A<%= idx %><%_ } _%>>(options: Bottleneck.JobOptions, fn: (<%= Array.apply(null, Array(count)).map((e, i) => i+1).map(i => `arg${i}: A${i}`).join(", ") %>) => PromiseLike<%_ for (var idx = 1; idx <= count; idx++) { _%>, arg<%= idx %>: A<%= idx %><%_ } _%>): Promise;
+ <%_ } _%>
+ }
+
+ export default Bottleneck;
+}
diff --git a/node_modules/bottleneck/bower.json b/node_modules/bottleneck/bower.json
new file mode 100644
index 000000000..b72e87ee3
--- /dev/null
+++ b/node_modules/bottleneck/bower.json
@@ -0,0 +1,30 @@
+{
+ "name": "bottleneck",
+ "main": "bottleneck.js",
+ "version": "2.19.5",
+ "homepage": "https://github.com/SGrondin/bottleneck",
+ "authors": [
+ "SGrondin "
+ ],
+ "description": "Distributed task scheduler and rate limiter",
+ "moduleType": [
+ "globals",
+ "node"
+ ],
+ "keywords": [
+ "async",
+ "rate",
+ "limiter",
+ "limiting",
+ "throttle",
+ "throttling",
+ "load",
+ "ddos"
+ ],
+ "license": "MIT",
+ "ignore": [
+ "**/.*",
+ "node_modules",
+ "bower_components"
+ ]
+}
diff --git a/node_modules/bottleneck/es5.js b/node_modules/bottleneck/es5.js
new file mode 100644
index 000000000..a177b6540
--- /dev/null
+++ b/node_modules/bottleneck/es5.js
@@ -0,0 +1,5064 @@
+/**
+ * This file contains the full Bottleneck library (MIT) compiled to ES5.
+ * https://github.com/SGrondin/bottleneck
+ * It also contains the regenerator-runtime (MIT), necessary for Babel-generated ES5 code to execute promise and async/await code.
+ * See the following link for Copyright and License information:
+ * https://github.com/facebook/regenerator/blob/master/packages/regenerator-runtime/runtime.js
+ */
+(function (global, factory) {
+ typeof exports === 'object' && typeof module !== 'undefined' ? module.exports = factory() :
+ typeof define === 'function' && define.amd ? define(factory) :
+ (global.Bottleneck = factory());
+}(this, (function () { 'use strict';
+
+ var commonjsGlobal = typeof globalThis !== 'undefined' ? globalThis : typeof window !== 'undefined' ? window : typeof global !== 'undefined' ? global : typeof self !== 'undefined' ? self : {};
+
+ function createCommonjsModule(fn, module) {
+ return module = { exports: {} }, fn(module, module.exports), module.exports;
+ }
+
+ function getCjsExportFromNamespace (n) {
+ return n && n['default'] || n;
+ }
+
+ var runtime = createCommonjsModule(function (module) {
+ /**
+ * Copyright (c) 2014-present, Facebook, Inc.
+ *
+ * This source code is licensed under the MIT license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+ !(function(global) {
+
+ var Op = Object.prototype;
+ var hasOwn = Op.hasOwnProperty;
+ var undefined; // More compressible than void 0.
+ var $Symbol = typeof Symbol === "function" ? Symbol : {};
+ var iteratorSymbol = $Symbol.iterator || "@@iterator";
+ var asyncIteratorSymbol = $Symbol.asyncIterator || "@@asyncIterator";
+ var toStringTagSymbol = $Symbol.toStringTag || "@@toStringTag";
+ var runtime = global.regeneratorRuntime;
+ if (runtime) {
+ {
+ // If regeneratorRuntime is defined globally and we're in a module,
+ // make the exports object identical to regeneratorRuntime.
+ module.exports = runtime;
+ }
+ // Don't bother evaluating the rest of this file if the runtime was
+ // already defined globally.
+ return;
+ }
+
+ // Define the runtime globally (as expected by generated code) as either
+ // module.exports (if we're in a module) or a new, empty object.
+ runtime = global.regeneratorRuntime = module.exports;
+
+ function wrap(innerFn, outerFn, self, tryLocsList) {
+ // If outerFn provided and outerFn.prototype is a Generator, then outerFn.prototype instanceof Generator.
+ var protoGenerator = outerFn && outerFn.prototype instanceof Generator ? outerFn : Generator;
+ var generator = Object.create(protoGenerator.prototype);
+ var context = new Context(tryLocsList || []);
+
+ // The ._invoke method unifies the implementations of the .next,
+ // .throw, and .return methods.
+ generator._invoke = makeInvokeMethod(innerFn, self, context);
+
+ return generator;
+ }
+ runtime.wrap = wrap;
+
+ // Try/catch helper to minimize deoptimizations. Returns a completion
+ // record like context.tryEntries[i].completion. This interface could
+ // have been (and was previously) designed to take a closure to be
+ // invoked without arguments, but in all the cases we care about we
+ // already have an existing method we want to call, so there's no need
+ // to create a new function object. We can even get away with assuming
+ // the method takes exactly one argument, since that happens to be true
+ // in every case, so we don't have to touch the arguments object. The
+ // only additional allocation required is the completion record, which
+ // has a stable shape and so hopefully should be cheap to allocate.
+ function tryCatch(fn, obj, arg) {
+ try {
+ return { type: "normal", arg: fn.call(obj, arg) };
+ } catch (err) {
+ return { type: "throw", arg: err };
+ }
+ }
+
+ var GenStateSuspendedStart = "suspendedStart";
+ var GenStateSuspendedYield = "suspendedYield";
+ var GenStateExecuting = "executing";
+ var GenStateCompleted = "completed";
+
+ // Returning this object from the innerFn has the same effect as
+ // breaking out of the dispatch switch statement.
+ var ContinueSentinel = {};
+
+ // Dummy constructor functions that we use as the .constructor and
+ // .constructor.prototype properties for functions that return Generator
+ // objects. For full spec compliance, you may wish to configure your
+ // minifier not to mangle the names of these two functions.
+ function Generator() {}
+ function GeneratorFunction() {}
+ function GeneratorFunctionPrototype() {}
+
+ // This is a polyfill for %IteratorPrototype% for environments that
+ // don't natively support it.
+ var IteratorPrototype = {};
+ IteratorPrototype[iteratorSymbol] = function () {
+ return this;
+ };
+
+ var getProto = Object.getPrototypeOf;
+ var NativeIteratorPrototype = getProto && getProto(getProto(values([])));
+ if (NativeIteratorPrototype &&
+ NativeIteratorPrototype !== Op &&
+ hasOwn.call(NativeIteratorPrototype, iteratorSymbol)) {
+ // This environment has a native %IteratorPrototype%; use it instead
+ // of the polyfill.
+ IteratorPrototype = NativeIteratorPrototype;
+ }
+
+ var Gp = GeneratorFunctionPrototype.prototype =
+ Generator.prototype = Object.create(IteratorPrototype);
+ GeneratorFunction.prototype = Gp.constructor = GeneratorFunctionPrototype;
+ GeneratorFunctionPrototype.constructor = GeneratorFunction;
+ GeneratorFunctionPrototype[toStringTagSymbol] =
+ GeneratorFunction.displayName = "GeneratorFunction";
+
+ // Helper for defining the .next, .throw, and .return methods of the
+ // Iterator interface in terms of a single ._invoke method.
+ function defineIteratorMethods(prototype) {
+ ["next", "throw", "return"].forEach(function(method) {
+ prototype[method] = function(arg) {
+ return this._invoke(method, arg);
+ };
+ });
+ }
+
+ runtime.isGeneratorFunction = function(genFun) {
+ var ctor = typeof genFun === "function" && genFun.constructor;
+ return ctor
+ ? ctor === GeneratorFunction ||
+ // For the native GeneratorFunction constructor, the best we can
+ // do is to check its .name property.
+ (ctor.displayName || ctor.name) === "GeneratorFunction"
+ : false;
+ };
+
+ runtime.mark = function(genFun) {
+ if (Object.setPrototypeOf) {
+ Object.setPrototypeOf(genFun, GeneratorFunctionPrototype);
+ } else {
+ genFun.__proto__ = GeneratorFunctionPrototype;
+ if (!(toStringTagSymbol in genFun)) {
+ genFun[toStringTagSymbol] = "GeneratorFunction";
+ }
+ }
+ genFun.prototype = Object.create(Gp);
+ return genFun;
+ };
+
+ // Within the body of any async function, `await x` is transformed to
+ // `yield regeneratorRuntime.awrap(x)`, so that the runtime can test
+ // `hasOwn.call(value, "__await")` to determine if the yielded value is
+ // meant to be awaited.
+ runtime.awrap = function(arg) {
+ return { __await: arg };
+ };
+
+ function AsyncIterator(generator) {
+ function invoke(method, arg, resolve, reject) {
+ var record = tryCatch(generator[method], generator, arg);
+ if (record.type === "throw") {
+ reject(record.arg);
+ } else {
+ var result = record.arg;
+ var value = result.value;
+ if (value &&
+ typeof value === "object" &&
+ hasOwn.call(value, "__await")) {
+ return Promise.resolve(value.__await).then(function(value) {
+ invoke("next", value, resolve, reject);
+ }, function(err) {
+ invoke("throw", err, resolve, reject);
+ });
+ }
+
+ return Promise.resolve(value).then(function(unwrapped) {
+ // When a yielded Promise is resolved, its final value becomes
+ // the .value of the Promise<{value,done}> result for the
+ // current iteration.
+ result.value = unwrapped;
+ resolve(result);
+ }, function(error) {
+ // If a rejected Promise was yielded, throw the rejection back
+ // into the async generator function so it can be handled there.
+ return invoke("throw", error, resolve, reject);
+ });
+ }
+ }
+
+ var previousPromise;
+
+ function enqueue(method, arg) {
+ function callInvokeWithMethodAndArg() {
+ return new Promise(function(resolve, reject) {
+ invoke(method, arg, resolve, reject);
+ });
+ }
+
+ return previousPromise =
+ // If enqueue has been called before, then we want to wait until
+ // all previous Promises have been resolved before calling invoke,
+ // so that results are always delivered in the correct order. If
+ // enqueue has not been called before, then it is important to
+ // call invoke immediately, without waiting on a callback to fire,
+ // so that the async generator function has the opportunity to do
+ // any necessary setup in a predictable way. This predictability
+ // is why the Promise constructor synchronously invokes its
+ // executor callback, and why async functions synchronously
+ // execute code before the first await. Since we implement simple
+ // async functions in terms of async generators, it is especially
+ // important to get this right, even though it requires care.
+ previousPromise ? previousPromise.then(
+ callInvokeWithMethodAndArg,
+ // Avoid propagating failures to Promises returned by later
+ // invocations of the iterator.
+ callInvokeWithMethodAndArg
+ ) : callInvokeWithMethodAndArg();
+ }
+
+ // Define the unified helper method that is used to implement .next,
+ // .throw, and .return (see defineIteratorMethods).
+ this._invoke = enqueue;
+ }
+
+ defineIteratorMethods(AsyncIterator.prototype);
+ AsyncIterator.prototype[asyncIteratorSymbol] = function () {
+ return this;
+ };
+ runtime.AsyncIterator = AsyncIterator;
+
+ // Note that simple async functions are implemented on top of
+ // AsyncIterator objects; they just return a Promise for the value of
+ // the final result produced by the iterator.
+ runtime.async = function(innerFn, outerFn, self, tryLocsList) {
+ var iter = new AsyncIterator(
+ wrap(innerFn, outerFn, self, tryLocsList)
+ );
+
+ return runtime.isGeneratorFunction(outerFn)
+ ? iter // If outerFn is a generator, return the full iterator.
+ : iter.next().then(function(result) {
+ return result.done ? result.value : iter.next();
+ });
+ };
+
+ function makeInvokeMethod(innerFn, self, context) {
+ var state = GenStateSuspendedStart;
+
+ return function invoke(method, arg) {
+ if (state === GenStateExecuting) {
+ throw new Error("Generator is already running");
+ }
+
+ if (state === GenStateCompleted) {
+ if (method === "throw") {
+ throw arg;
+ }
+
+ // Be forgiving, per 25.3.3.3.3 of the spec:
+ // https://people.mozilla.org/~jorendorff/es6-draft.html#sec-generatorresume
+ return doneResult();
+ }
+
+ context.method = method;
+ context.arg = arg;
+
+ while (true) {
+ var delegate = context.delegate;
+ if (delegate) {
+ var delegateResult = maybeInvokeDelegate(delegate, context);
+ if (delegateResult) {
+ if (delegateResult === ContinueSentinel) continue;
+ return delegateResult;
+ }
+ }
+
+ if (context.method === "next") {
+ // Setting context._sent for legacy support of Babel's
+ // function.sent implementation.
+ context.sent = context._sent = context.arg;
+
+ } else if (context.method === "throw") {
+ if (state === GenStateSuspendedStart) {
+ state = GenStateCompleted;
+ throw context.arg;
+ }
+
+ context.dispatchException(context.arg);
+
+ } else if (context.method === "return") {
+ context.abrupt("return", context.arg);
+ }
+
+ state = GenStateExecuting;
+
+ var record = tryCatch(innerFn, self, context);
+ if (record.type === "normal") {
+ // If an exception is thrown from innerFn, we leave state ===
+ // GenStateExecuting and loop back for another invocation.
+ state = context.done
+ ? GenStateCompleted
+ : GenStateSuspendedYield;
+
+ if (record.arg === ContinueSentinel) {
+ continue;
+ }
+
+ return {
+ value: record.arg,
+ done: context.done
+ };
+
+ } else if (record.type === "throw") {
+ state = GenStateCompleted;
+ // Dispatch the exception by looping back around to the
+ // context.dispatchException(context.arg) call above.
+ context.method = "throw";
+ context.arg = record.arg;
+ }
+ }
+ };
+ }
+
+ // Call delegate.iterator[context.method](context.arg) and handle the
+ // result, either by returning a { value, done } result from the
+ // delegate iterator, or by modifying context.method and context.arg,
+ // setting context.delegate to null, and returning the ContinueSentinel.
+ function maybeInvokeDelegate(delegate, context) {
+ var method = delegate.iterator[context.method];
+ if (method === undefined) {
+ // A .throw or .return when the delegate iterator has no .throw
+ // method always terminates the yield* loop.
+ context.delegate = null;
+
+ if (context.method === "throw") {
+ if (delegate.iterator.return) {
+ // If the delegate iterator has a return method, give it a
+ // chance to clean up.
+ context.method = "return";
+ context.arg = undefined;
+ maybeInvokeDelegate(delegate, context);
+
+ if (context.method === "throw") {
+ // If maybeInvokeDelegate(context) changed context.method from
+ // "return" to "throw", let that override the TypeError below.
+ return ContinueSentinel;
+ }
+ }
+
+ context.method = "throw";
+ context.arg = new TypeError(
+ "The iterator does not provide a 'throw' method");
+ }
+
+ return ContinueSentinel;
+ }
+
+ var record = tryCatch(method, delegate.iterator, context.arg);
+
+ if (record.type === "throw") {
+ context.method = "throw";
+ context.arg = record.arg;
+ context.delegate = null;
+ return ContinueSentinel;
+ }
+
+ var info = record.arg;
+
+ if (! info) {
+ context.method = "throw";
+ context.arg = new TypeError("iterator result is not an object");
+ context.delegate = null;
+ return ContinueSentinel;
+ }
+
+ if (info.done) {
+ // Assign the result of the finished delegate to the temporary
+ // variable specified by delegate.resultName (see delegateYield).
+ context[delegate.resultName] = info.value;
+
+ // Resume execution at the desired location (see delegateYield).
+ context.next = delegate.nextLoc;
+
+ // If context.method was "throw" but the delegate handled the
+ // exception, let the outer generator proceed normally. If
+ // context.method was "next", forget context.arg since it has been
+ // "consumed" by the delegate iterator. If context.method was
+ // "return", allow the original .return call to continue in the
+ // outer generator.
+ if (context.method !== "return") {
+ context.method = "next";
+ context.arg = undefined;
+ }
+
+ } else {
+ // Re-yield the result returned by the delegate method.
+ return info;
+ }
+
+ // The delegate iterator is finished, so forget it and continue with
+ // the outer generator.
+ context.delegate = null;
+ return ContinueSentinel;
+ }
+
+ // Define Generator.prototype.{next,throw,return} in terms of the
+ // unified ._invoke helper method.
+ defineIteratorMethods(Gp);
+
+ Gp[toStringTagSymbol] = "Generator";
+
+ // A Generator should always return itself as the iterator object when the
+ // @@iterator function is called on it. Some browsers' implementations of the
+ // iterator prototype chain incorrectly implement this, causing the Generator
+ // object to not be returned from this call. This ensures that doesn't happen.
+ // See https://github.com/facebook/regenerator/issues/274 for more details.
+ Gp[iteratorSymbol] = function() {
+ return this;
+ };
+
+ Gp.toString = function() {
+ return "[object Generator]";
+ };
+
+ function pushTryEntry(locs) {
+ var entry = { tryLoc: locs[0] };
+
+ if (1 in locs) {
+ entry.catchLoc = locs[1];
+ }
+
+ if (2 in locs) {
+ entry.finallyLoc = locs[2];
+ entry.afterLoc = locs[3];
+ }
+
+ this.tryEntries.push(entry);
+ }
+
+ function resetTryEntry(entry) {
+ var record = entry.completion || {};
+ record.type = "normal";
+ delete record.arg;
+ entry.completion = record;
+ }
+
+ function Context(tryLocsList) {
+ // The root entry object (effectively a try statement without a catch
+ // or a finally block) gives us a place to store values thrown from
+ // locations where there is no enclosing try statement.
+ this.tryEntries = [{ tryLoc: "root" }];
+ tryLocsList.forEach(pushTryEntry, this);
+ this.reset(true);
+ }
+
+ runtime.keys = function(object) {
+ var keys = [];
+ for (var key in object) {
+ keys.push(key);
+ }
+ keys.reverse();
+
+ // Rather than returning an object with a next method, we keep
+ // things simple and return the next function itself.
+ return function next() {
+ while (keys.length) {
+ var key = keys.pop();
+ if (key in object) {
+ next.value = key;
+ next.done = false;
+ return next;
+ }
+ }
+
+ // To avoid creating an additional object, we just hang the .value
+ // and .done properties off the next function object itself. This
+ // also ensures that the minifier will not anonymize the function.
+ next.done = true;
+ return next;
+ };
+ };
+
+ function values(iterable) {
+ if (iterable) {
+ var iteratorMethod = iterable[iteratorSymbol];
+ if (iteratorMethod) {
+ return iteratorMethod.call(iterable);
+ }
+
+ if (typeof iterable.next === "function") {
+ return iterable;
+ }
+
+ if (!isNaN(iterable.length)) {
+ var i = -1, next = function next() {
+ while (++i < iterable.length) {
+ if (hasOwn.call(iterable, i)) {
+ next.value = iterable[i];
+ next.done = false;
+ return next;
+ }
+ }
+
+ next.value = undefined;
+ next.done = true;
+
+ return next;
+ };
+
+ return next.next = next;
+ }
+ }
+
+ // Return an iterator with no values.
+ return { next: doneResult };
+ }
+ runtime.values = values;
+
+ function doneResult() {
+ return { value: undefined, done: true };
+ }
+
+ Context.prototype = {
+ constructor: Context,
+
+ reset: function(skipTempReset) {
+ this.prev = 0;
+ this.next = 0;
+ // Resetting context._sent for legacy support of Babel's
+ // function.sent implementation.
+ this.sent = this._sent = undefined;
+ this.done = false;
+ this.delegate = null;
+
+ this.method = "next";
+ this.arg = undefined;
+
+ this.tryEntries.forEach(resetTryEntry);
+
+ if (!skipTempReset) {
+ for (var name in this) {
+ // Not sure about the optimal order of these conditions:
+ if (name.charAt(0) === "t" &&
+ hasOwn.call(this, name) &&
+ !isNaN(+name.slice(1))) {
+ this[name] = undefined;
+ }
+ }
+ }
+ },
+
+ stop: function() {
+ this.done = true;
+
+ var rootEntry = this.tryEntries[0];
+ var rootRecord = rootEntry.completion;
+ if (rootRecord.type === "throw") {
+ throw rootRecord.arg;
+ }
+
+ return this.rval;
+ },
+
+ dispatchException: function(exception) {
+ if (this.done) {
+ throw exception;
+ }
+
+ var context = this;
+ function handle(loc, caught) {
+ record.type = "throw";
+ record.arg = exception;
+ context.next = loc;
+
+ if (caught) {
+ // If the dispatched exception was caught by a catch block,
+ // then let that catch block handle the exception normally.
+ context.method = "next";
+ context.arg = undefined;
+ }
+
+ return !! caught;
+ }
+
+ for (var i = this.tryEntries.length - 1; i >= 0; --i) {
+ var entry = this.tryEntries[i];
+ var record = entry.completion;
+
+ if (entry.tryLoc === "root") {
+ // Exception thrown outside of any try block that could handle
+ // it, so set the completion value of the entire function to
+ // throw the exception.
+ return handle("end");
+ }
+
+ if (entry.tryLoc <= this.prev) {
+ var hasCatch = hasOwn.call(entry, "catchLoc");
+ var hasFinally = hasOwn.call(entry, "finallyLoc");
+
+ if (hasCatch && hasFinally) {
+ if (this.prev < entry.catchLoc) {
+ return handle(entry.catchLoc, true);
+ } else if (this.prev < entry.finallyLoc) {
+ return handle(entry.finallyLoc);
+ }
+
+ } else if (hasCatch) {
+ if (this.prev < entry.catchLoc) {
+ return handle(entry.catchLoc, true);
+ }
+
+ } else if (hasFinally) {
+ if (this.prev < entry.finallyLoc) {
+ return handle(entry.finallyLoc);
+ }
+
+ } else {
+ throw new Error("try statement without catch or finally");
+ }
+ }
+ }
+ },
+
+ abrupt: function(type, arg) {
+ for (var i = this.tryEntries.length - 1; i >= 0; --i) {
+ var entry = this.tryEntries[i];
+ if (entry.tryLoc <= this.prev &&
+ hasOwn.call(entry, "finallyLoc") &&
+ this.prev < entry.finallyLoc) {
+ var finallyEntry = entry;
+ break;
+ }
+ }
+
+ if (finallyEntry &&
+ (type === "break" ||
+ type === "continue") &&
+ finallyEntry.tryLoc <= arg &&
+ arg <= finallyEntry.finallyLoc) {
+ // Ignore the finally entry if control is not jumping to a
+ // location outside the try/catch block.
+ finallyEntry = null;
+ }
+
+ var record = finallyEntry ? finallyEntry.completion : {};
+ record.type = type;
+ record.arg = arg;
+
+ if (finallyEntry) {
+ this.method = "next";
+ this.next = finallyEntry.finallyLoc;
+ return ContinueSentinel;
+ }
+
+ return this.complete(record);
+ },
+
+ complete: function(record, afterLoc) {
+ if (record.type === "throw") {
+ throw record.arg;
+ }
+
+ if (record.type === "break" ||
+ record.type === "continue") {
+ this.next = record.arg;
+ } else if (record.type === "return") {
+ this.rval = this.arg = record.arg;
+ this.method = "return";
+ this.next = "end";
+ } else if (record.type === "normal" && afterLoc) {
+ this.next = afterLoc;
+ }
+
+ return ContinueSentinel;
+ },
+
+ finish: function(finallyLoc) {
+ for (var i = this.tryEntries.length - 1; i >= 0; --i) {
+ var entry = this.tryEntries[i];
+ if (entry.finallyLoc === finallyLoc) {
+ this.complete(entry.completion, entry.afterLoc);
+ resetTryEntry(entry);
+ return ContinueSentinel;
+ }
+ }
+ },
+
+ "catch": function(tryLoc) {
+ for (var i = this.tryEntries.length - 1; i >= 0; --i) {
+ var entry = this.tryEntries[i];
+ if (entry.tryLoc === tryLoc) {
+ var record = entry.completion;
+ if (record.type === "throw") {
+ var thrown = record.arg;
+ resetTryEntry(entry);
+ }
+ return thrown;
+ }
+ }
+
+ // The context.catch method must only be called with a location
+ // argument that corresponds to a known catch block.
+ throw new Error("illegal catch attempt");
+ },
+
+ delegateYield: function(iterable, resultName, nextLoc) {
+ this.delegate = {
+ iterator: values(iterable),
+ resultName: resultName,
+ nextLoc: nextLoc
+ };
+
+ if (this.method === "next") {
+ // Deliberately forget the last sent value so that we don't
+ // accidentally pass it on to the delegate.
+ this.arg = undefined;
+ }
+
+ return ContinueSentinel;
+ }
+ };
+ })(
+ // In sloppy mode, unbound `this` refers to the global object, fallback to
+ // Function constructor if we're in global strict mode. That is sadly a form
+ // of indirect eval which violates Content Security Policy.
+ (function() {
+ return this || (typeof self === "object" && self);
+ })() || Function("return this")()
+ );
+ });
+
+ function _typeof(obj) {
+ if (typeof Symbol === "function" && typeof Symbol.iterator === "symbol") {
+ _typeof = function (obj) {
+ return typeof obj;
+ };
+ } else {
+ _typeof = function (obj) {
+ return obj && typeof Symbol === "function" && obj.constructor === Symbol && obj !== Symbol.prototype ? "symbol" : typeof obj;
+ };
+ }
+
+ return _typeof(obj);
+ }
+
+ function asyncGeneratorStep(gen, resolve, reject, _next, _throw, key, arg) {
+ try {
+ var info = gen[key](arg);
+ var value = info.value;
+ } catch (error) {
+ reject(error);
+ return;
+ }
+
+ if (info.done) {
+ resolve(value);
+ } else {
+ Promise.resolve(value).then(_next, _throw);
+ }
+ }
+
+ function _asyncToGenerator(fn) {
+ return function () {
+ var self = this,
+ args = arguments;
+ return new Promise(function (resolve, reject) {
+ var gen = fn.apply(self, args);
+
+ function _next(value) {
+ asyncGeneratorStep(gen, resolve, reject, _next, _throw, "next", value);
+ }
+
+ function _throw(err) {
+ asyncGeneratorStep(gen, resolve, reject, _next, _throw, "throw", err);
+ }
+
+ _next(undefined);
+ });
+ };
+ }
+
+ function _classCallCheck(instance, Constructor) {
+ if (!(instance instanceof Constructor)) {
+ throw new TypeError("Cannot call a class as a function");
+ }
+ }
+
+ function _defineProperties(target, props) {
+ for (var i = 0; i < props.length; i++) {
+ var descriptor = props[i];
+ descriptor.enumerable = descriptor.enumerable || false;
+ descriptor.configurable = true;
+ if ("value" in descriptor) descriptor.writable = true;
+ Object.defineProperty(target, descriptor.key, descriptor);
+ }
+ }
+
+ function _createClass(Constructor, protoProps, staticProps) {
+ if (protoProps) _defineProperties(Constructor.prototype, protoProps);
+ if (staticProps) _defineProperties(Constructor, staticProps);
+ return Constructor;
+ }
+
+ function _inherits(subClass, superClass) {
+ if (typeof superClass !== "function" && superClass !== null) {
+ throw new TypeError("Super expression must either be null or a function");
+ }
+
+ subClass.prototype = Object.create(superClass && superClass.prototype, {
+ constructor: {
+ value: subClass,
+ writable: true,
+ configurable: true
+ }
+ });
+ if (superClass) _setPrototypeOf(subClass, superClass);
+ }
+
+ function _getPrototypeOf(o) {
+ _getPrototypeOf = Object.setPrototypeOf ? Object.getPrototypeOf : function _getPrototypeOf(o) {
+ return o.__proto__ || Object.getPrototypeOf(o);
+ };
+ return _getPrototypeOf(o);
+ }
+
+ function _setPrototypeOf(o, p) {
+ _setPrototypeOf = Object.setPrototypeOf || function _setPrototypeOf(o, p) {
+ o.__proto__ = p;
+ return o;
+ };
+
+ return _setPrototypeOf(o, p);
+ }
+
+ function isNativeReflectConstruct() {
+ if (typeof Reflect === "undefined" || !Reflect.construct) return false;
+ if (Reflect.construct.sham) return false;
+ if (typeof Proxy === "function") return true;
+
+ try {
+ Date.prototype.toString.call(Reflect.construct(Date, [], function () {}));
+ return true;
+ } catch (e) {
+ return false;
+ }
+ }
+
+ function _construct(Parent, args, Class) {
+ if (isNativeReflectConstruct()) {
+ _construct = Reflect.construct;
+ } else {
+ _construct = function _construct(Parent, args, Class) {
+ var a = [null];
+ a.push.apply(a, args);
+ var Constructor = Function.bind.apply(Parent, a);
+ var instance = new Constructor();
+ if (Class) _setPrototypeOf(instance, Class.prototype);
+ return instance;
+ };
+ }
+
+ return _construct.apply(null, arguments);
+ }
+
+ function _isNativeFunction(fn) {
+ return Function.toString.call(fn).indexOf("[native code]") !== -1;
+ }
+
+ function _wrapNativeSuper(Class) {
+ var _cache = typeof Map === "function" ? new Map() : undefined;
+
+ _wrapNativeSuper = function _wrapNativeSuper(Class) {
+ if (Class === null || !_isNativeFunction(Class)) return Class;
+
+ if (typeof Class !== "function") {
+ throw new TypeError("Super expression must either be null or a function");
+ }
+
+ if (typeof _cache !== "undefined") {
+ if (_cache.has(Class)) return _cache.get(Class);
+
+ _cache.set(Class, Wrapper);
+ }
+
+ function Wrapper() {
+ return _construct(Class, arguments, _getPrototypeOf(this).constructor);
+ }
+
+ Wrapper.prototype = Object.create(Class.prototype, {
+ constructor: {
+ value: Wrapper,
+ enumerable: false,
+ writable: true,
+ configurable: true
+ }
+ });
+ return _setPrototypeOf(Wrapper, Class);
+ };
+
+ return _wrapNativeSuper(Class);
+ }
+
+ function _assertThisInitialized(self) {
+ if (self === void 0) {
+ throw new ReferenceError("this hasn't been initialised - super() hasn't been called");
+ }
+
+ return self;
+ }
+
+ function _possibleConstructorReturn(self, call) {
+ if (call && (typeof call === "object" || typeof call === "function")) {
+ return call;
+ }
+
+ return _assertThisInitialized(self);
+ }
+
+ function _slicedToArray(arr, i) {
+ return _arrayWithHoles(arr) || _iterableToArrayLimit(arr, i) || _nonIterableRest();
+ }
+
+ function _toArray(arr) {
+ return _arrayWithHoles(arr) || _iterableToArray(arr) || _nonIterableRest();
+ }
+
+ function _toConsumableArray(arr) {
+ return _arrayWithoutHoles(arr) || _iterableToArray(arr) || _nonIterableSpread();
+ }
+
+ function _arrayWithoutHoles(arr) {
+ if (Array.isArray(arr)) {
+ for (var i = 0, arr2 = new Array(arr.length); i < arr.length; i++) arr2[i] = arr[i];
+
+ return arr2;
+ }
+ }
+
+ function _arrayWithHoles(arr) {
+ if (Array.isArray(arr)) return arr;
+ }
+
+ function _iterableToArray(iter) {
+ if (Symbol.iterator in Object(iter) || Object.prototype.toString.call(iter) === "[object Arguments]") return Array.from(iter);
+ }
+
+ function _iterableToArrayLimit(arr, i) {
+ var _arr = [];
+ var _n = true;
+ var _d = false;
+ var _e = undefined;
+
+ try {
+ for (var _i = arr[Symbol.iterator](), _s; !(_n = (_s = _i.next()).done); _n = true) {
+ _arr.push(_s.value);
+
+ if (i && _arr.length === i) break;
+ }
+ } catch (err) {
+ _d = true;
+ _e = err;
+ } finally {
+ try {
+ if (!_n && _i["return"] != null) _i["return"]();
+ } finally {
+ if (_d) throw _e;
+ }
+ }
+
+ return _arr;
+ }
+
+ function _nonIterableSpread() {
+ throw new TypeError("Invalid attempt to spread non-iterable instance");
+ }
+
+ function _nonIterableRest() {
+ throw new TypeError("Invalid attempt to destructure non-iterable instance");
+ }
+
+ var load = function load(received, defaults) {
+ var onto = arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : {};
+ var k, ref, v;
+
+ for (k in defaults) {
+ v = defaults[k];
+ onto[k] = (ref = received[k]) != null ? ref : v;
+ }
+
+ return onto;
+ };
+
+ var overwrite = function overwrite(received, defaults) {
+ var onto = arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : {};
+ var k, v;
+
+ for (k in received) {
+ v = received[k];
+
+ if (defaults[k] !== void 0) {
+ onto[k] = v;
+ }
+ }
+
+ return onto;
+ };
+
+ var parser = {
+ load: load,
+ overwrite: overwrite
+ };
+
+ var DLList;
+
+ DLList =
+ /*#__PURE__*/
+ function () {
+ function DLList(incr, decr) {
+ _classCallCheck(this, DLList);
+
+ this.incr = incr;
+ this.decr = decr;
+ this._first = null;
+ this._last = null;
+ this.length = 0;
+ }
+
+ _createClass(DLList, [{
+ key: "push",
+ value: function push(value) {
+ var node;
+ this.length++;
+
+ if (typeof this.incr === "function") {
+ this.incr();
+ }
+
+ node = {
+ value: value,
+ prev: this._last,
+ next: null
+ };
+
+ if (this._last != null) {
+ this._last.next = node;
+ this._last = node;
+ } else {
+ this._first = this._last = node;
+ }
+
+ return void 0;
+ }
+ }, {
+ key: "shift",
+ value: function shift() {
+ var value;
+
+ if (this._first == null) {
+ return;
+ } else {
+ this.length--;
+
+ if (typeof this.decr === "function") {
+ this.decr();
+ }
+ }
+
+ value = this._first.value;
+
+ if ((this._first = this._first.next) != null) {
+ this._first.prev = null;
+ } else {
+ this._last = null;
+ }
+
+ return value;
+ }
+ }, {
+ key: "first",
+ value: function first() {
+ if (this._first != null) {
+ return this._first.value;
+ }
+ }
+ }, {
+ key: "getArray",
+ value: function getArray() {
+ var node, ref, results;
+ node = this._first;
+ results = [];
+
+ while (node != null) {
+ results.push((ref = node, node = node.next, ref.value));
+ }
+
+ return results;
+ }
+ }, {
+ key: "forEachShift",
+ value: function forEachShift(cb) {
+ var node;
+ node = this.shift();
+
+ while (node != null) {
+ cb(node), node = this.shift();
+ }
+
+ return void 0;
+ }
+ }, {
+ key: "debug",
+ value: function debug() {
+ var node, ref, ref1, ref2, results;
+ node = this._first;
+ results = [];
+
+ while (node != null) {
+ results.push((ref = node, node = node.next, {
+ value: ref.value,
+ prev: (ref1 = ref.prev) != null ? ref1.value : void 0,
+ next: (ref2 = ref.next) != null ? ref2.value : void 0
+ }));
+ }
+
+ return results;
+ }
+ }]);
+
+ return DLList;
+ }();
+
+ var DLList_1 = DLList;
+
+ var Events;
+
+ Events =
+ /*#__PURE__*/
+ function () {
+ function Events(instance) {
+ var _this = this;
+
+ _classCallCheck(this, Events);
+
+ this.instance = instance;
+ this._events = {};
+
+ if (this.instance.on != null || this.instance.once != null || this.instance.removeAllListeners != null) {
+ throw new Error("An Emitter already exists for this object");
+ }
+
+ this.instance.on = function (name, cb) {
+ return _this._addListener(name, "many", cb);
+ };
+
+ this.instance.once = function (name, cb) {
+ return _this._addListener(name, "once", cb);
+ };
+
+ this.instance.removeAllListeners = function () {
+ var name = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : null;
+
+ if (name != null) {
+ return delete _this._events[name];
+ } else {
+ return _this._events = {};
+ }
+ };
+ }
+
+ _createClass(Events, [{
+ key: "_addListener",
+ value: function _addListener(name, status, cb) {
+ var base;
+
+ if ((base = this._events)[name] == null) {
+ base[name] = [];
+ }
+
+ this._events[name].push({
+ cb: cb,
+ status: status
+ });
+
+ return this.instance;
+ }
+ }, {
+ key: "listenerCount",
+ value: function listenerCount(name) {
+ if (this._events[name] != null) {
+ return this._events[name].length;
+ } else {
+ return 0;
+ }
+ }
+ }, {
+ key: "trigger",
+ value: function () {
+ var _trigger = _asyncToGenerator(
+ /*#__PURE__*/
+ regeneratorRuntime.mark(function _callee2(name) {
+ var _this2 = this;
+
+ var _len,
+ args,
+ _key,
+ e,
+ promises,
+ _args2 = arguments;
+
+ return regeneratorRuntime.wrap(function _callee2$(_context2) {
+ while (1) {
+ switch (_context2.prev = _context2.next) {
+ case 0:
+ for (_len = _args2.length, args = new Array(_len > 1 ? _len - 1 : 0), _key = 1; _key < _len; _key++) {
+ args[_key - 1] = _args2[_key];
+ }
+
+ _context2.prev = 1;
+
+ if (name !== "debug") {
+ this.trigger("debug", "Event triggered: ".concat(name), args);
+ }
+
+ if (!(this._events[name] == null)) {
+ _context2.next = 5;
+ break;
+ }
+
+ return _context2.abrupt("return");
+
+ case 5:
+ this._events[name] = this._events[name].filter(function (listener) {
+ return listener.status !== "none";
+ });
+ promises = this._events[name].map(
+ /*#__PURE__*/
+ function () {
+ var _ref = _asyncToGenerator(
+ /*#__PURE__*/
+ regeneratorRuntime.mark(function _callee(listener) {
+ var e, returned;
+ return regeneratorRuntime.wrap(function _callee$(_context) {
+ while (1) {
+ switch (_context.prev = _context.next) {
+ case 0:
+ if (!(listener.status === "none")) {
+ _context.next = 2;
+ break;
+ }
+
+ return _context.abrupt("return");
+
+ case 2:
+ if (listener.status === "once") {
+ listener.status = "none";
+ }
+
+ _context.prev = 3;
+ returned = typeof listener.cb === "function" ? listener.cb.apply(listener, args) : void 0;
+
+ if (!(typeof (returned != null ? returned.then : void 0) === "function")) {
+ _context.next = 11;
+ break;
+ }
+
+ _context.next = 8;
+ return returned;
+
+ case 8:
+ return _context.abrupt("return", _context.sent);
+
+ case 11:
+ return _context.abrupt("return", returned);
+
+ case 12:
+ _context.next = 19;
+ break;
+
+ case 14:
+ _context.prev = 14;
+ _context.t0 = _context["catch"](3);
+ e = _context.t0;
+
+ {
+ _this2.trigger("error", e);
+ }
+
+ return _context.abrupt("return", null);
+
+ case 19:
+ case "end":
+ return _context.stop();
+ }
+ }
+ }, _callee, null, [[3, 14]]);
+ }));
+
+ return function (_x2) {
+ return _ref.apply(this, arguments);
+ };
+ }());
+ _context2.next = 9;
+ return Promise.all(promises);
+
+ case 9:
+ _context2.t0 = function (x) {
+ return x != null;
+ };
+
+ return _context2.abrupt("return", _context2.sent.find(_context2.t0));
+
+ case 13:
+ _context2.prev = 13;
+ _context2.t1 = _context2["catch"](1);
+ e = _context2.t1;
+
+ {
+ this.trigger("error", e);
+ }
+
+ return _context2.abrupt("return", null);
+
+ case 18:
+ case "end":
+ return _context2.stop();
+ }
+ }
+ }, _callee2, this, [[1, 13]]);
+ }));
+
+ function trigger(_x) {
+ return _trigger.apply(this, arguments);
+ }
+
+ return trigger;
+ }()
+ }]);
+
+ return Events;
+ }();
+
+ var Events_1 = Events;
+
+ var DLList$1, Events$1, Queues;
+ DLList$1 = DLList_1;
+ Events$1 = Events_1;
+
+ Queues =
+ /*#__PURE__*/
+ function () {
+ function Queues(num_priorities) {
+ _classCallCheck(this, Queues);
+
+ var i;
+ this.Events = new Events$1(this);
+ this._length = 0;
+
+ this._lists = function () {
+ var _this = this;
+
+ var j, ref, results;
+ results = [];
+
+ for (i = j = 1, ref = num_priorities; 1 <= ref ? j <= ref : j >= ref; i = 1 <= ref ? ++j : --j) {
+ results.push(new DLList$1(function () {
+ return _this.incr();
+ }, function () {
+ return _this.decr();
+ }));
+ }
+
+ return results;
+ }.call(this);
+ }
+
+ _createClass(Queues, [{
+ key: "incr",
+ value: function incr() {
+ if (this._length++ === 0) {
+ return this.Events.trigger("leftzero");
+ }
+ }
+ }, {
+ key: "decr",
+ value: function decr() {
+ if (--this._length === 0) {
+ return this.Events.trigger("zero");
+ }
+ }
+ }, {
+ key: "push",
+ value: function push(job) {
+ return this._lists[job.options.priority].push(job);
+ }
+ }, {
+ key: "queued",
+ value: function queued(priority) {
+ if (priority != null) {
+ return this._lists[priority].length;
+ } else {
+ return this._length;
+ }
+ }
+ }, {
+ key: "shiftAll",
+ value: function shiftAll(fn) {
+ return this._lists.forEach(function (list) {
+ return list.forEachShift(fn);
+ });
+ }
+ }, {
+ key: "getFirst",
+ value: function getFirst() {
+ var arr = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : this._lists;
+ var j, len, list;
+
+ for (j = 0, len = arr.length; j < len; j++) {
+ list = arr[j];
+
+ if (list.length > 0) {
+ return list;
+ }
+ }
+
+ return [];
+ }
+ }, {
+ key: "shiftLastFrom",
+ value: function shiftLastFrom(priority) {
+ return this.getFirst(this._lists.slice(priority).reverse()).shift();
+ }
+ }]);
+
+ return Queues;
+ }();
+
+ var Queues_1 = Queues;
+
+ var BottleneckError;
+
+ BottleneckError =
+ /*#__PURE__*/
+ function (_Error) {
+ _inherits(BottleneckError, _Error);
+
+ function BottleneckError() {
+ _classCallCheck(this, BottleneckError);
+
+ return _possibleConstructorReturn(this, _getPrototypeOf(BottleneckError).apply(this, arguments));
+ }
+
+ return BottleneckError;
+ }(_wrapNativeSuper(Error));
+
+ var BottleneckError_1 = BottleneckError;
+
+ var BottleneckError$1, DEFAULT_PRIORITY, Job, NUM_PRIORITIES, parser$1;
+ NUM_PRIORITIES = 10;
+ DEFAULT_PRIORITY = 5;
+ parser$1 = parser;
+ BottleneckError$1 = BottleneckError_1;
+
+ Job =
+ /*#__PURE__*/
+ function () {
+ function Job(task, args, options, jobDefaults, rejectOnDrop, Events, _states, Promise) {
+ var _this = this;
+
+ _classCallCheck(this, Job);
+
+ this.task = task;
+ this.args = args;
+ this.rejectOnDrop = rejectOnDrop;
+ this.Events = Events;
+ this._states = _states;
+ this.Promise = Promise;
+ this.options = parser$1.load(options, jobDefaults);
+ this.options.priority = this._sanitizePriority(this.options.priority);
+
+ if (this.options.id === jobDefaults.id) {
+ this.options.id = "".concat(this.options.id, "-").concat(this._randomIndex());
+ }
+
+ this.promise = new this.Promise(function (_resolve, _reject) {
+ _this._resolve = _resolve;
+ _this._reject = _reject;
+ });
+ this.retryCount = 0;
+ }
+
+ _createClass(Job, [{
+ key: "_sanitizePriority",
+ value: function _sanitizePriority(priority) {
+ var sProperty;
+ sProperty = ~~priority !== priority ? DEFAULT_PRIORITY : priority;
+
+ if (sProperty < 0) {
+ return 0;
+ } else if (sProperty > NUM_PRIORITIES - 1) {
+ return NUM_PRIORITIES - 1;
+ } else {
+ return sProperty;
+ }
+ }
+ }, {
+ key: "_randomIndex",
+ value: function _randomIndex() {
+ return Math.random().toString(36).slice(2);
+ }
+ }, {
+ key: "doDrop",
+ value: function doDrop() {
+ var _ref = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {},
+ error = _ref.error,
+ _ref$message = _ref.message,
+ message = _ref$message === void 0 ? "This job has been dropped by Bottleneck" : _ref$message;
+
+ if (this._states.remove(this.options.id)) {
+ if (this.rejectOnDrop) {
+ this._reject(error != null ? error : new BottleneckError$1(message));
+ }
+
+ this.Events.trigger("dropped", {
+ args: this.args,
+ options: this.options,
+ task: this.task,
+ promise: this.promise
+ });
+ return true;
+ } else {
+ return false;
+ }
+ }
+ }, {
+ key: "_assertStatus",
+ value: function _assertStatus(expected) {
+ var status;
+ status = this._states.jobStatus(this.options.id);
+
+ if (!(status === expected || expected === "DONE" && status === null)) {
+ throw new BottleneckError$1("Invalid job status ".concat(status, ", expected ").concat(expected, ". Please open an issue at https://github.com/SGrondin/bottleneck/issues"));
+ }
+ }
+ }, {
+ key: "doReceive",
+ value: function doReceive() {
+ this._states.start(this.options.id);
+
+ return this.Events.trigger("received", {
+ args: this.args,
+ options: this.options
+ });
+ }
+ }, {
+ key: "doQueue",
+ value: function doQueue(reachedHWM, blocked) {
+ this._assertStatus("RECEIVED");
+
+ this._states.next(this.options.id);
+
+ return this.Events.trigger("queued", {
+ args: this.args,
+ options: this.options,
+ reachedHWM: reachedHWM,
+ blocked: blocked
+ });
+ }
+ }, {
+ key: "doRun",
+ value: function doRun() {
+ if (this.retryCount === 0) {
+ this._assertStatus("QUEUED");
+
+ this._states.next(this.options.id);
+ } else {
+ this._assertStatus("EXECUTING");
+ }
+
+ return this.Events.trigger("scheduled", {
+ args: this.args,
+ options: this.options
+ });
+ }
+ }, {
+ key: "doExecute",
+ value: function () {
+ var _doExecute = _asyncToGenerator(
+ /*#__PURE__*/
+ regeneratorRuntime.mark(function _callee(chained, clearGlobalState, run, free) {
+ var error, eventInfo, passed;
+ return regeneratorRuntime.wrap(function _callee$(_context) {
+ while (1) {
+ switch (_context.prev = _context.next) {
+ case 0:
+ if (this.retryCount === 0) {
+ this._assertStatus("RUNNING");
+
+ this._states.next(this.options.id);
+ } else {
+ this._assertStatus("EXECUTING");
+ }
+
+ eventInfo = {
+ args: this.args,
+ options: this.options,
+ retryCount: this.retryCount
+ };
+ this.Events.trigger("executing", eventInfo);
+ _context.prev = 3;
+ _context.next = 6;
+ return chained != null ? chained.schedule.apply(chained, [this.options, this.task].concat(_toConsumableArray(this.args))) : this.task.apply(this, _toConsumableArray(this.args));
+
+ case 6:
+ passed = _context.sent;
+
+ if (!clearGlobalState()) {
+ _context.next = 13;
+ break;
+ }
+
+ this.doDone(eventInfo);
+ _context.next = 11;
+ return free(this.options, eventInfo);
+
+ case 11:
+ this._assertStatus("DONE");
+
+ return _context.abrupt("return", this._resolve(passed));
+
+ case 13:
+ _context.next = 19;
+ break;
+
+ case 15:
+ _context.prev = 15;
+ _context.t0 = _context["catch"](3);
+ error = _context.t0;
+ return _context.abrupt("return", this._onFailure(error, eventInfo, clearGlobalState, run, free));
+
+ case 19:
+ case "end":
+ return _context.stop();
+ }
+ }
+ }, _callee, this, [[3, 15]]);
+ }));
+
+ function doExecute(_x, _x2, _x3, _x4) {
+ return _doExecute.apply(this, arguments);
+ }
+
+ return doExecute;
+ }()
+ }, {
+ key: "doExpire",
+ value: function doExpire(clearGlobalState, run, free) {
+ var error, eventInfo;
+
+ if (this._states.jobStatus(this.options.id === "RUNNING")) {
+ this._states.next(this.options.id);
+ }
+
+ this._assertStatus("EXECUTING");
+
+ eventInfo = {
+ args: this.args,
+ options: this.options,
+ retryCount: this.retryCount
+ };
+ error = new BottleneckError$1("This job timed out after ".concat(this.options.expiration, " ms."));
+ return this._onFailure(error, eventInfo, clearGlobalState, run, free);
+ }
+ }, {
+ key: "_onFailure",
+ value: function () { // async: handle a failed job — either schedule a retry (if a "failed" listener returns a delay) or settle the job as rejected.
+ var _onFailure2 = _asyncToGenerator(
+ /*#__PURE__*/
+ regeneratorRuntime.mark(function _callee2(error, eventInfo, clearGlobalState, run, free) {
+ var retry, retryAfter;
+ return regeneratorRuntime.wrap(function _callee2$(_context2) {
+ while (1) {
+ switch (_context2.prev = _context2.next) {
+ case 0:
+ if (!clearGlobalState()) { // only the first terminal outcome wins; if state was already cleared, do nothing (jump to case 16 = end)
+ _context2.next = 16;
+ break;
+ }
+
+ _context2.next = 3;
+ return this.Events.trigger("failed", error, eventInfo); // await listeners; their return value (if any) is the retry delay
+
+ case 3:
+ retry = _context2.sent;
+
+ if (!(retry != null)) {
+ _context2.next = 11;
+ break;
+ }
+
+ retryAfter = ~~retry; // ~~ truncates the listener-supplied delay to an int32
+ this.Events.trigger("retry", "Retrying ".concat(this.options.id, " after ").concat(retryAfter, " ms"), eventInfo);
+ this.retryCount++;
+ return _context2.abrupt("return", run(retryAfter)); // re-run the job after the delay instead of settling it
+
+ case 11:
+ this.doDone(eventInfo); // no retry requested: advance to DONE, release resources, reject the caller's promise
+ _context2.next = 14;
+ return free(this.options, eventInfo);
+
+ case 14:
+ this._assertStatus("DONE");
+
+ return _context2.abrupt("return", this._reject(error));
+
+ case 16:
+ case "end":
+ return _context2.stop();
+ }
+ }
+ }, _callee2, this);
+ }));
+
+ function _onFailure(_x5, _x6, _x7, _x8, _x9) {
+ return _onFailure2.apply(this, arguments);
+ }
+
+ return _onFailure;
+ }()
+ }, {
+ key: "doDone",
+ value: function doDone(eventInfo) { // Mark a successfully finished job: assert EXECUTING, advance the state machine (to DONE), then notify "done" listeners. Order matters — assert before transition, transition before event.
+ this._assertStatus("EXECUTING");
+
+ this._states.next(this.options.id);
+
+ return this.Events.trigger("done", eventInfo); // returns the trigger's result so callers may await listener completion
+ }
+ }]);
+
+ return Job;
+ }();
+
+ var Job_1 = Job; // CommonJS-style export alias produced by the bundler
+
+ var BottleneckError$2, LocalDatastore, parser$2; // NOTE: LocalDatastore is declared here and assigned below — do not remove from this declaration list
+ parser$2 = parser; // module-local aliases for the bundled `parser` and `BottleneckError` modules
+ BottleneckError$2 = BottleneckError_1;
+
+ LocalDatastore =
+ /*#__PURE__*/
+ function () { // In-process (non-Redis) datastore backing a Bottleneck limiter: tracks running/done weights, reservoir, and next-request time entirely in memory.
+ function LocalDatastore(instance, storeOptions, storeInstanceOptions) {
+ _classCallCheck(this, LocalDatastore);
+
+ this.instance = instance; // owning limiter instance
+ this.storeOptions = storeOptions;
+ this.clientId = this.instance._randomIndex();
+ parser$2.load(storeInstanceOptions, storeInstanceOptions, this); // copies instance options (presumably incl. Promise/heartbeatInterval/timeout) onto `this` — TODO confirm against parser module
+ this._nextRequest = this._lastReservoirRefresh = this._lastReservoirIncrease = Date.now();
+ this._running = 0; // sum of weights currently executing
+ this._done = 0; // sum of weights completed
+ this._unblockTime = 0; // timestamp until which the BLOCK strategy rejects submissions
+ this.ready = this.Promise.resolve(); // local store is ready immediately (no connection to establish)
+ this.clients = {};
+
+ this._startHeartbeat();
+ }
+
+ _createClass(LocalDatastore, [{
+ key: "_startHeartbeat",
+ value: function _startHeartbeat() { // Start (or stop) the periodic timer that refreshes/increases the reservoir; idempotent — only starts when no heartbeat exists and a refresh or increase is configured.
+ var _this = this;
+
+ var base;
+
+ if (this.heartbeat == null && (this.storeOptions.reservoirRefreshInterval != null && this.storeOptions.reservoirRefreshAmount != null || this.storeOptions.reservoirIncreaseInterval != null && this.storeOptions.reservoirIncreaseAmount != null)) {
+ return typeof (base = this.heartbeat = setInterval(function () { // assigns the interval to this.heartbeat, then calls .unref() on it if available so the timer won't keep the Node process alive
+ var amount, incr, maximum, now, reservoir;
+ now = Date.now();
+
+ if (_this.storeOptions.reservoirRefreshInterval != null && now >= _this._lastReservoirRefresh + _this.storeOptions.reservoirRefreshInterval) { // periodic full reset of the reservoir to reservoirRefreshAmount
+ _this._lastReservoirRefresh = now;
+ _this.storeOptions.reservoir = _this.storeOptions.reservoirRefreshAmount;
+
+ _this.instance._drainAll(_this.computeCapacity()); // capacity may have grown — let queued jobs proceed
+ }
+
+ if (_this.storeOptions.reservoirIncreaseInterval != null && now >= _this._lastReservoirIncrease + _this.storeOptions.reservoirIncreaseInterval) { // periodic additive increase, optionally capped at reservoirIncreaseMaximum
+ var _this$storeOptions = _this.storeOptions;
+ amount = _this$storeOptions.reservoirIncreaseAmount;
+ maximum = _this$storeOptions.reservoirIncreaseMaximum;
+ reservoir = _this$storeOptions.reservoir;
+ _this._lastReservoirIncrease = now;
+ incr = maximum != null ? Math.min(amount, maximum - reservoir) : amount;
+
+ if (incr > 0) {
+ _this.storeOptions.reservoir += incr;
+ return _this.instance._drainAll(_this.computeCapacity());
+ }
+ }
+ }, this.heartbeatInterval)).unref === "function" ? base.unref() : void 0; // heartbeatInterval presumably loaded by parser$2.load in the constructor — TODO confirm
+ } else {
+ return clearInterval(this.heartbeat); // no-op when this.heartbeat is undefined
+ }
+ }
+ }, {
+ key: "__publish__",
+ value: function () { // async: local stand-in for pub/sub — re-emits the message on the owning instance's "message" event.
+ var _publish__ = _asyncToGenerator(
+ /*#__PURE__*/
+ regeneratorRuntime.mark(function _callee(message) {
+ return regeneratorRuntime.wrap(function _callee$(_context) {
+ while (1) {
+ switch (_context.prev = _context.next) {
+ case 0:
+ _context.next = 2;
+ return this.yieldLoop(); // defer to the event loop so local calls behave asynchronously like the Redis store
+
+ case 2:
+ return _context.abrupt("return", this.instance.Events.trigger("message", message.toString()));
+
+ case 3:
+ case "end":
+ return _context.stop();
+ }
+ }
+ }, _callee, this);
+ }));
+
+ function __publish__(_x) {
+ return _publish__.apply(this, arguments);
+ }
+
+ return __publish__;
+ }()
+ }, {
+ key: "__disconnect__",
+ value: function () { // async: "disconnect" for the local store just stops the heartbeat; `flush` is accepted for interface parity with the Redis store but unused here.
+ var _disconnect__ = _asyncToGenerator(
+ /*#__PURE__*/
+ regeneratorRuntime.mark(function _callee2(flush) {
+ return regeneratorRuntime.wrap(function _callee2$(_context2) {
+ while (1) {
+ switch (_context2.prev = _context2.next) {
+ case 0:
+ _context2.next = 2;
+ return this.yieldLoop();
+
+ case 2:
+ clearInterval(this.heartbeat); // safe when heartbeat was never started
+ return _context2.abrupt("return", this.Promise.resolve());
+
+ case 4:
+ case "end":
+ return _context2.stop();
+ }
+ }
+ }, _callee2, this);
+ }));
+
+ function __disconnect__(_x2) {
+ return _disconnect__.apply(this, arguments);
+ }
+
+ return __disconnect__;
+ }()
+ }, {
+ key: "yieldLoop",
+ value: function yieldLoop() { // Promise that resolves after `t` ms (default 0) — used to make every local-store operation asynchronous, matching the Redis store's contract.
+ var t = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : 0; // transpiled default parameter: t = 0
+ return new this.Promise(function (resolve, reject) {
+ return setTimeout(resolve, t);
+ });
+ }
+ }, {
+ key: "computePenalty",
+ value: function computePenalty() { // Penalty (ms) applied by the BLOCK strategy: explicit `penalty` option, else 15 * minTime, else 5000 when minTime is 0.
+ var ref;
+ return (ref = this.storeOptions.penalty) != null ? ref : 15 * this.storeOptions.minTime || 5000; // `|| 5000` catches 15 * 0 === 0
+ }
+ }, {
+ key: "__updateSettings__",
+ value: function () { // async: overwrite store options in place, restart the heartbeat (config may have changed), and drain any newly-unblocked jobs.
+ var _updateSettings__ = _asyncToGenerator(
+ /*#__PURE__*/
+ regeneratorRuntime.mark(function _callee3(options) {
+ return regeneratorRuntime.wrap(function _callee3$(_context3) {
+ while (1) {
+ switch (_context3.prev = _context3.next) {
+ case 0:
+ _context3.next = 2;
+ return this.yieldLoop();
+
+ case 2:
+ parser$2.overwrite(options, options, this.storeOptions);
+
+ this._startHeartbeat();
+
+ this.instance._drainAll(this.computeCapacity());
+
+ return _context3.abrupt("return", true); // always reports success for the local store
+
+ case 6:
+ case "end":
+ return _context3.stop();
+ }
+ }
+ }, _callee3, this);
+ }));
+
+ function __updateSettings__(_x3) {
+ return _updateSettings__.apply(this, arguments);
+ }
+
+ return __updateSettings__;
+ }()
+ }, {
+ key: "__running__",
+ value: function () { // async getter: total weight of currently executing jobs.
+ var _running__ = _asyncToGenerator(
+ /*#__PURE__*/
+ regeneratorRuntime.mark(function _callee4() {
+ return regeneratorRuntime.wrap(function _callee4$(_context4) {
+ while (1) {
+ switch (_context4.prev = _context4.next) {
+ case 0:
+ _context4.next = 2;
+ return this.yieldLoop();
+
+ case 2:
+ return _context4.abrupt("return", this._running);
+
+ case 3:
+ case "end":
+ return _context4.stop();
+ }
+ }
+ }, _callee4, this);
+ }));
+
+ function __running__() {
+ return _running__.apply(this, arguments);
+ }
+
+ return __running__;
+ }()
+ }, {
+ key: "__queued__",
+ value: function () { // async getter: delegates to the owning instance's in-memory queue count.
+ var _queued__ = _asyncToGenerator(
+ /*#__PURE__*/
+ regeneratorRuntime.mark(function _callee5() {
+ return regeneratorRuntime.wrap(function _callee5$(_context5) {
+ while (1) {
+ switch (_context5.prev = _context5.next) {
+ case 0:
+ _context5.next = 2;
+ return this.yieldLoop();
+
+ case 2:
+ return _context5.abrupt("return", this.instance.queued());
+
+ case 3:
+ case "end":
+ return _context5.stop();
+ }
+ }
+ }, _callee5, this);
+ }));
+
+ function __queued__() {
+ return _queued__.apply(this, arguments);
+ }
+
+ return __queued__;
+ }()
+ }, {
+ key: "__done__",
+ value: function () { // async getter: total weight of completed jobs.
+ var _done__ = _asyncToGenerator(
+ /*#__PURE__*/
+ regeneratorRuntime.mark(function _callee6() {
+ return regeneratorRuntime.wrap(function _callee6$(_context6) {
+ while (1) {
+ switch (_context6.prev = _context6.next) {
+ case 0:
+ _context6.next = 2;
+ return this.yieldLoop();
+
+ case 2:
+ return _context6.abrupt("return", this._done);
+
+ case 3:
+ case "end":
+ return _context6.stop();
+ }
+ }
+ }, _callee6, this);
+ }));
+
+ function __done__() {
+ return _done__.apply(this, arguments);
+ }
+
+ return __done__;
+ }()
+ }, {
+ key: "__groupCheck__",
+ value: function () { // async: true when this limiter has been idle past its timeout relative to `time` — used by Group to garbage-collect idle limiters.
+ var _groupCheck__ = _asyncToGenerator(
+ /*#__PURE__*/
+ regeneratorRuntime.mark(function _callee7(time) {
+ return regeneratorRuntime.wrap(function _callee7$(_context7) {
+ while (1) {
+ switch (_context7.prev = _context7.next) {
+ case 0:
+ _context7.next = 2;
+ return this.yieldLoop();
+
+ case 2:
+ return _context7.abrupt("return", this._nextRequest + this.timeout < time); // this.timeout presumably loaded from instance options by parser$2.load — TODO confirm
+
+ case 3:
+ case "end":
+ return _context7.stop();
+ }
+ }
+ }, _callee7, this);
+ }));
+
+ function __groupCheck__(_x4) {
+ return _groupCheck__.apply(this, arguments);
+ }
+
+ return __groupCheck__;
+ }()
+ }, {
+ key: "computeCapacity",
+ value: function computeCapacity() { // Remaining capacity: min of concurrency headroom and reservoir; null means unlimited.
+ var maxConcurrent, reservoir;
+ var _this$storeOptions2 = this.storeOptions;
+ maxConcurrent = _this$storeOptions2.maxConcurrent;
+ reservoir = _this$storeOptions2.reservoir;
+
+ if (maxConcurrent != null && reservoir != null) {
+ return Math.min(maxConcurrent - this._running, reservoir);
+ } else if (maxConcurrent != null) {
+ return maxConcurrent - this._running;
+ } else if (reservoir != null) {
+ return reservoir;
+ } else {
+ return null; // neither limit configured => unlimited
+ }
+ }
+ }, {
+ key: "conditionsCheck",
+ value: function conditionsCheck(weight) { // True when a job of this weight fits in the current capacity (null capacity = unlimited).
+ var capacity;
+ capacity = this.computeCapacity();
+ return capacity == null || weight <= capacity;
+ }
+ }, {
+ key: "__incrementReservoir__",
+ value: function () { // async: add `incr` to the reservoir, drain any jobs the new capacity allows, and return the new reservoir value.
+ var _incrementReservoir__ = _asyncToGenerator(
+ /*#__PURE__*/
+ regeneratorRuntime.mark(function _callee8(incr) {
+ var reservoir;
+ return regeneratorRuntime.wrap(function _callee8$(_context8) {
+ while (1) {
+ switch (_context8.prev = _context8.next) {
+ case 0:
+ _context8.next = 2;
+ return this.yieldLoop();
+
+ case 2:
+ reservoir = this.storeOptions.reservoir += incr; // mutate then capture the updated value
+
+ this.instance._drainAll(this.computeCapacity());
+
+ return _context8.abrupt("return", reservoir);
+
+ case 5:
+ case "end":
+ return _context8.stop();
+ }
+ }
+ }, _callee8, this);
+ }));
+
+ function __incrementReservoir__(_x5) {
+ return _incrementReservoir__.apply(this, arguments);
+ }
+
+ return __incrementReservoir__;
+ }()
+ }, {
+ key: "__currentReservoir__",
+ value: function () { // async getter: current reservoir value (may be null/undefined when unconfigured).
+ var _currentReservoir__ = _asyncToGenerator(
+ /*#__PURE__*/
+ regeneratorRuntime.mark(function _callee9() {
+ return regeneratorRuntime.wrap(function _callee9$(_context9) {
+ while (1) {
+ switch (_context9.prev = _context9.next) {
+ case 0:
+ _context9.next = 2;
+ return this.yieldLoop();
+
+ case 2:
+ return _context9.abrupt("return", this.storeOptions.reservoir);
+
+ case 3:
+ case "end":
+ return _context9.stop();
+ }
+ }
+ }, _callee9, this);
+ }));
+
+ function __currentReservoir__() {
+ return _currentReservoir__.apply(this, arguments);
+ }
+
+ return __currentReservoir__;
+ }()
+ }, {
+ key: "isBlocked",
+ value: function isBlocked(now) {
+ return this._unblockTime >= now;
+ }
+ }, {
+ key: "check",
+ value: function check(weight, now) {
+ return this.conditionsCheck(weight) && this._nextRequest - now <= 0;
+ }
+ }, {
+ key: "__check__",
+ value: function () { // async wrapper around check(): samples Date.now() after yielding to the event loop.
+ var _check__ = _asyncToGenerator(
+ /*#__PURE__*/
+ regeneratorRuntime.mark(function _callee10(weight) {
+ var now;
+ return regeneratorRuntime.wrap(function _callee10$(_context10) {
+ while (1) {
+ switch (_context10.prev = _context10.next) {
+ case 0:
+ _context10.next = 2;
+ return this.yieldLoop();
+
+ case 2:
+ now = Date.now();
+ return _context10.abrupt("return", this.check(weight, now));
+
+ case 4:
+ case "end":
+ return _context10.stop();
+ }
+ }
+ }, _callee10, this);
+ }));
+
+ function __check__(_x6) {
+ return _check__.apply(this, arguments);
+ }
+
+ return __check__;
+ }()
+ }, {
+ key: "__register__",
+ value: function () { // async: try to reserve capacity for a queued job. On success, books the weight, debits the reservoir, and returns { success, wait, reservoir }; otherwise { success: false }. `index` and `expiration` are unused locally (Redis-store interface parity).
+ var _register__ = _asyncToGenerator(
+ /*#__PURE__*/
+ regeneratorRuntime.mark(function _callee11(index, weight, expiration) {
+ var now, wait;
+ return regeneratorRuntime.wrap(function _callee11$(_context11) {
+ while (1) {
+ switch (_context11.prev = _context11.next) {
+ case 0:
+ _context11.next = 2;
+ return this.yieldLoop();
+
+ case 2:
+ now = Date.now();
+
+ if (!this.conditionsCheck(weight)) {
+ _context11.next = 11;
+ break;
+ }
+
+ this._running += weight; // book the weight before the job actually starts
+
+ if (this.storeOptions.reservoir != null) {
+ this.storeOptions.reservoir -= weight;
+ }
+
+ wait = Math.max(this._nextRequest - now, 0); // ms the caller must still wait to honor minTime spacing
+ this._nextRequest = now + wait + this.storeOptions.minTime;
+ return _context11.abrupt("return", {
+ success: true,
+ wait: wait,
+ reservoir: this.storeOptions.reservoir
+ });
+
+ case 11:
+ return _context11.abrupt("return", {
+ success: false
+ });
+
+ case 12:
+ case "end":
+ return _context11.stop();
+ }
+ }
+ }, _callee11, this);
+ }));
+
+ function __register__(_x7, _x8, _x9) {
+ return _register__.apply(this, arguments);
+ }
+
+ return __register__;
+ }()
+ }, {
+ key: "strategyIsBlock",
+ value: function strategyIsBlock() { // strategy 3 — presumably Bottleneck.strategy.BLOCK, consistent with the blocking logic in __submit__; TODO confirm against the strategy enum
+ return this.storeOptions.strategy === 3;
+ }
+ }, {
+ key: "__submit__",
+ value: function () { // async: admission control for a newly submitted job — rejects over-weight jobs, detects the high-water mark, and applies the BLOCK strategy penalty when triggered.
+ var _submit__ = _asyncToGenerator(
+ /*#__PURE__*/
+ regeneratorRuntime.mark(function _callee12(queueLength, weight) {
+ var blocked, now, reachedHWM;
+ return regeneratorRuntime.wrap(function _callee12$(_context12) {
+ while (1) {
+ switch (_context12.prev = _context12.next) {
+ case 0:
+ _context12.next = 2;
+ return this.yieldLoop();
+
+ case 2:
+ if (!(this.storeOptions.maxConcurrent != null && weight > this.storeOptions.maxConcurrent)) {
+ _context12.next = 4;
+ break;
+ }
+
+ throw new BottleneckError$2("Impossible to add a job having a weight of ".concat(weight, " to a limiter having a maxConcurrent setting of ").concat(this.storeOptions.maxConcurrent)); // such a job could never acquire capacity
+
+ case 4:
+ now = Date.now();
+ reachedHWM = this.storeOptions.highWater != null && queueLength === this.storeOptions.highWater && !this.check(weight, now); // queue is at highWater and the job cannot run immediately
+ blocked = this.strategyIsBlock() && (reachedHWM || this.isBlocked(now));
+
+ if (blocked) {
+ this._unblockTime = now + this.computePenalty(); // extend the penalty window and push back the next allowed request
+ this._nextRequest = this._unblockTime + this.storeOptions.minTime;
+
+ this.instance._dropAllQueued();
+ }
+
+ return _context12.abrupt("return", {
+ reachedHWM: reachedHWM,
+ blocked: blocked,
+ strategy: this.storeOptions.strategy
+ });
+
+ case 9:
+ case "end":
+ return _context12.stop();
+ }
+ }
+ }, _callee12, this);
+ }));
+
+ function __submit__(_x10, _x11) {
+ return _submit__.apply(this, arguments);
+ }
+
+ return __submit__;
+ }()
+ }, {
+ key: "__free__",
+ value: function () { // async: release a finished job's weight (running -> done), drain newly-freed capacity, and report the new running total. `index` is unused locally (Redis-store interface parity).
+ var _free__ = _asyncToGenerator(
+ /*#__PURE__*/
+ regeneratorRuntime.mark(function _callee13(index, weight) {
+ return regeneratorRuntime.wrap(function _callee13$(_context13) {
+ while (1) {
+ switch (_context13.prev = _context13.next) {
+ case 0:
+ _context13.next = 2;
+ return this.yieldLoop();
+
+ case 2:
+ this._running -= weight;
+ this._done += weight;
+
+ this.instance._drainAll(this.computeCapacity());
+
+ return _context13.abrupt("return", {
+ running: this._running
+ });
+
+ case 6:
+ case "end":
+ return _context13.stop();
+ }
+ }
+ }, _callee13, this);
+ }));
+
+ function __free__(_x12, _x13) {
+ return _free__.apply(this, arguments);
+ }
+
+ return __free__;
+ }()
+ }]);
+
+ return LocalDatastore;
+ }();
+
+ var LocalDatastore_1 = LocalDatastore; // CommonJS-style export alias produced by the bundler
+
+ var lua = {
+ "blacklist_client.lua": "local blacklist = ARGV[num_static_argv + 1]\n\nif redis.call('zscore', client_last_seen_key, blacklist) then\n redis.call('zadd', client_last_seen_key, 0, blacklist)\nend\n\n\nreturn {}\n",
+ "check.lua": "local weight = tonumber(ARGV[num_static_argv + 1])\n\nlocal capacity = process_tick(now, false)['capacity']\nlocal nextRequest = tonumber(redis.call('hget', settings_key, 'nextRequest'))\n\nreturn conditions_check(capacity, weight) and nextRequest - now <= 0\n",
+ "conditions_check.lua": "local conditions_check = function (capacity, weight)\n return capacity == nil or weight <= capacity\nend\n",
+ "current_reservoir.lua": "return process_tick(now, false)['reservoir']\n",
+ "done.lua": "process_tick(now, false)\n\nreturn tonumber(redis.call('hget', settings_key, 'done'))\n",
+ "free.lua": "local index = ARGV[num_static_argv + 1]\n\nredis.call('zadd', job_expirations_key, 0, index)\n\nreturn process_tick(now, false)['running']\n",
+ "get_time.lua": "redis.replicate_commands()\n\nlocal get_time = function ()\n local time = redis.call('time')\n\n return tonumber(time[1]..string.sub(time[2], 1, 3))\nend\n",
+ "group_check.lua": "return not (redis.call('exists', settings_key) == 1)\n",
+ "heartbeat.lua": "process_tick(now, true)\n",
+ "increment_reservoir.lua": "local incr = tonumber(ARGV[num_static_argv + 1])\n\nredis.call('hincrby', settings_key, 'reservoir', incr)\n\nlocal reservoir = process_tick(now, true)['reservoir']\n\nlocal groupTimeout = tonumber(redis.call('hget', settings_key, 'groupTimeout'))\nrefresh_expiration(0, 0, groupTimeout)\n\nreturn reservoir\n",
+ "init.lua": "local clear = tonumber(ARGV[num_static_argv + 1])\nlocal limiter_version = ARGV[num_static_argv + 2]\nlocal num_local_argv = num_static_argv + 2\n\nif clear == 1 then\n redis.call('del', unpack(KEYS))\nend\n\nif redis.call('exists', settings_key) == 0 then\n -- Create\n local args = {'hmset', settings_key}\n\n for i = num_local_argv + 1, #ARGV do\n table.insert(args, ARGV[i])\n end\n\n redis.call(unpack(args))\n redis.call('hmset', settings_key,\n 'nextRequest', now,\n 'lastReservoirRefresh', now,\n 'lastReservoirIncrease', now,\n 'running', 0,\n 'done', 0,\n 'unblockTime', 0,\n 'capacityPriorityCounter', 0\n )\n\nelse\n -- Apply migrations\n local settings = redis.call('hmget', settings_key,\n 'id',\n 'version'\n )\n local id = settings[1]\n local current_version = settings[2]\n\n if current_version ~= limiter_version then\n local version_digits = {}\n for k, v in string.gmatch(current_version, \"([^.]+)\") do\n table.insert(version_digits, tonumber(k))\n end\n\n -- 2.10.0\n if version_digits[2] < 10 then\n redis.call('hsetnx', settings_key, 'reservoirRefreshInterval', '')\n redis.call('hsetnx', settings_key, 'reservoirRefreshAmount', '')\n redis.call('hsetnx', settings_key, 'lastReservoirRefresh', '')\n redis.call('hsetnx', settings_key, 'done', 0)\n redis.call('hset', settings_key, 'version', '2.10.0')\n end\n\n -- 2.11.1\n if version_digits[2] < 11 or (version_digits[2] == 11 and version_digits[3] < 1) then\n if redis.call('hstrlen', settings_key, 'lastReservoirRefresh') == 0 then\n redis.call('hmset', settings_key,\n 'lastReservoirRefresh', now,\n 'version', '2.11.1'\n )\n end\n end\n\n -- 2.14.0\n if version_digits[2] < 14 then\n local old_running_key = 'b_'..id..'_running'\n local old_executing_key = 'b_'..id..'_executing'\n\n if redis.call('exists', old_running_key) == 1 then\n redis.call('rename', old_running_key, job_weights_key)\n end\n if redis.call('exists', old_executing_key) == 1 then\n redis.call('rename', old_executing_key, 
job_expirations_key)\n end\n redis.call('hset', settings_key, 'version', '2.14.0')\n end\n\n -- 2.15.2\n if version_digits[2] < 15 or (version_digits[2] == 15 and version_digits[3] < 2) then\n redis.call('hsetnx', settings_key, 'capacityPriorityCounter', 0)\n redis.call('hset', settings_key, 'version', '2.15.2')\n end\n\n -- 2.17.0\n if version_digits[2] < 17 then\n redis.call('hsetnx', settings_key, 'clientTimeout', 10000)\n redis.call('hset', settings_key, 'version', '2.17.0')\n end\n\n -- 2.18.0\n if version_digits[2] < 18 then\n redis.call('hsetnx', settings_key, 'reservoirIncreaseInterval', '')\n redis.call('hsetnx', settings_key, 'reservoirIncreaseAmount', '')\n redis.call('hsetnx', settings_key, 'reservoirIncreaseMaximum', '')\n redis.call('hsetnx', settings_key, 'lastReservoirIncrease', now)\n redis.call('hset', settings_key, 'version', '2.18.0')\n end\n\n end\n\n process_tick(now, false)\nend\n\nlocal groupTimeout = tonumber(redis.call('hget', settings_key, 'groupTimeout'))\nrefresh_expiration(0, 0, groupTimeout)\n\nreturn {}\n",
+ "process_tick.lua": "local process_tick = function (now, always_publish)\n\n local compute_capacity = function (maxConcurrent, running, reservoir)\n if maxConcurrent ~= nil and reservoir ~= nil then\n return math.min((maxConcurrent - running), reservoir)\n elseif maxConcurrent ~= nil then\n return maxConcurrent - running\n elseif reservoir ~= nil then\n return reservoir\n else\n return nil\n end\n end\n\n local settings = redis.call('hmget', settings_key,\n 'id',\n 'maxConcurrent',\n 'running',\n 'reservoir',\n 'reservoirRefreshInterval',\n 'reservoirRefreshAmount',\n 'lastReservoirRefresh',\n 'reservoirIncreaseInterval',\n 'reservoirIncreaseAmount',\n 'reservoirIncreaseMaximum',\n 'lastReservoirIncrease',\n 'capacityPriorityCounter',\n 'clientTimeout'\n )\n local id = settings[1]\n local maxConcurrent = tonumber(settings[2])\n local running = tonumber(settings[3])\n local reservoir = tonumber(settings[4])\n local reservoirRefreshInterval = tonumber(settings[5])\n local reservoirRefreshAmount = tonumber(settings[6])\n local lastReservoirRefresh = tonumber(settings[7])\n local reservoirIncreaseInterval = tonumber(settings[8])\n local reservoirIncreaseAmount = tonumber(settings[9])\n local reservoirIncreaseMaximum = tonumber(settings[10])\n local lastReservoirIncrease = tonumber(settings[11])\n local capacityPriorityCounter = tonumber(settings[12])\n local clientTimeout = tonumber(settings[13])\n\n local initial_capacity = compute_capacity(maxConcurrent, running, reservoir)\n\n --\n -- Process 'running' changes\n --\n local expired = redis.call('zrangebyscore', job_expirations_key, '-inf', '('..now)\n\n if #expired > 0 then\n redis.call('zremrangebyscore', job_expirations_key, '-inf', '('..now)\n\n local flush_batch = function (batch, acc)\n local weights = redis.call('hmget', job_weights_key, unpack(batch))\n redis.call('hdel', job_weights_key, unpack(batch))\n local clients = redis.call('hmget', job_clients_key, unpack(batch))\n redis.call('hdel', 
job_clients_key, unpack(batch))\n\n -- Calculate sum of removed weights\n for i = 1, #weights do\n acc['total'] = acc['total'] + (tonumber(weights[i]) or 0)\n end\n\n -- Calculate sum of removed weights by client\n local client_weights = {}\n for i = 1, #clients do\n local removed = tonumber(weights[i]) or 0\n if removed > 0 then\n acc['client_weights'][clients[i]] = (acc['client_weights'][clients[i]] or 0) + removed\n end\n end\n end\n\n local acc = {\n ['total'] = 0,\n ['client_weights'] = {}\n }\n local batch_size = 1000\n\n -- Compute changes to Zsets and apply changes to Hashes\n for i = 1, #expired, batch_size do\n local batch = {}\n for j = i, math.min(i + batch_size - 1, #expired) do\n table.insert(batch, expired[j])\n end\n\n flush_batch(batch, acc)\n end\n\n -- Apply changes to Zsets\n if acc['total'] > 0 then\n redis.call('hincrby', settings_key, 'done', acc['total'])\n running = tonumber(redis.call('hincrby', settings_key, 'running', -acc['total']))\n end\n\n for client, weight in pairs(acc['client_weights']) do\n redis.call('zincrby', client_running_key, -weight, client)\n end\n end\n\n --\n -- Process 'reservoir' changes\n --\n local reservoirRefreshActive = reservoirRefreshInterval ~= nil and reservoirRefreshAmount ~= nil\n if reservoirRefreshActive and now >= lastReservoirRefresh + reservoirRefreshInterval then\n reservoir = reservoirRefreshAmount\n redis.call('hmset', settings_key,\n 'reservoir', reservoir,\n 'lastReservoirRefresh', now\n )\n end\n\n local reservoirIncreaseActive = reservoirIncreaseInterval ~= nil and reservoirIncreaseAmount ~= nil\n if reservoirIncreaseActive and now >= lastReservoirIncrease + reservoirIncreaseInterval then\n local num_intervals = math.floor((now - lastReservoirIncrease) / reservoirIncreaseInterval)\n local incr = reservoirIncreaseAmount * num_intervals\n if reservoirIncreaseMaximum ~= nil then\n incr = math.min(incr, reservoirIncreaseMaximum - (reservoir or 0))\n end\n if incr > 0 then\n reservoir = (reservoir or 
0) + incr\n end\n redis.call('hmset', settings_key,\n 'reservoir', reservoir,\n 'lastReservoirIncrease', lastReservoirIncrease + (num_intervals * reservoirIncreaseInterval)\n )\n end\n\n --\n -- Clear unresponsive clients\n --\n local unresponsive = redis.call('zrangebyscore', client_last_seen_key, '-inf', (now - clientTimeout))\n local unresponsive_lookup = {}\n local terminated_clients = {}\n for i = 1, #unresponsive do\n unresponsive_lookup[unresponsive[i]] = true\n if tonumber(redis.call('zscore', client_running_key, unresponsive[i])) == 0 then\n table.insert(terminated_clients, unresponsive[i])\n end\n end\n if #terminated_clients > 0 then\n redis.call('zrem', client_running_key, unpack(terminated_clients))\n redis.call('hdel', client_num_queued_key, unpack(terminated_clients))\n redis.call('zrem', client_last_registered_key, unpack(terminated_clients))\n redis.call('zrem', client_last_seen_key, unpack(terminated_clients))\n end\n\n --\n -- Broadcast capacity changes\n --\n local final_capacity = compute_capacity(maxConcurrent, running, reservoir)\n\n if always_publish or (initial_capacity ~= nil and final_capacity == nil) then\n -- always_publish or was not unlimited, now unlimited\n redis.call('publish', 'b_'..id, 'capacity:'..(final_capacity or ''))\n\n elseif initial_capacity ~= nil and final_capacity ~= nil and final_capacity > initial_capacity then\n -- capacity was increased\n -- send the capacity message to the limiter having the lowest number of running jobs\n -- the tiebreaker is the limiter having not registered a job in the longest time\n\n local lowest_concurrency_value = nil\n local lowest_concurrency_clients = {}\n local lowest_concurrency_last_registered = {}\n local client_concurrencies = redis.call('zrange', client_running_key, 0, -1, 'withscores')\n\n for i = 1, #client_concurrencies, 2 do\n local client = client_concurrencies[i]\n local concurrency = tonumber(client_concurrencies[i+1])\n\n if (\n lowest_concurrency_value == nil or 
lowest_concurrency_value == concurrency\n ) and (\n not unresponsive_lookup[client]\n ) and (\n tonumber(redis.call('hget', client_num_queued_key, client)) > 0\n ) then\n lowest_concurrency_value = concurrency\n table.insert(lowest_concurrency_clients, client)\n local last_registered = tonumber(redis.call('zscore', client_last_registered_key, client))\n table.insert(lowest_concurrency_last_registered, last_registered)\n end\n end\n\n if #lowest_concurrency_clients > 0 then\n local position = 1\n local earliest = lowest_concurrency_last_registered[1]\n\n for i,v in ipairs(lowest_concurrency_last_registered) do\n if v < earliest then\n position = i\n earliest = v\n end\n end\n\n local next_client = lowest_concurrency_clients[position]\n redis.call('publish', 'b_'..id,\n 'capacity-priority:'..(final_capacity or '')..\n ':'..next_client..\n ':'..capacityPriorityCounter\n )\n redis.call('hincrby', settings_key, 'capacityPriorityCounter', '1')\n else\n redis.call('publish', 'b_'..id, 'capacity:'..(final_capacity or ''))\n end\n end\n\n return {\n ['capacity'] = final_capacity,\n ['running'] = running,\n ['reservoir'] = reservoir\n }\nend\n",
+ "queued.lua": "local clientTimeout = tonumber(redis.call('hget', settings_key, 'clientTimeout'))\nlocal valid_clients = redis.call('zrangebyscore', client_last_seen_key, (now - clientTimeout), 'inf')\nlocal client_queued = redis.call('hmget', client_num_queued_key, unpack(valid_clients))\n\nlocal sum = 0\nfor i = 1, #client_queued do\n sum = sum + tonumber(client_queued[i])\nend\n\nreturn sum\n",
+ "refresh_expiration.lua": "local refresh_expiration = function (now, nextRequest, groupTimeout)\n\n if groupTimeout ~= nil then\n local ttl = (nextRequest + groupTimeout) - now\n\n for i = 1, #KEYS do\n redis.call('pexpire', KEYS[i], ttl)\n end\n end\n\nend\n",
+ "refs.lua": "local settings_key = KEYS[1]\nlocal job_weights_key = KEYS[2]\nlocal job_expirations_key = KEYS[3]\nlocal job_clients_key = KEYS[4]\nlocal client_running_key = KEYS[5]\nlocal client_num_queued_key = KEYS[6]\nlocal client_last_registered_key = KEYS[7]\nlocal client_last_seen_key = KEYS[8]\n\nlocal now = tonumber(ARGV[1])\nlocal client = ARGV[2]\n\nlocal num_static_argv = 2\n",
+ "register.lua": "local index = ARGV[num_static_argv + 1]\nlocal weight = tonumber(ARGV[num_static_argv + 2])\nlocal expiration = tonumber(ARGV[num_static_argv + 3])\n\nlocal state = process_tick(now, false)\nlocal capacity = state['capacity']\nlocal reservoir = state['reservoir']\n\nlocal settings = redis.call('hmget', settings_key,\n 'nextRequest',\n 'minTime',\n 'groupTimeout'\n)\nlocal nextRequest = tonumber(settings[1])\nlocal minTime = tonumber(settings[2])\nlocal groupTimeout = tonumber(settings[3])\n\nif conditions_check(capacity, weight) then\n\n redis.call('hincrby', settings_key, 'running', weight)\n redis.call('hset', job_weights_key, index, weight)\n if expiration ~= nil then\n redis.call('zadd', job_expirations_key, now + expiration, index)\n end\n redis.call('hset', job_clients_key, index, client)\n redis.call('zincrby', client_running_key, weight, client)\n redis.call('hincrby', client_num_queued_key, client, -1)\n redis.call('zadd', client_last_registered_key, now, client)\n\n local wait = math.max(nextRequest - now, 0)\n local newNextRequest = now + wait + minTime\n\n if reservoir == nil then\n redis.call('hset', settings_key,\n 'nextRequest', newNextRequest\n )\n else\n reservoir = reservoir - weight\n redis.call('hmset', settings_key,\n 'reservoir', reservoir,\n 'nextRequest', newNextRequest\n )\n end\n\n refresh_expiration(now, newNextRequest, groupTimeout)\n\n return {true, wait, reservoir}\n\nelse\n return {false}\nend\n",
+ "register_client.lua": "local queued = tonumber(ARGV[num_static_argv + 1])\n\n-- Could have been re-registered concurrently\nif not redis.call('zscore', client_last_seen_key, client) then\n redis.call('zadd', client_running_key, 0, client)\n redis.call('hset', client_num_queued_key, client, queued)\n redis.call('zadd', client_last_registered_key, 0, client)\nend\n\nredis.call('zadd', client_last_seen_key, now, client)\n\nreturn {}\n",
+ "running.lua": "return process_tick(now, false)['running']\n",
+ "submit.lua": "local queueLength = tonumber(ARGV[num_static_argv + 1])\nlocal weight = tonumber(ARGV[num_static_argv + 2])\n\nlocal capacity = process_tick(now, false)['capacity']\n\nlocal settings = redis.call('hmget', settings_key,\n 'id',\n 'maxConcurrent',\n 'highWater',\n 'nextRequest',\n 'strategy',\n 'unblockTime',\n 'penalty',\n 'minTime',\n 'groupTimeout'\n)\nlocal id = settings[1]\nlocal maxConcurrent = tonumber(settings[2])\nlocal highWater = tonumber(settings[3])\nlocal nextRequest = tonumber(settings[4])\nlocal strategy = tonumber(settings[5])\nlocal unblockTime = tonumber(settings[6])\nlocal penalty = tonumber(settings[7])\nlocal minTime = tonumber(settings[8])\nlocal groupTimeout = tonumber(settings[9])\n\nif maxConcurrent ~= nil and weight > maxConcurrent then\n return redis.error_reply('OVERWEIGHT:'..weight..':'..maxConcurrent)\nend\n\nlocal reachedHWM = (highWater ~= nil and queueLength == highWater\n and not (\n conditions_check(capacity, weight)\n and nextRequest - now <= 0\n )\n)\n\nlocal blocked = strategy == 3 and (reachedHWM or unblockTime >= now)\n\nif blocked then\n local computedPenalty = penalty\n if computedPenalty == nil then\n if minTime == 0 then\n computedPenalty = 5000\n else\n computedPenalty = 15 * minTime\n end\n end\n\n local newNextRequest = now + computedPenalty + minTime\n\n redis.call('hmset', settings_key,\n 'unblockTime', now + computedPenalty,\n 'nextRequest', newNextRequest\n )\n\n local clients_queued_reset = redis.call('hkeys', client_num_queued_key)\n local queued_reset = {}\n for i = 1, #clients_queued_reset do\n table.insert(queued_reset, clients_queued_reset[i])\n table.insert(queued_reset, 0)\n end\n redis.call('hmset', client_num_queued_key, unpack(queued_reset))\n\n redis.call('publish', 'b_'..id, 'blocked:')\n\n refresh_expiration(now, newNextRequest, groupTimeout)\nend\n\nif not blocked and not reachedHWM then\n redis.call('hincrby', client_num_queued_key, client, 1)\nend\n\nreturn {reachedHWM, blocked, 
strategy}\n",
+ "update_settings.lua": "local args = {'hmset', settings_key}\n\nfor i = num_static_argv + 1, #ARGV do\n table.insert(args, ARGV[i])\nend\n\nredis.call(unpack(args))\n\nprocess_tick(now, true)\n\nlocal groupTimeout = tonumber(redis.call('hget', settings_key, 'groupTimeout'))\nrefresh_expiration(0, 0, groupTimeout)\n\nreturn {}\n",
+ "validate_client.lua": "if not redis.call('zscore', client_last_seen_key, client) then\n return redis.error_reply('UNKNOWN_CLIENT')\nend\n\nredis.call('zadd', client_last_seen_key, now, client)\n",
+ "validate_keys.lua": "if not (redis.call('exists', settings_key) == 1) then\n return redis.error_reply('SETTINGS_KEY_NOT_FOUND')\nend\n"
+ };
+
+ var lua$1 = /*#__PURE__*/Object.freeze({
+ default: lua
+ });
+
+ var require$$0 = getCjsExportFromNamespace(lua$1);
+
+ var Scripts = createCommonjsModule(function (module, exports) {
+ var headers, lua, templates;
+ lua = require$$0;
+ headers = {
+ refs: lua["refs.lua"],
+ validate_keys: lua["validate_keys.lua"],
+ validate_client: lua["validate_client.lua"],
+ refresh_expiration: lua["refresh_expiration.lua"],
+ process_tick: lua["process_tick.lua"],
+ conditions_check: lua["conditions_check.lua"],
+ get_time: lua["get_time.lua"]
+ };
+
+ exports.allKeys = function (id) {
+ return [
+ /*
+ HASH
+ */
+ "b_".concat(id, "_settings"),
+ /*
+ HASH
+ job index -> weight
+ */
+ "b_".concat(id, "_job_weights"),
+ /*
+ ZSET
+ job index -> expiration
+ */
+ "b_".concat(id, "_job_expirations"),
+ /*
+ HASH
+ job index -> client
+ */
+ "b_".concat(id, "_job_clients"),
+ /*
+ ZSET
+ client -> sum running
+ */
+ "b_".concat(id, "_client_running"),
+ /*
+ HASH
+ client -> num queued
+ */
+ "b_".concat(id, "_client_num_queued"),
+ /*
+ ZSET
+ client -> last job registered
+ */
+ "b_".concat(id, "_client_last_registered"),
+ /*
+ ZSET
+ client -> last seen
+ */
+ "b_".concat(id, "_client_last_seen")];
+ };
+
+ templates = {
+ init: {
+ keys: exports.allKeys,
+ headers: ["process_tick"],
+ refresh_expiration: true,
+ code: lua["init.lua"]
+ },
+ group_check: {
+ keys: exports.allKeys,
+ headers: [],
+ refresh_expiration: false,
+ code: lua["group_check.lua"]
+ },
+ register_client: {
+ keys: exports.allKeys,
+ headers: ["validate_keys"],
+ refresh_expiration: false,
+ code: lua["register_client.lua"]
+ },
+ blacklist_client: {
+ keys: exports.allKeys,
+ headers: ["validate_keys", "validate_client"],
+ refresh_expiration: false,
+ code: lua["blacklist_client.lua"]
+ },
+ heartbeat: {
+ keys: exports.allKeys,
+ headers: ["validate_keys", "validate_client", "process_tick"],
+ refresh_expiration: false,
+ code: lua["heartbeat.lua"]
+ },
+ update_settings: {
+ keys: exports.allKeys,
+ headers: ["validate_keys", "validate_client", "process_tick"],
+ refresh_expiration: true,
+ code: lua["update_settings.lua"]
+ },
+ running: {
+ keys: exports.allKeys,
+ headers: ["validate_keys", "validate_client", "process_tick"],
+ refresh_expiration: false,
+ code: lua["running.lua"]
+ },
+ queued: {
+ keys: exports.allKeys,
+ headers: ["validate_keys", "validate_client"],
+ refresh_expiration: false,
+ code: lua["queued.lua"]
+ },
+ done: {
+ keys: exports.allKeys,
+ headers: ["validate_keys", "validate_client", "process_tick"],
+ refresh_expiration: false,
+ code: lua["done.lua"]
+ },
+ check: {
+ keys: exports.allKeys,
+ headers: ["validate_keys", "validate_client", "process_tick", "conditions_check"],
+ refresh_expiration: false,
+ code: lua["check.lua"]
+ },
+ submit: {
+ keys: exports.allKeys,
+ headers: ["validate_keys", "validate_client", "process_tick", "conditions_check"],
+ refresh_expiration: true,
+ code: lua["submit.lua"]
+ },
+ register: {
+ keys: exports.allKeys,
+ headers: ["validate_keys", "validate_client", "process_tick", "conditions_check"],
+ refresh_expiration: true,
+ code: lua["register.lua"]
+ },
+ free: {
+ keys: exports.allKeys,
+ headers: ["validate_keys", "validate_client", "process_tick"],
+ refresh_expiration: true,
+ code: lua["free.lua"]
+ },
+ current_reservoir: {
+ keys: exports.allKeys,
+ headers: ["validate_keys", "validate_client", "process_tick"],
+ refresh_expiration: false,
+ code: lua["current_reservoir.lua"]
+ },
+ increment_reservoir: {
+ keys: exports.allKeys,
+ headers: ["validate_keys", "validate_client", "process_tick"],
+ refresh_expiration: true,
+ code: lua["increment_reservoir.lua"]
+ }
+ };
+ exports.names = Object.keys(templates);
+
+ exports.keys = function (name, id) {
+ return templates[name].keys(id);
+ };
+
+ exports.payload = function (name) {
+ var template;
+ template = templates[name];
+ return Array.prototype.concat(headers.refs, template.headers.map(function (h) {
+ return headers[h];
+ }), template.refresh_expiration ? headers.refresh_expiration : "", template.code).join("\n");
+ };
+ });
+ var Scripts_1 = Scripts.allKeys;
+ var Scripts_2 = Scripts.names;
+ var Scripts_3 = Scripts.keys;
+ var Scripts_4 = Scripts.payload;
+
+ var Events$2, RedisConnection, Scripts$1, parser$3;
+ parser$3 = parser;
+ Events$2 = Events_1;
+ Scripts$1 = Scripts;
+
+ RedisConnection = function () {
+ var RedisConnection =
+ /*#__PURE__*/
+ function () {
+ function RedisConnection() {
+ var _this = this;
+
+ var options = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};
+
+ _classCallCheck(this, RedisConnection);
+
+ parser$3.load(options, this.defaults, this);
+
+ if (this.Redis == null) {
+ this.Redis = eval("require")("redis"); // Obfuscated or else Webpack/Angular will try to inline the optional redis module. To override this behavior: pass the redis module to Bottleneck as the 'Redis' option.
+ }
+
+ if (this.Events == null) {
+ this.Events = new Events$2(this);
+ }
+
+ this.terminated = false;
+
+ if (this.client == null) {
+ this.client = this.Redis.createClient(this.clientOptions);
+ }
+
+ this.subscriber = this.client.duplicate();
+ this.limiters = {};
+ this.shas = {};
+ this.ready = this.Promise.all([this._setup(this.client, false), this._setup(this.subscriber, true)]).then(function () {
+ return _this._loadScripts();
+ }).then(function () {
+ return {
+ client: _this.client,
+ subscriber: _this.subscriber
+ };
+ });
+ }
+
+ _createClass(RedisConnection, [{
+ key: "_setup",
+ value: function _setup(client, sub) {
+ var _this2 = this;
+
+ client.setMaxListeners(0);
+ return new this.Promise(function (resolve, reject) {
+ client.on("error", function (e) {
+ return _this2.Events.trigger("error", e);
+ });
+
+ if (sub) {
+ client.on("message", function (channel, message) {
+ var ref;
+ return (ref = _this2.limiters[channel]) != null ? ref._store.onMessage(channel, message) : void 0;
+ });
+ }
+
+ if (client.ready) {
+ return resolve();
+ } else {
+ return client.once("ready", resolve);
+ }
+ });
+ }
+ }, {
+ key: "_loadScript",
+ value: function _loadScript(name) {
+ var _this3 = this;
+
+ return new this.Promise(function (resolve, reject) {
+ var payload;
+ payload = Scripts$1.payload(name);
+ return _this3.client.multi([["script", "load", payload]]).exec(function (err, replies) {
+ if (err != null) {
+ return reject(err);
+ }
+
+ _this3.shas[name] = replies[0];
+ return resolve(replies[0]);
+ });
+ });
+ }
+ }, {
+ key: "_loadScripts",
+ value: function _loadScripts() {
+ var _this4 = this;
+
+ return this.Promise.all(Scripts$1.names.map(function (k) {
+ return _this4._loadScript(k);
+ }));
+ }
+ }, {
+ key: "__runCommand__",
+ value: function () {
+ var _runCommand__ = _asyncToGenerator(
+ /*#__PURE__*/
+ regeneratorRuntime.mark(function _callee(cmd) {
+ var _this5 = this;
+
+ return regeneratorRuntime.wrap(function _callee$(_context) {
+ while (1) {
+ switch (_context.prev = _context.next) {
+ case 0:
+ _context.next = 2;
+ return this.ready;
+
+ case 2:
+ return _context.abrupt("return", new this.Promise(function (resolve, reject) {
+ return _this5.client.multi([cmd]).exec_atomic(function (err, replies) {
+ if (err != null) {
+ return reject(err);
+ } else {
+ return resolve(replies[0]);
+ }
+ });
+ }));
+
+ case 3:
+ case "end":
+ return _context.stop();
+ }
+ }
+ }, _callee, this);
+ }));
+
+ function __runCommand__(_x) {
+ return _runCommand__.apply(this, arguments);
+ }
+
+ return __runCommand__;
+ }()
+ }, {
+ key: "__addLimiter__",
+ value: function __addLimiter__(instance) {
+ var _this6 = this;
+
+ return this.Promise.all([instance.channel(), instance.channel_client()].map(function (channel) {
+ return new _this6.Promise(function (resolve, reject) {
+ var _handler;
+
+ _handler = function handler(chan) {
+ if (chan === channel) {
+ _this6.subscriber.removeListener("subscribe", _handler);
+
+ _this6.limiters[channel] = instance;
+ return resolve();
+ }
+ };
+
+ _this6.subscriber.on("subscribe", _handler);
+
+ return _this6.subscriber.subscribe(channel);
+ });
+ }));
+ }
+ }, {
+ key: "__removeLimiter__",
+ value: function __removeLimiter__(instance) {
+ var _this7 = this;
+
+ return this.Promise.all([instance.channel(), instance.channel_client()].map(
+ /*#__PURE__*/
+ function () {
+ var _ref = _asyncToGenerator(
+ /*#__PURE__*/
+ regeneratorRuntime.mark(function _callee2(channel) {
+ return regeneratorRuntime.wrap(function _callee2$(_context2) {
+ while (1) {
+ switch (_context2.prev = _context2.next) {
+ case 0:
+ if (_this7.terminated) {
+ _context2.next = 3;
+ break;
+ }
+
+ _context2.next = 3;
+ return new _this7.Promise(function (resolve, reject) {
+ return _this7.subscriber.unsubscribe(channel, function (err, chan) {
+ if (err != null) {
+ return reject(err);
+ }
+
+ if (chan === channel) {
+ return resolve();
+ }
+ });
+ });
+
+ case 3:
+ return _context2.abrupt("return", delete _this7.limiters[channel]);
+
+ case 4:
+ case "end":
+ return _context2.stop();
+ }
+ }
+ }, _callee2);
+ }));
+
+ return function (_x2) {
+ return _ref.apply(this, arguments);
+ };
+ }()));
+ }
+ }, {
+ key: "__scriptArgs__",
+ value: function __scriptArgs__(name, id, args, cb) {
+ var keys;
+ keys = Scripts$1.keys(name, id);
+ return [this.shas[name], keys.length].concat(keys, args, cb);
+ }
+ }, {
+ key: "__scriptFn__",
+ value: function __scriptFn__(name) {
+ return this.client.evalsha.bind(this.client);
+ }
+ }, {
+ key: "disconnect",
+ value: function disconnect() {
+ var flush = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : true;
+ var i, k, len, ref;
+ ref = Object.keys(this.limiters);
+
+ for (i = 0, len = ref.length; i < len; i++) {
+ k = ref[i];
+ clearInterval(this.limiters[k]._store.heartbeat);
+ }
+
+ this.limiters = {};
+ this.terminated = true;
+ this.client.end(flush);
+ this.subscriber.end(flush);
+ return this.Promise.resolve();
+ }
+ }]);
+
+ return RedisConnection;
+ }();
+ RedisConnection.prototype.datastore = "redis";
+ RedisConnection.prototype.defaults = {
+ Redis: null,
+ clientOptions: {},
+ client: null,
+ Promise: Promise,
+ Events: null
+ };
+ return RedisConnection;
+ }.call(commonjsGlobal);
+
+ var RedisConnection_1 = RedisConnection;
+
+ var Events$3, IORedisConnection, Scripts$2, parser$4;
+ parser$4 = parser;
+ Events$3 = Events_1;
+ Scripts$2 = Scripts;
+
+ IORedisConnection = function () {
+ var IORedisConnection =
+ /*#__PURE__*/
+ function () {
+ function IORedisConnection() {
+ var _this = this;
+
+ var options = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};
+
+ _classCallCheck(this, IORedisConnection);
+
+ parser$4.load(options, this.defaults, this);
+
+ if (this.Redis == null) {
+ this.Redis = eval("require")("ioredis"); // Obfuscated or else Webpack/Angular will try to inline the optional ioredis module. To override this behavior: pass the ioredis module to Bottleneck as the 'Redis' option.
+ }
+
+ if (this.Events == null) {
+ this.Events = new Events$3(this);
+ }
+
+ this.terminated = false;
+
+ if (this.clusterNodes != null) {
+ this.client = new this.Redis.Cluster(this.clusterNodes, this.clientOptions);
+ this.subscriber = new this.Redis.Cluster(this.clusterNodes, this.clientOptions);
+ } else if (this.client != null && this.client.duplicate == null) {
+ this.subscriber = new this.Redis.Cluster(this.client.startupNodes, this.client.options);
+ } else {
+ if (this.client == null) {
+ this.client = new this.Redis(this.clientOptions);
+ }
+
+ this.subscriber = this.client.duplicate();
+ }
+
+ this.limiters = {};
+ this.ready = this.Promise.all([this._setup(this.client, false), this._setup(this.subscriber, true)]).then(function () {
+ _this._loadScripts();
+
+ return {
+ client: _this.client,
+ subscriber: _this.subscriber
+ };
+ });
+ }
+
+ _createClass(IORedisConnection, [{
+ key: "_setup",
+ value: function _setup(client, sub) {
+ var _this2 = this;
+
+ client.setMaxListeners(0);
+ return new this.Promise(function (resolve, reject) {
+ client.on("error", function (e) {
+ return _this2.Events.trigger("error", e);
+ });
+
+ if (sub) {
+ client.on("message", function (channel, message) {
+ var ref;
+ return (ref = _this2.limiters[channel]) != null ? ref._store.onMessage(channel, message) : void 0;
+ });
+ }
+
+ if (client.status === "ready") {
+ return resolve();
+ } else {
+ return client.once("ready", resolve);
+ }
+ });
+ }
+ }, {
+ key: "_loadScripts",
+ value: function _loadScripts() {
+ var _this3 = this;
+
+ return Scripts$2.names.forEach(function (name) {
+ return _this3.client.defineCommand(name, {
+ lua: Scripts$2.payload(name)
+ });
+ });
+ }
+ }, {
+ key: "__runCommand__",
+ value: function () {
+ var _runCommand__ = _asyncToGenerator(
+ /*#__PURE__*/
+ regeneratorRuntime.mark(function _callee(cmd) {
+ var _, deleted, _ref, _ref2, _ref2$;
+
+ return regeneratorRuntime.wrap(function _callee$(_context) {
+ while (1) {
+ switch (_context.prev = _context.next) {
+ case 0:
+ _context.next = 2;
+ return this.ready;
+
+ case 2:
+ _context.next = 4;
+ return this.client.pipeline([cmd]).exec();
+
+ case 4:
+ _ref = _context.sent;
+ _ref2 = _slicedToArray(_ref, 1);
+ _ref2$ = _slicedToArray(_ref2[0], 2);
+ _ = _ref2$[0];
+ deleted = _ref2$[1];
+ return _context.abrupt("return", deleted);
+
+ case 10:
+ case "end":
+ return _context.stop();
+ }
+ }
+ }, _callee, this);
+ }));
+
+ function __runCommand__(_x) {
+ return _runCommand__.apply(this, arguments);
+ }
+
+ return __runCommand__;
+ }()
+ }, {
+ key: "__addLimiter__",
+ value: function __addLimiter__(instance) {
+ var _this4 = this;
+
+ return this.Promise.all([instance.channel(), instance.channel_client()].map(function (channel) {
+ return new _this4.Promise(function (resolve, reject) {
+ return _this4.subscriber.subscribe(channel, function () {
+ _this4.limiters[channel] = instance;
+ return resolve();
+ });
+ });
+ }));
+ }
+ }, {
+ key: "__removeLimiter__",
+ value: function __removeLimiter__(instance) {
+ var _this5 = this;
+
+ return [instance.channel(), instance.channel_client()].forEach(
+ /*#__PURE__*/
+ function () {
+ var _ref3 = _asyncToGenerator(
+ /*#__PURE__*/
+ regeneratorRuntime.mark(function _callee2(channel) {
+ return regeneratorRuntime.wrap(function _callee2$(_context2) {
+ while (1) {
+ switch (_context2.prev = _context2.next) {
+ case 0:
+ if (_this5.terminated) {
+ _context2.next = 3;
+ break;
+ }
+
+ _context2.next = 3;
+ return _this5.subscriber.unsubscribe(channel);
+
+ case 3:
+ return _context2.abrupt("return", delete _this5.limiters[channel]);
+
+ case 4:
+ case "end":
+ return _context2.stop();
+ }
+ }
+ }, _callee2);
+ }));
+
+ return function (_x2) {
+ return _ref3.apply(this, arguments);
+ };
+ }());
+ }
+ }, {
+ key: "__scriptArgs__",
+ value: function __scriptArgs__(name, id, args, cb) {
+ var keys;
+ keys = Scripts$2.keys(name, id);
+ return [keys.length].concat(keys, args, cb);
+ }
+ }, {
+ key: "__scriptFn__",
+ value: function __scriptFn__(name) {
+ return this.client[name].bind(this.client);
+ }
+ }, {
+ key: "disconnect",
+ value: function disconnect() {
+ var flush = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : true;
+ var i, k, len, ref;
+ ref = Object.keys(this.limiters);
+
+ for (i = 0, len = ref.length; i < len; i++) {
+ k = ref[i];
+ clearInterval(this.limiters[k]._store.heartbeat);
+ }
+
+ this.limiters = {};
+ this.terminated = true;
+
+ if (flush) {
+ return this.Promise.all([this.client.quit(), this.subscriber.quit()]);
+ } else {
+ this.client.disconnect();
+ this.subscriber.disconnect();
+ return this.Promise.resolve();
+ }
+ }
+ }]);
+
+ return IORedisConnection;
+ }();
+ IORedisConnection.prototype.datastore = "ioredis";
+ IORedisConnection.prototype.defaults = {
+ Redis: null,
+ clientOptions: {},
+ clusterNodes: null,
+ client: null,
+ Promise: Promise,
+ Events: null
+ };
+ return IORedisConnection;
+ }.call(commonjsGlobal);
+
+ var IORedisConnection_1 = IORedisConnection;
+
+ var BottleneckError$3, IORedisConnection$1, RedisConnection$1, RedisDatastore, parser$5;
+ parser$5 = parser;
+ BottleneckError$3 = BottleneckError_1;
+ RedisConnection$1 = RedisConnection_1;
+ IORedisConnection$1 = IORedisConnection_1;
+
+ RedisDatastore =
+ /*#__PURE__*/
+ function () {
+ function RedisDatastore(instance, storeOptions, storeInstanceOptions) {
+ var _this = this;
+
+ _classCallCheck(this, RedisDatastore);
+
+ this.instance = instance;
+ this.storeOptions = storeOptions;
+ this.originalId = this.instance.id;
+ this.clientId = this.instance._randomIndex();
+ parser$5.load(storeInstanceOptions, storeInstanceOptions, this);
+ this.clients = {};
+ this.capacityPriorityCounters = {};
+ this.sharedConnection = this.connection != null;
+
+ if (this.connection == null) {
+ this.connection = this.instance.datastore === "redis" ? new RedisConnection$1({
+ Redis: this.Redis,
+ clientOptions: this.clientOptions,
+ Promise: this.Promise,
+ Events: this.instance.Events
+ }) : this.instance.datastore === "ioredis" ? new IORedisConnection$1({
+ Redis: this.Redis,
+ clientOptions: this.clientOptions,
+ clusterNodes: this.clusterNodes,
+ Promise: this.Promise,
+ Events: this.instance.Events
+ }) : void 0;
+ }
+
+ this.instance.connection = this.connection;
+ this.instance.datastore = this.connection.datastore;
+ this.ready = this.connection.ready.then(function (clients) {
+ _this.clients = clients;
+ return _this.runScript("init", _this.prepareInitSettings(_this.clearDatastore));
+ }).then(function () {
+ return _this.connection.__addLimiter__(_this.instance);
+ }).then(function () {
+ return _this.runScript("register_client", [_this.instance.queued()]);
+ }).then(function () {
+ var base;
+
+ if (typeof (base = _this.heartbeat = setInterval(function () {
+ return _this.runScript("heartbeat", [])["catch"](function (e) {
+ return _this.instance.Events.trigger("error", e);
+ });
+ }, _this.heartbeatInterval)).unref === "function") {
+ base.unref();
+ }
+
+ return _this.clients;
+ });
+ }
+
+ _createClass(RedisDatastore, [{
+ key: "__publish__",
+ value: function () {
+ var _publish__ = _asyncToGenerator(
+ /*#__PURE__*/
+ regeneratorRuntime.mark(function _callee(message) {
+ var client, _ref;
+
+ return regeneratorRuntime.wrap(function _callee$(_context) {
+ while (1) {
+ switch (_context.prev = _context.next) {
+ case 0:
+ _context.next = 2;
+ return this.ready;
+
+ case 2:
+ _ref = _context.sent;
+ client = _ref.client;
+ return _context.abrupt("return", client.publish(this.instance.channel(), "message:".concat(message.toString())));
+
+ case 5:
+ case "end":
+ return _context.stop();
+ }
+ }
+ }, _callee, this);
+ }));
+
+ function __publish__(_x) {
+ return _publish__.apply(this, arguments);
+ }
+
+ return __publish__;
+ }()
+ }, {
+ key: "onMessage",
+ value: function () {
+ var _onMessage = _asyncToGenerator(
+ /*#__PURE__*/
+ regeneratorRuntime.mark(function _callee3(channel, message) {
+ var _this2 = this;
+
+ var capacity, counter, data, drained, e, newCapacity, pos, priorityClient, rawCapacity, type, _ref2, _data$split, _data$split2;
+
+ return regeneratorRuntime.wrap(function _callee3$(_context3) {
+ while (1) {
+ switch (_context3.prev = _context3.next) {
+ case 0:
+ _context3.prev = 0;
+ pos = message.indexOf(":");
+ _ref2 = [message.slice(0, pos), message.slice(pos + 1)];
+ type = _ref2[0];
+ data = _ref2[1];
+
+ if (!(type === "capacity")) {
+ _context3.next = 11;
+ break;
+ }
+
+ _context3.next = 8;
+ return this.instance._drainAll(data.length > 0 ? ~~data : void 0);
+
+ case 8:
+ return _context3.abrupt("return", _context3.sent);
+
+ case 11:
+ if (!(type === "capacity-priority")) {
+ _context3.next = 37;
+ break;
+ }
+
+ _data$split = data.split(":");
+ _data$split2 = _slicedToArray(_data$split, 3);
+ rawCapacity = _data$split2[0];
+ priorityClient = _data$split2[1];
+ counter = _data$split2[2];
+ capacity = rawCapacity.length > 0 ? ~~rawCapacity : void 0;
+
+ if (!(priorityClient === this.clientId)) {
+ _context3.next = 28;
+ break;
+ }
+
+ _context3.next = 21;
+ return this.instance._drainAll(capacity);
+
+ case 21:
+ drained = _context3.sent;
+ newCapacity = capacity != null ? capacity - (drained || 0) : "";
+ _context3.next = 25;
+ return this.clients.client.publish(this.instance.channel(), "capacity-priority:".concat(newCapacity, "::").concat(counter));
+
+ case 25:
+ return _context3.abrupt("return", _context3.sent);
+
+ case 28:
+ if (!(priorityClient === "")) {
+ _context3.next = 34;
+ break;
+ }
+
+ clearTimeout(this.capacityPriorityCounters[counter]);
+ delete this.capacityPriorityCounters[counter];
+ return _context3.abrupt("return", this.instance._drainAll(capacity));
+
+ case 34:
+ return _context3.abrupt("return", this.capacityPriorityCounters[counter] = setTimeout(
+ /*#__PURE__*/
+ _asyncToGenerator(
+ /*#__PURE__*/
+ regeneratorRuntime.mark(function _callee2() {
+ var e;
+ return regeneratorRuntime.wrap(function _callee2$(_context2) {
+ while (1) {
+ switch (_context2.prev = _context2.next) {
+ case 0:
+ _context2.prev = 0;
+ delete _this2.capacityPriorityCounters[counter];
+ _context2.next = 4;
+ return _this2.runScript("blacklist_client", [priorityClient]);
+
+ case 4:
+ _context2.next = 6;
+ return _this2.instance._drainAll(capacity);
+
+ case 6:
+ return _context2.abrupt("return", _context2.sent);
+
+ case 9:
+ _context2.prev = 9;
+ _context2.t0 = _context2["catch"](0);
+ e = _context2.t0;
+ return _context2.abrupt("return", _this2.instance.Events.trigger("error", e));
+
+ case 13:
+ case "end":
+ return _context2.stop();
+ }
+ }
+ }, _callee2, null, [[0, 9]]);
+ })), 1000));
+
+ case 35:
+ _context3.next = 45;
+ break;
+
+ case 37:
+ if (!(type === "message")) {
+ _context3.next = 41;
+ break;
+ }
+
+ return _context3.abrupt("return", this.instance.Events.trigger("message", data));
+
+ case 41:
+ if (!(type === "blocked")) {
+ _context3.next = 45;
+ break;
+ }
+
+ _context3.next = 44;
+ return this.instance._dropAllQueued();
+
+ case 44:
+ return _context3.abrupt("return", _context3.sent);
+
+ case 45:
+ _context3.next = 51;
+ break;
+
+ case 47:
+ _context3.prev = 47;
+ _context3.t0 = _context3["catch"](0);
+ e = _context3.t0;
+ return _context3.abrupt("return", this.instance.Events.trigger("error", e));
+
+ case 51:
+ case "end":
+ return _context3.stop();
+ }
+ }
+ }, _callee3, this, [[0, 47]]);
+ }));
+
+ function onMessage(_x2, _x3) {
+ return _onMessage.apply(this, arguments);
+ }
+
+ return onMessage;
+ }()
+ }, {
+ key: "__disconnect__",
+ value: function __disconnect__(flush) {
+ clearInterval(this.heartbeat);
+
+ if (this.sharedConnection) {
+ return this.connection.__removeLimiter__(this.instance);
+ } else {
+ return this.connection.disconnect(flush);
+ }
+ }
+ }, {
+ key: "runScript",
+ value: function () {
+ var _runScript = _asyncToGenerator(
+ /*#__PURE__*/
+ regeneratorRuntime.mark(function _callee4(name, args) {
+ var _this3 = this;
+
+ return regeneratorRuntime.wrap(function _callee4$(_context4) {
+ while (1) {
+ switch (_context4.prev = _context4.next) {
+ case 0:
+ if (name === "init" || name === "register_client") {
+ _context4.next = 3;
+ break;
+ }
+
+ _context4.next = 3;
+ return this.ready;
+
+ case 3:
+ return _context4.abrupt("return", new this.Promise(function (resolve, reject) {
+ var all_args, arr;
+ all_args = [Date.now(), _this3.clientId].concat(args);
+
+ _this3.instance.Events.trigger("debug", "Calling Redis script: ".concat(name, ".lua"), all_args);
+
+ arr = _this3.connection.__scriptArgs__(name, _this3.originalId, all_args, function (err, replies) {
+ if (err != null) {
+ return reject(err);
+ }
+
+ return resolve(replies);
+ });
+ return _this3.connection.__scriptFn__(name).apply(void 0, _toConsumableArray(arr));
+ })["catch"](function (e) {
+ if (e.message === "SETTINGS_KEY_NOT_FOUND") {
+ if (name === "heartbeat") {
+ return _this3.Promise.resolve();
+ } else {
+ return _this3.runScript("init", _this3.prepareInitSettings(false)).then(function () {
+ return _this3.runScript(name, args);
+ });
+ }
+ } else if (e.message === "UNKNOWN_CLIENT") {
+ return _this3.runScript("register_client", [_this3.instance.queued()]).then(function () {
+ return _this3.runScript(name, args);
+ });
+ } else {
+ return _this3.Promise.reject(e);
+ }
+ }));
+
+ case 4:
+ case "end":
+ return _context4.stop();
+ }
+ }
+ }, _callee4, this);
+ }));
+
+ function runScript(_x4, _x5) {
+ return _runScript.apply(this, arguments);
+ }
+
+ return runScript;
+ }()
+ }, {
+ key: "prepareArray",
+ value: function prepareArray(arr) {
+ var i, len, results, x;
+ results = [];
+
+ for (i = 0, len = arr.length; i < len; i++) {
+ x = arr[i];
+ results.push(x != null ? x.toString() : "");
+ }
+
+ return results;
+ }
+ }, {
+ key: "prepareObject",
+ value: function prepareObject(obj) {
+ var arr, k, v;
+ arr = [];
+
+ for (k in obj) {
+ v = obj[k];
+ arr.push(k, v != null ? v.toString() : "");
+ }
+
+ return arr;
+ }
+ }, {
+ key: "prepareInitSettings",
+ value: function prepareInitSettings(clear) {
+ var args;
+ args = this.prepareObject(Object.assign({}, this.storeOptions, {
+ id: this.originalId,
+ version: this.instance.version,
+ groupTimeout: this.timeout,
+ clientTimeout: this.clientTimeout
+ }));
+ args.unshift(clear ? 1 : 0, this.instance.version);
+ return args;
+ }
+ }, {
+ key: "convertBool",
+ value: function convertBool(b) {
+ return !!b;
+ }
+ }, {
+ key: "__updateSettings__",
+ value: function () {
+ var _updateSettings__ = _asyncToGenerator(
+ /*#__PURE__*/
+ regeneratorRuntime.mark(function _callee5(options) {
+ return regeneratorRuntime.wrap(function _callee5$(_context5) {
+ while (1) {
+ switch (_context5.prev = _context5.next) {
+ case 0:
+ _context5.next = 2;
+ return this.runScript("update_settings", this.prepareObject(options));
+
+ case 2:
+ return _context5.abrupt("return", parser$5.overwrite(options, options, this.storeOptions));
+
+ case 3:
+ case "end":
+ return _context5.stop();
+ }
+ }
+ }, _callee5, this);
+ }));
+
+ function __updateSettings__(_x6) {
+ return _updateSettings__.apply(this, arguments);
+ }
+
+ return __updateSettings__;
+ }()
+ }, {
+ key: "__running__",
+ value: function __running__() {
+ return this.runScript("running", []);
+ }
+ }, {
+ key: "__queued__",
+ value: function __queued__() {
+ return this.runScript("queued", []);
+ }
+ }, {
+ key: "__done__",
+ value: function __done__() {
+ return this.runScript("done", []);
+ }
+ }, {
+ key: "__groupCheck__",
+ value: function () {
+ var _groupCheck__ = _asyncToGenerator(
+ /*#__PURE__*/
+ regeneratorRuntime.mark(function _callee6() {
+ return regeneratorRuntime.wrap(function _callee6$(_context6) {
+ while (1) {
+ switch (_context6.prev = _context6.next) {
+ case 0:
+ _context6.t0 = this;
+ _context6.next = 3;
+ return this.runScript("group_check", []);
+
+ case 3:
+ _context6.t1 = _context6.sent;
+ return _context6.abrupt("return", _context6.t0.convertBool.call(_context6.t0, _context6.t1));
+
+ case 5:
+ case "end":
+ return _context6.stop();
+ }
+ }
+ }, _callee6, this);
+ }));
+
+ function __groupCheck__() {
+ return _groupCheck__.apply(this, arguments);
+ }
+
+ return __groupCheck__;
+ }()
+ }, {
+ key: "__incrementReservoir__",
+ value: function __incrementReservoir__(incr) {
+ return this.runScript("increment_reservoir", [incr]);
+ }
+ }, {
+ key: "__currentReservoir__",
+ value: function __currentReservoir__() {
+ return this.runScript("current_reservoir", []);
+ }
+ }, {
+ key: "__check__",
+ value: function () {
+ var _check__ = _asyncToGenerator(
+ /*#__PURE__*/
+ regeneratorRuntime.mark(function _callee7(weight) {
+ return regeneratorRuntime.wrap(function _callee7$(_context7) {
+ while (1) {
+ switch (_context7.prev = _context7.next) {
+ case 0:
+ _context7.t0 = this;
+ _context7.next = 3;
+ return this.runScript("check", this.prepareArray([weight]));
+
+ case 3:
+ _context7.t1 = _context7.sent;
+ return _context7.abrupt("return", _context7.t0.convertBool.call(_context7.t0, _context7.t1));
+
+ case 5:
+ case "end":
+ return _context7.stop();
+ }
+ }
+ }, _callee7, this);
+ }));
+
+ function __check__(_x7) {
+ return _check__.apply(this, arguments);
+ }
+
+ return __check__;
+ }()
+ }, {
+ key: "__register__",
+ value: function () {
+ var _register__ = _asyncToGenerator(
+ /*#__PURE__*/
+ regeneratorRuntime.mark(function _callee8(index, weight, expiration) {
+ var reservoir, success, wait, _ref4, _ref5;
+
+ return regeneratorRuntime.wrap(function _callee8$(_context8) {
+ while (1) {
+ switch (_context8.prev = _context8.next) {
+ case 0:
+ _context8.next = 2;
+ return this.runScript("register", this.prepareArray([index, weight, expiration]));
+
+ case 2:
+ _ref4 = _context8.sent;
+ _ref5 = _slicedToArray(_ref4, 3);
+ success = _ref5[0];
+ wait = _ref5[1];
+ reservoir = _ref5[2];
+ return _context8.abrupt("return", {
+ success: this.convertBool(success),
+ wait: wait,
+ reservoir: reservoir
+ });
+
+ case 8:
+ case "end":
+ return _context8.stop();
+ }
+ }
+ }, _callee8, this);
+ }));
+
+ function __register__(_x8, _x9, _x10) {
+ return _register__.apply(this, arguments);
+ }
+
+ return __register__;
+ }()
+ }, {
+ key: "__submit__",
+ value: function () {
+ var _submit__ = _asyncToGenerator(
+ /*#__PURE__*/
+ regeneratorRuntime.mark(function _callee9(queueLength, weight) {
+ var blocked, e, maxConcurrent, overweight, reachedHWM, strategy, _ref6, _ref7, _e$message$split, _e$message$split2;
+
+ return regeneratorRuntime.wrap(function _callee9$(_context9) {
+ while (1) {
+ switch (_context9.prev = _context9.next) {
+ case 0:
+ _context9.prev = 0;
+ _context9.next = 3;
+ return this.runScript("submit", this.prepareArray([queueLength, weight]));
+
+ case 3:
+ _ref6 = _context9.sent;
+ _ref7 = _slicedToArray(_ref6, 3);
+ reachedHWM = _ref7[0];
+ blocked = _ref7[1];
+ strategy = _ref7[2];
+ return _context9.abrupt("return", {
+ reachedHWM: this.convertBool(reachedHWM),
+ blocked: this.convertBool(blocked),
+ strategy: strategy
+ });
+
+ case 11:
+ _context9.prev = 11;
+ _context9.t0 = _context9["catch"](0);
+ e = _context9.t0;
+
+ if (!(e.message.indexOf("OVERWEIGHT") === 0)) {
+ _context9.next = 23;
+ break;
+ }
+
+ _e$message$split = e.message.split(":");
+ _e$message$split2 = _slicedToArray(_e$message$split, 3);
+ overweight = _e$message$split2[0];
+ weight = _e$message$split2[1];
+ maxConcurrent = _e$message$split2[2];
+ throw new BottleneckError$3("Impossible to add a job having a weight of ".concat(weight, " to a limiter having a maxConcurrent setting of ").concat(maxConcurrent));
+
+ case 23:
+ throw e;
+
+ case 24:
+ case "end":
+ return _context9.stop();
+ }
+ }
+ }, _callee9, this, [[0, 11]]);
+ }));
+
+ function __submit__(_x11, _x12) {
+ return _submit__.apply(this, arguments);
+ }
+
+ return __submit__;
+ }()
+ }, {
+ key: "__free__",
+ value: function () {
+ var _free__ = _asyncToGenerator(
+ /*#__PURE__*/
+ regeneratorRuntime.mark(function _callee10(index, weight) {
+ var running;
+ return regeneratorRuntime.wrap(function _callee10$(_context10) {
+ while (1) {
+ switch (_context10.prev = _context10.next) {
+ case 0:
+ _context10.next = 2;
+ return this.runScript("free", this.prepareArray([index]));
+
+ case 2:
+ running = _context10.sent;
+ return _context10.abrupt("return", {
+ running: running
+ });
+
+ case 4:
+ case "end":
+ return _context10.stop();
+ }
+ }
+ }, _callee10, this);
+ }));
+
+ function __free__(_x13, _x14) {
+ return _free__.apply(this, arguments);
+ }
+
+ return __free__;
+ }()
+ }]);
+
+ return RedisDatastore;
+ }();
+
+ var RedisDatastore_1 = RedisDatastore;
+
+ var BottleneckError$4, States;
+ BottleneckError$4 = BottleneckError_1;
+
+ States =
+ /*#__PURE__*/
+ function () {
+ function States(status1) {
+ _classCallCheck(this, States);
+
+ this.status = status1;
+ this._jobs = {};
+ this.counts = this.status.map(function () {
+ return 0;
+ });
+ }
+
+ _createClass(States, [{
+ key: "next",
+ value: function next(id) {
+ var current, next;
+ current = this._jobs[id];
+ next = current + 1;
+
+ if (current != null && next < this.status.length) {
+ this.counts[current]--;
+ this.counts[next]++;
+ return this._jobs[id]++;
+ } else if (current != null) {
+ this.counts[current]--;
+ return delete this._jobs[id];
+ }
+ }
+ }, {
+ key: "start",
+ value: function start(id) {
+ var initial;
+ initial = 0;
+ this._jobs[id] = initial;
+ return this.counts[initial]++;
+ }
+ }, {
+ key: "remove",
+ value: function remove(id) {
+ var current;
+ current = this._jobs[id];
+
+ if (current != null) {
+ this.counts[current]--;
+ delete this._jobs[id];
+ }
+
+ return current != null;
+ }
+ }, {
+ key: "jobStatus",
+ value: function jobStatus(id) {
+ var ref;
+ return (ref = this.status[this._jobs[id]]) != null ? ref : null;
+ }
+ }, {
+ key: "statusJobs",
+ value: function statusJobs(status) {
+ var k, pos, ref, results, v;
+
+ if (status != null) {
+ pos = this.status.indexOf(status);
+
+ if (pos < 0) {
+ throw new BottleneckError$4("status must be one of ".concat(this.status.join(', ')));
+ }
+
+ ref = this._jobs;
+ results = [];
+
+ for (k in ref) {
+ v = ref[k];
+
+ if (v === pos) {
+ results.push(k);
+ }
+ }
+
+ return results;
+ } else {
+ return Object.keys(this._jobs);
+ }
+ }
+ }, {
+ key: "statusCounts",
+ value: function statusCounts() {
+ var _this = this;
+
+ return this.counts.reduce(function (acc, v, i) {
+ acc[_this.status[i]] = v;
+ return acc;
+ }, {});
+ }
+ }]);
+
+ return States;
+ }();
+
+ var States_1 = States;
+
+ var DLList$2, Sync;
+ DLList$2 = DLList_1;
+
+ Sync =
+ /*#__PURE__*/
+ function () {
+ function Sync(name, Promise) {
+ _classCallCheck(this, Sync);
+
+ this.schedule = this.schedule.bind(this);
+ this.name = name;
+ this.Promise = Promise;
+ this._running = 0;
+ this._queue = new DLList$2();
+ }
+
+ _createClass(Sync, [{
+ key: "isEmpty",
+ value: function isEmpty() {
+ return this._queue.length === 0;
+ }
+ }, {
+ key: "_tryToRun",
+ value: function () {
+ var _tryToRun2 = _asyncToGenerator(
+ /*#__PURE__*/
+ regeneratorRuntime.mark(function _callee2() {
+ var args, cb, error, reject, resolve, returned, task, _this$_queue$shift;
+
+ return regeneratorRuntime.wrap(function _callee2$(_context2) {
+ while (1) {
+ switch (_context2.prev = _context2.next) {
+ case 0:
+ if (!(this._running < 1 && this._queue.length > 0)) {
+ _context2.next = 13;
+ break;
+ }
+
+ this._running++;
+ _this$_queue$shift = this._queue.shift();
+ task = _this$_queue$shift.task;
+ args = _this$_queue$shift.args;
+ resolve = _this$_queue$shift.resolve;
+ reject = _this$_queue$shift.reject;
+ _context2.next = 9;
+ return _asyncToGenerator(
+ /*#__PURE__*/
+ regeneratorRuntime.mark(function _callee() {
+ return regeneratorRuntime.wrap(function _callee$(_context) {
+ while (1) {
+ switch (_context.prev = _context.next) {
+ case 0:
+ _context.prev = 0;
+ _context.next = 3;
+ return task.apply(void 0, _toConsumableArray(args));
+
+ case 3:
+ returned = _context.sent;
+ return _context.abrupt("return", function () {
+ return resolve(returned);
+ });
+
+ case 7:
+ _context.prev = 7;
+ _context.t0 = _context["catch"](0);
+ error = _context.t0;
+ return _context.abrupt("return", function () {
+ return reject(error);
+ });
+
+ case 11:
+ case "end":
+ return _context.stop();
+ }
+ }
+ }, _callee, null, [[0, 7]]);
+ }))();
+
+ case 9:
+ cb = _context2.sent;
+ this._running--;
+
+ this._tryToRun();
+
+ return _context2.abrupt("return", cb());
+
+ case 13:
+ case "end":
+ return _context2.stop();
+ }
+ }
+ }, _callee2, this);
+ }));
+
+ function _tryToRun() {
+ return _tryToRun2.apply(this, arguments);
+ }
+
+ return _tryToRun;
+ }()
+ }, {
+ key: "schedule",
+ value: function schedule(task) {
+ var promise, reject, resolve;
+ resolve = reject = null;
+ promise = new this.Promise(function (_resolve, _reject) {
+ resolve = _resolve;
+ return reject = _reject;
+ });
+
+ for (var _len = arguments.length, args = new Array(_len > 1 ? _len - 1 : 0), _key = 1; _key < _len; _key++) {
+ args[_key - 1] = arguments[_key];
+ }
+
+ this._queue.push({
+ task: task,
+ args: args,
+ resolve: resolve,
+ reject: reject
+ });
+
+ this._tryToRun();
+
+ return promise;
+ }
+ }]);
+
+ return Sync;
+ }();
+
+ var Sync_1 = Sync;
+
+ var version = "2.19.5";
+ var version$1 = {
+ version: version
+ };
+
+ var version$2 = /*#__PURE__*/Object.freeze({
+ version: version,
+ default: version$1
+ });
+
+ var Events$4, Group, IORedisConnection$2, RedisConnection$2, Scripts$3, parser$6;
+ parser$6 = parser;
+ Events$4 = Events_1;
+ RedisConnection$2 = RedisConnection_1;
+ IORedisConnection$2 = IORedisConnection_1;
+ Scripts$3 = Scripts;
+
+ Group = function () {
+ var Group =
+ /*#__PURE__*/
+ function () {
+ function Group() {
+ var limiterOptions = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};
+
+ _classCallCheck(this, Group);
+
+ this.deleteKey = this.deleteKey.bind(this);
+ this.limiterOptions = limiterOptions;
+ parser$6.load(this.limiterOptions, this.defaults, this);
+ this.Events = new Events$4(this);
+ this.instances = {};
+ this.Bottleneck = Bottleneck_1;
+
+ this._startAutoCleanup();
+
+ this.sharedConnection = this.connection != null;
+
+ if (this.connection == null) {
+ if (this.limiterOptions.datastore === "redis") {
+ this.connection = new RedisConnection$2(Object.assign({}, this.limiterOptions, {
+ Events: this.Events
+ }));
+ } else if (this.limiterOptions.datastore === "ioredis") {
+ this.connection = new IORedisConnection$2(Object.assign({}, this.limiterOptions, {
+ Events: this.Events
+ }));
+ }
+ }
+ }
+
+ _createClass(Group, [{
+ key: "key",
+ value: function key() {
+ var _this = this;
+
+ var _key = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : "";
+
+ var ref;
+ return (ref = this.instances[_key]) != null ? ref : function () {
+ var limiter;
+ limiter = _this.instances[_key] = new _this.Bottleneck(Object.assign(_this.limiterOptions, {
+ id: "".concat(_this.id, "-").concat(_key),
+ timeout: _this.timeout,
+ connection: _this.connection
+ }));
+
+ _this.Events.trigger("created", limiter, _key);
+
+ return limiter;
+ }();
+ }
+ }, {
+ key: "deleteKey",
+ value: function () {
+ var _deleteKey = _asyncToGenerator(
+ /*#__PURE__*/
+ regeneratorRuntime.mark(function _callee() {
+ var key,
+ deleted,
+ instance,
+ _args = arguments;
+ return regeneratorRuntime.wrap(function _callee$(_context) {
+ while (1) {
+ switch (_context.prev = _context.next) {
+ case 0:
+ key = _args.length > 0 && _args[0] !== undefined ? _args[0] : "";
+ instance = this.instances[key];
+
+ if (!this.connection) {
+ _context.next = 6;
+ break;
+ }
+
+ _context.next = 5;
+ return this.connection.__runCommand__(['del'].concat(_toConsumableArray(Scripts$3.allKeys("".concat(this.id, "-").concat(key)))));
+
+ case 5:
+ deleted = _context.sent;
+
+ case 6:
+ if (!(instance != null)) {
+ _context.next = 10;
+ break;
+ }
+
+ delete this.instances[key];
+ _context.next = 10;
+ return instance.disconnect();
+
+ case 10:
+ return _context.abrupt("return", instance != null || deleted > 0);
+
+ case 11:
+ case "end":
+ return _context.stop();
+ }
+ }
+ }, _callee, this);
+ }));
+
+ function deleteKey() {
+ return _deleteKey.apply(this, arguments);
+ }
+
+ return deleteKey;
+ }()
+ }, {
+ key: "limiters",
+ value: function limiters() {
+ var k, ref, results, v;
+ ref = this.instances;
+ results = [];
+
+ for (k in ref) {
+ v = ref[k];
+ results.push({
+ key: k,
+ limiter: v
+ });
+ }
+
+ return results;
+ }
+ }, {
+ key: "keys",
+ value: function keys() {
+ return Object.keys(this.instances);
+ }
+ }, {
+ key: "clusterKeys",
+ value: function () {
+ var _clusterKeys = _asyncToGenerator(
+ /*#__PURE__*/
+ regeneratorRuntime.mark(function _callee2() {
+ var cursor, end, found, i, k, keys, len, next, start, _ref, _ref2;
+
+ return regeneratorRuntime.wrap(function _callee2$(_context2) {
+ while (1) {
+ switch (_context2.prev = _context2.next) {
+ case 0:
+ if (!(this.connection == null)) {
+ _context2.next = 2;
+ break;
+ }
+
+ return _context2.abrupt("return", this.Promise.resolve(this.keys()));
+
+ case 2:
+ keys = [];
+ cursor = null;
+ start = "b_".concat(this.id, "-").length;
+ end = "_settings".length;
+
+ case 6:
+ if (!(cursor !== 0)) {
+ _context2.next = 17;
+ break;
+ }
+
+ _context2.next = 9;
+ return this.connection.__runCommand__(["scan", cursor != null ? cursor : 0, "match", "b_".concat(this.id, "-*_settings"), "count", 10000]);
+
+ case 9:
+ _ref = _context2.sent;
+ _ref2 = _slicedToArray(_ref, 2);
+ next = _ref2[0];
+ found = _ref2[1];
+ cursor = ~~next;
+
+ for (i = 0, len = found.length; i < len; i++) {
+ k = found[i];
+ keys.push(k.slice(start, -end));
+ }
+
+ _context2.next = 6;
+ break;
+
+ case 17:
+ return _context2.abrupt("return", keys);
+
+ case 18:
+ case "end":
+ return _context2.stop();
+ }
+ }
+ }, _callee2, this);
+ }));
+
+ function clusterKeys() {
+ return _clusterKeys.apply(this, arguments);
+ }
+
+ return clusterKeys;
+ }()
+ }, {
+ key: "_startAutoCleanup",
+ value: function _startAutoCleanup() {
+ var _this2 = this;
+
+ var base;
+ clearInterval(this.interval);
+ return typeof (base = this.interval = setInterval(
+ /*#__PURE__*/
+ _asyncToGenerator(
+ /*#__PURE__*/
+ regeneratorRuntime.mark(function _callee3() {
+ var e, k, ref, results, time, v;
+ return regeneratorRuntime.wrap(function _callee3$(_context3) {
+ while (1) {
+ switch (_context3.prev = _context3.next) {
+ case 0:
+ time = Date.now();
+ ref = _this2.instances;
+ results = [];
+ _context3.t0 = regeneratorRuntime.keys(ref);
+
+ case 4:
+ if ((_context3.t1 = _context3.t0()).done) {
+ _context3.next = 23;
+ break;
+ }
+
+ k = _context3.t1.value;
+ v = ref[k];
+ _context3.prev = 7;
+ _context3.next = 10;
+ return v._store.__groupCheck__(time);
+
+ case 10:
+ if (!_context3.sent) {
+ _context3.next = 14;
+ break;
+ }
+
+ results.push(_this2.deleteKey(k));
+ _context3.next = 15;
+ break;
+
+ case 14:
+ results.push(void 0);
+
+ case 15:
+ _context3.next = 21;
+ break;
+
+ case 17:
+ _context3.prev = 17;
+ _context3.t2 = _context3["catch"](7);
+ e = _context3.t2;
+ results.push(v.Events.trigger("error", e));
+
+ case 21:
+ _context3.next = 4;
+ break;
+
+ case 23:
+ return _context3.abrupt("return", results);
+
+ case 24:
+ case "end":
+ return _context3.stop();
+ }
+ }
+ }, _callee3, null, [[7, 17]]);
+ })), this.timeout / 2)).unref === "function" ? base.unref() : void 0;
+ }
+ }, {
+ key: "updateSettings",
+ value: function updateSettings() {
+ var options = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};
+ parser$6.overwrite(options, this.defaults, this);
+ parser$6.overwrite(options, options, this.limiterOptions);
+
+ if (options.timeout != null) {
+ return this._startAutoCleanup();
+ }
+ }
+ }, {
+ key: "disconnect",
+ value: function disconnect() {
+ var flush = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : true;
+ var ref;
+
+ if (!this.sharedConnection) {
+ return (ref = this.connection) != null ? ref.disconnect(flush) : void 0;
+ }
+ }
+ }]);
+
+ return Group;
+ }();
+ Group.prototype.defaults = {
+ timeout: 1000 * 60 * 5,
+ connection: null,
+ Promise: Promise,
+ id: "group-key"
+ };
+ return Group;
+ }.call(commonjsGlobal);
+
+ var Group_1 = Group;
+
+ var Batcher, Events$5, parser$7;
+ parser$7 = parser;
+ Events$5 = Events_1;
+
+ Batcher = function () {
+ var Batcher =
+ /*#__PURE__*/
+ function () {
+ function Batcher() {
+ var options = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};
+
+ _classCallCheck(this, Batcher);
+
+ this.options = options;
+ parser$7.load(this.options, this.defaults, this);
+ this.Events = new Events$5(this);
+ this._arr = [];
+
+ this._resetPromise();
+
+ this._lastFlush = Date.now();
+ }
+
+ _createClass(Batcher, [{
+ key: "_resetPromise",
+ value: function _resetPromise() {
+ var _this = this;
+
+ return this._promise = new this.Promise(function (res, rej) {
+ return _this._resolve = res;
+ });
+ }
+ }, {
+ key: "_flush",
+ value: function _flush() {
+ clearTimeout(this._timeout);
+ this._lastFlush = Date.now();
+
+ this._resolve();
+
+ this.Events.trigger("batch", this._arr);
+ this._arr = [];
+ return this._resetPromise();
+ }
+ }, {
+ key: "add",
+ value: function add(data) {
+ var _this2 = this;
+
+ var ret;
+
+ this._arr.push(data);
+
+ ret = this._promise;
+
+ if (this._arr.length === this.maxSize) {
+ this._flush();
+ } else if (this.maxTime != null && this._arr.length === 1) {
+ this._timeout = setTimeout(function () {
+ return _this2._flush();
+ }, this.maxTime);
+ }
+
+ return ret;
+ }
+ }]);
+
+ return Batcher;
+ }();
+ Batcher.prototype.defaults = {
+ maxTime: null,
+ maxSize: null,
+ Promise: Promise
+ };
+ return Batcher;
+ }.call(commonjsGlobal);
+
+ var Batcher_1 = Batcher;
+
+ var require$$8 = getCjsExportFromNamespace(version$2);
+
+ var Bottleneck,
+ DEFAULT_PRIORITY$1,
+ Events$6,
+ Job$1,
+ LocalDatastore$1,
+ NUM_PRIORITIES$1,
+ Queues$1,
+ RedisDatastore$1,
+ States$1,
+ Sync$1,
+ parser$8,
+ splice = [].splice;
+ NUM_PRIORITIES$1 = 10;
+ DEFAULT_PRIORITY$1 = 5;
+ parser$8 = parser;
+ Queues$1 = Queues_1;
+ Job$1 = Job_1;
+ LocalDatastore$1 = LocalDatastore_1;
+ RedisDatastore$1 = RedisDatastore_1;
+ Events$6 = Events_1;
+ States$1 = States_1;
+ Sync$1 = Sync_1;
+
+ Bottleneck = function () {
+ var Bottleneck =
+ /*#__PURE__*/
+ function () {
+ function Bottleneck() {
+ var _this = this;
+
+ var options = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};
+
+ _classCallCheck(this, Bottleneck);
+
+ var storeInstanceOptions, storeOptions;
+ this._addToQueue = this._addToQueue.bind(this);
+
+ for (var _len = arguments.length, invalid = new Array(_len > 1 ? _len - 1 : 0), _key = 1; _key < _len; _key++) {
+ invalid[_key - 1] = arguments[_key];
+ }
+
+ this._validateOptions(options, invalid);
+
+ parser$8.load(options, this.instanceDefaults, this);
+ this._queues = new Queues$1(NUM_PRIORITIES$1);
+ this._scheduled = {};
+ this._states = new States$1(["RECEIVED", "QUEUED", "RUNNING", "EXECUTING"].concat(this.trackDoneStatus ? ["DONE"] : []));
+ this._limiter = null;
+ this.Events = new Events$6(this);
+ this._submitLock = new Sync$1("submit", this.Promise);
+ this._registerLock = new Sync$1("register", this.Promise);
+ storeOptions = parser$8.load(options, this.storeDefaults, {});
+
+ this._store = function () {
+ if (this.datastore === "redis" || this.datastore === "ioredis" || this.connection != null) {
+ storeInstanceOptions = parser$8.load(options, this.redisStoreDefaults, {});
+ return new RedisDatastore$1(this, storeOptions, storeInstanceOptions);
+ } else if (this.datastore === "local") {
+ storeInstanceOptions = parser$8.load(options, this.localStoreDefaults, {});
+ return new LocalDatastore$1(this, storeOptions, storeInstanceOptions);
+ } else {
+ throw new Bottleneck.prototype.BottleneckError("Invalid datastore type: ".concat(this.datastore));
+ }
+ }.call(this);
+
+ this._queues.on("leftzero", function () {
+ var ref;
+ return (ref = _this._store.heartbeat) != null ? typeof ref.ref === "function" ? ref.ref() : void 0 : void 0;
+ });
+
+ this._queues.on("zero", function () {
+ var ref;
+ return (ref = _this._store.heartbeat) != null ? typeof ref.unref === "function" ? ref.unref() : void 0 : void 0;
+ });
+ }
+
+ _createClass(Bottleneck, [{
+ key: "_validateOptions",
+ value: function _validateOptions(options, invalid) {
+ if (!(options != null && _typeof(options) === "object" && invalid.length === 0)) {
+ throw new Bottleneck.prototype.BottleneckError("Bottleneck v2 takes a single object argument. Refer to https://github.com/SGrondin/bottleneck#upgrading-to-v2 if you're upgrading from Bottleneck v1.");
+ }
+ }
+ }, {
+ key: "ready",
+ value: function ready() {
+ return this._store.ready;
+ }
+ }, {
+ key: "clients",
+ value: function clients() {
+ return this._store.clients;
+ }
+ }, {
+ key: "channel",
+ value: function channel() {
+ return "b_".concat(this.id);
+ }
+ }, {
+ key: "channel_client",
+ value: function channel_client() {
+ return "b_".concat(this.id, "_").concat(this._store.clientId);
+ }
+ }, {
+ key: "publish",
+ value: function publish(message) {
+ return this._store.__publish__(message);
+ }
+ }, {
+ key: "disconnect",
+ value: function disconnect() {
+ var flush = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : true;
+ return this._store.__disconnect__(flush);
+ }
+ }, {
+ key: "chain",
+ value: function chain(_limiter) {
+ this._limiter = _limiter;
+ return this;
+ }
+ }, {
+ key: "queued",
+ value: function queued(priority) {
+ return this._queues.queued(priority);
+ }
+ }, {
+ key: "clusterQueued",
+ value: function clusterQueued() {
+ return this._store.__queued__();
+ }
+ }, {
+ key: "empty",
+ value: function empty() {
+ return this.queued() === 0 && this._submitLock.isEmpty();
+ }
+ }, {
+ key: "running",
+ value: function running() {
+ return this._store.__running__();
+ }
+ }, {
+ key: "done",
+ value: function done() {
+ return this._store.__done__();
+ }
+ }, {
+ key: "jobStatus",
+ value: function jobStatus(id) {
+ return this._states.jobStatus(id);
+ }
+ }, {
+ key: "jobs",
+ value: function jobs(status) {
+ return this._states.statusJobs(status);
+ }
+ }, {
+ key: "counts",
+ value: function counts() {
+ return this._states.statusCounts();
+ }
+ }, {
+ key: "_randomIndex",
+ value: function _randomIndex() {
+ return Math.random().toString(36).slice(2);
+ }
+ }, {
+ key: "check",
+ value: function check() {
+ var weight = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : 1;
+ return this._store.__check__(weight);
+ }
+ }, {
+ key: "_clearGlobalState",
+ value: function _clearGlobalState(index) {
+ if (this._scheduled[index] != null) {
+ clearTimeout(this._scheduled[index].expiration);
+ delete this._scheduled[index];
+ return true;
+ } else {
+ return false;
+ }
+ }
+ }, {
+ key: "_free",
+ value: function () {
+ var _free2 = _asyncToGenerator(
+ /*#__PURE__*/
+ regeneratorRuntime.mark(function _callee(index, job, options, eventInfo) {
+ var e, running, _ref;
+
+ return regeneratorRuntime.wrap(function _callee$(_context) {
+ while (1) {
+ switch (_context.prev = _context.next) {
+ case 0:
+ _context.prev = 0;
+ _context.next = 3;
+ return this._store.__free__(index, options.weight);
+
+ case 3:
+ _ref = _context.sent;
+ running = _ref.running;
+ this.Events.trigger("debug", "Freed ".concat(options.id), eventInfo);
+
+ if (!(running === 0 && this.empty())) {
+ _context.next = 8;
+ break;
+ }
+
+ return _context.abrupt("return", this.Events.trigger("idle"));
+
+ case 8:
+ _context.next = 14;
+ break;
+
+ case 10:
+ _context.prev = 10;
+ _context.t0 = _context["catch"](0);
+ e = _context.t0;
+ return _context.abrupt("return", this.Events.trigger("error", e));
+
+ case 14:
+ case "end":
+ return _context.stop();
+ }
+ }
+ }, _callee, this, [[0, 10]]);
+ }));
+
+ function _free(_x, _x2, _x3, _x4) {
+ return _free2.apply(this, arguments);
+ }
+
+ return _free;
+ }()
+ }, {
+ key: "_run",
+ value: function _run(index, job, wait) {
+ var _this2 = this;
+
+ var clearGlobalState, free, run;
+ job.doRun();
+ clearGlobalState = this._clearGlobalState.bind(this, index);
+ run = this._run.bind(this, index, job);
+ free = this._free.bind(this, index, job);
+ return this._scheduled[index] = {
+ timeout: setTimeout(function () {
+ return job.doExecute(_this2._limiter, clearGlobalState, run, free);
+ }, wait),
+ expiration: job.options.expiration != null ? setTimeout(function () {
+ return job.doExpire(clearGlobalState, run, free);
+ }, wait + job.options.expiration) : void 0,
+ job: job
+ };
+ }
+ }, {
+ key: "_drainOne",
+ value: function _drainOne(capacity) {
+ var _this3 = this;
+
+ return this._registerLock.schedule(function () {
+ var args, index, next, options, queue;
+
+ if (_this3.queued() === 0) {
+ return _this3.Promise.resolve(null);
+ }
+
+ queue = _this3._queues.getFirst();
+
+ var _next = next = queue.first();
+
+ options = _next.options;
+ args = _next.args;
+
+ if (capacity != null && options.weight > capacity) {
+ return _this3.Promise.resolve(null);
+ }
+
+ _this3.Events.trigger("debug", "Draining ".concat(options.id), {
+ args: args,
+ options: options
+ });
+
+ index = _this3._randomIndex();
+ return _this3._store.__register__(index, options.weight, options.expiration).then(function (_ref2) {
+ var success = _ref2.success,
+ wait = _ref2.wait,
+ reservoir = _ref2.reservoir;
+ var empty;
+
+ _this3.Events.trigger("debug", "Drained ".concat(options.id), {
+ success: success,
+ args: args,
+ options: options
+ });
+
+ if (success) {
+ queue.shift();
+ empty = _this3.empty();
+
+ if (empty) {
+ _this3.Events.trigger("empty");
+ }
+
+ if (reservoir === 0) {
+ _this3.Events.trigger("depleted", empty);
+ }
+
+ _this3._run(index, next, wait);
+
+ return _this3.Promise.resolve(options.weight);
+ } else {
+ return _this3.Promise.resolve(null);
+ }
+ });
+ });
+ }
+ }, {
+ key: "_drainAll",
+ value: function _drainAll(capacity) {
+ var _this4 = this;
+
+ var total = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : 0;
+ return this._drainOne(capacity).then(function (drained) {
+ var newCapacity;
+
+ if (drained != null) {
+ newCapacity = capacity != null ? capacity - drained : capacity;
+ return _this4._drainAll(newCapacity, total + drained);
+ } else {
+ return _this4.Promise.resolve(total);
+ }
+ })["catch"](function (e) {
+ return _this4.Events.trigger("error", e);
+ });
+ }
+ }, {
+ key: "_dropAllQueued",
+ value: function _dropAllQueued(message) {
+ return this._queues.shiftAll(function (job) {
+ return job.doDrop({
+ message: message
+ });
+ });
+ }
+ }, {
+ key: "stop",
+ value: function stop() {
+ var _this5 = this;
+
+ var options = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};
+ var done, waitForExecuting;
+ options = parser$8.load(options, this.stopDefaults);
+
+ waitForExecuting = function waitForExecuting(at) {
+ var finished;
+
+ finished = function finished() {
+ var counts;
+ counts = _this5._states.counts;
+ return counts[0] + counts[1] + counts[2] + counts[3] === at;
+ };
+
+ return new _this5.Promise(function (resolve, reject) {
+ if (finished()) {
+ return resolve();
+ } else {
+ return _this5.on("done", function () {
+ if (finished()) {
+ _this5.removeAllListeners("done");
+
+ return resolve();
+ }
+ });
+ }
+ });
+ };
+
+ done = options.dropWaitingJobs ? (this._run = function (index, next) {
+ return next.doDrop({
+ message: options.dropErrorMessage
+ });
+ }, this._drainOne = function () {
+ return _this5.Promise.resolve(null);
+ }, this._registerLock.schedule(function () {
+ return _this5._submitLock.schedule(function () {
+ var k, ref, v;
+ ref = _this5._scheduled;
+
+ for (k in ref) {
+ v = ref[k];
+
+ if (_this5.jobStatus(v.job.options.id) === "RUNNING") {
+ clearTimeout(v.timeout);
+ clearTimeout(v.expiration);
+ v.job.doDrop({
+ message: options.dropErrorMessage
+ });
+ }
+ }
+
+ _this5._dropAllQueued(options.dropErrorMessage);
+
+ return waitForExecuting(0);
+ });
+ })) : this.schedule({
+ priority: NUM_PRIORITIES$1 - 1,
+ weight: 0
+ }, function () {
+ return waitForExecuting(1);
+ });
+
+ this._receive = function (job) {
+ return job._reject(new Bottleneck.prototype.BottleneckError(options.enqueueErrorMessage));
+ };
+
+ this.stop = function () {
+ return _this5.Promise.reject(new Bottleneck.prototype.BottleneckError("stop() has already been called"));
+ };
+
+ return done;
+ }
+ }, {
+ key: "_addToQueue",
+ value: function () {
+ var _addToQueue2 = _asyncToGenerator(
+ /*#__PURE__*/
+ regeneratorRuntime.mark(function _callee2(job) {
+ var args, blocked, error, options, reachedHWM, shifted, strategy, _ref3;
+
+ return regeneratorRuntime.wrap(function _callee2$(_context2) {
+ while (1) {
+ switch (_context2.prev = _context2.next) {
+ case 0:
+ args = job.args;
+ options = job.options;
+ _context2.prev = 2;
+ _context2.next = 5;
+ return this._store.__submit__(this.queued(), options.weight);
+
+ case 5:
+ _ref3 = _context2.sent;
+ reachedHWM = _ref3.reachedHWM;
+ blocked = _ref3.blocked;
+ strategy = _ref3.strategy;
+ _context2.next = 17;
+ break;
+
+ case 11:
+ _context2.prev = 11;
+ _context2.t0 = _context2["catch"](2);
+ error = _context2.t0;
+ this.Events.trigger("debug", "Could not queue ".concat(options.id), {
+ args: args,
+ options: options,
+ error: error
+ });
+ job.doDrop({
+ error: error
+ });
+ return _context2.abrupt("return", false);
+
+ case 17:
+ if (!blocked) {
+ _context2.next = 22;
+ break;
+ }
+
+ job.doDrop();
+ return _context2.abrupt("return", true);
+
+ case 22:
+ if (!reachedHWM) {
+ _context2.next = 28;
+ break;
+ }
+
+ shifted = strategy === Bottleneck.prototype.strategy.LEAK ? this._queues.shiftLastFrom(options.priority) : strategy === Bottleneck.prototype.strategy.OVERFLOW_PRIORITY ? this._queues.shiftLastFrom(options.priority + 1) : strategy === Bottleneck.prototype.strategy.OVERFLOW ? job : void 0;
+
+ if (shifted != null) {
+ shifted.doDrop();
+ }
+
+ if (!(shifted == null || strategy === Bottleneck.prototype.strategy.OVERFLOW)) {
+ _context2.next = 28;
+ break;
+ }
+
+ if (shifted == null) {
+ job.doDrop();
+ }
+
+ return _context2.abrupt("return", reachedHWM);
+
+ case 28:
+ job.doQueue(reachedHWM, blocked);
+
+ this._queues.push(job);
+
+ _context2.next = 32;
+ return this._drainAll();
+
+ case 32:
+ return _context2.abrupt("return", reachedHWM);
+
+ case 33:
+ case "end":
+ return _context2.stop();
+ }
+ }
+ }, _callee2, this, [[2, 11]]);
+ }));
+
+ function _addToQueue(_x5) {
+ return _addToQueue2.apply(this, arguments);
+ }
+
+ return _addToQueue;
+ }()
+ }, {
+ key: "_receive",
+ value: function _receive(job) {
+ if (this._states.jobStatus(job.options.id) != null) {
+ job._reject(new Bottleneck.prototype.BottleneckError("A job with the same id already exists (id=".concat(job.options.id, ")")));
+
+ return false;
+ } else {
+ job.doReceive();
+ return this._submitLock.schedule(this._addToQueue, job);
+ }
+ }
+ }, {
+ key: "submit",
+ value: function submit() {
+ var _this6 = this;
+
+ for (var _len2 = arguments.length, args = new Array(_len2), _key2 = 0; _key2 < _len2; _key2++) {
+ args[_key2] = arguments[_key2];
+ }
+
+ var cb, fn, job, options, ref, ref1, task;
+
+ if (typeof args[0] === "function") {
+ var _ref4, _ref5, _splice$call, _splice$call2;
+
+ ref = args, (_ref4 = ref, _ref5 = _toArray(_ref4), fn = _ref5[0], args = _ref5.slice(1), _ref4), (_splice$call = splice.call(args, -1), _splice$call2 = _slicedToArray(_splice$call, 1), cb = _splice$call2[0], _splice$call);
+ options = parser$8.load({}, this.jobDefaults);
+ } else {
+ var _ref6, _ref7, _splice$call3, _splice$call4;
+
+ ref1 = args, (_ref6 = ref1, _ref7 = _toArray(_ref6), options = _ref7[0], fn = _ref7[1], args = _ref7.slice(2), _ref6), (_splice$call3 = splice.call(args, -1), _splice$call4 = _slicedToArray(_splice$call3, 1), cb = _splice$call4[0], _splice$call3);
+ options = parser$8.load(options, this.jobDefaults);
+ }
+
+ task = function task() {
+ for (var _len3 = arguments.length, args = new Array(_len3), _key3 = 0; _key3 < _len3; _key3++) {
+ args[_key3] = arguments[_key3];
+ }
+
+ return new _this6.Promise(function (resolve, reject) {
+ return fn.apply(void 0, args.concat([function () {
+ for (var _len4 = arguments.length, args = new Array(_len4), _key4 = 0; _key4 < _len4; _key4++) {
+ args[_key4] = arguments[_key4];
+ }
+
+ return (args[0] != null ? reject : resolve)(args);
+ }]));
+ });
+ };
+
+ job = new Job$1(task, args, options, this.jobDefaults, this.rejectOnDrop, this.Events, this._states, this.Promise);
+ job.promise.then(function (args) {
+ return typeof cb === "function" ? cb.apply(void 0, _toConsumableArray(args)) : void 0;
+ })["catch"](function (args) {
+ if (Array.isArray(args)) {
+ return typeof cb === "function" ? cb.apply(void 0, _toConsumableArray(args)) : void 0;
+ } else {
+ return typeof cb === "function" ? cb(args) : void 0;
+ }
+ });
+ return this._receive(job);
+ }
+ }, {
+ key: "schedule",
+ value: function schedule() {
+ for (var _len5 = arguments.length, args = new Array(_len5), _key5 = 0; _key5 < _len5; _key5++) {
+ args[_key5] = arguments[_key5];
+ }
+
+ var job, options, task;
+
+ if (typeof args[0] === "function") {
+ var _args3 = args;
+
+ var _args4 = _toArray(_args3);
+
+ task = _args4[0];
+ args = _args4.slice(1);
+ options = {};
+ } else {
+ var _args5 = args;
+
+ var _args6 = _toArray(_args5);
+
+ options = _args6[0];
+ task = _args6[1];
+ args = _args6.slice(2);
+ }
+
+ job = new Job$1(task, args, options, this.jobDefaults, this.rejectOnDrop, this.Events, this._states, this.Promise);
+
+ this._receive(job);
+
+ return job.promise;
+ }
+ }, {
+ key: "wrap",
+ value: function wrap(fn) {
+ var schedule, wrapped;
+ schedule = this.schedule.bind(this);
+
+ wrapped = function wrapped() {
+ for (var _len6 = arguments.length, args = new Array(_len6), _key6 = 0; _key6 < _len6; _key6++) {
+ args[_key6] = arguments[_key6];
+ }
+
+ return schedule.apply(void 0, [fn.bind(this)].concat(args));
+ };
+
+ wrapped.withOptions = function (options) {
+ for (var _len7 = arguments.length, args = new Array(_len7 > 1 ? _len7 - 1 : 0), _key7 = 1; _key7 < _len7; _key7++) {
+ args[_key7 - 1] = arguments[_key7];
+ }
+
+ return schedule.apply(void 0, [options, fn].concat(args));
+ };
+
+ return wrapped;
+ }
+ }, {
+ key: "updateSettings",
+ value: function () {
+ var _updateSettings = _asyncToGenerator(
+ /*#__PURE__*/
+ regeneratorRuntime.mark(function _callee3() {
+ var options,
+ _args7 = arguments;
+ return regeneratorRuntime.wrap(function _callee3$(_context3) {
+ while (1) {
+ switch (_context3.prev = _context3.next) {
+ case 0:
+ options = _args7.length > 0 && _args7[0] !== undefined ? _args7[0] : {};
+ _context3.next = 3;
+ return this._store.__updateSettings__(parser$8.overwrite(options, this.storeDefaults));
+
+ case 3:
+ parser$8.overwrite(options, this.instanceDefaults, this);
+ return _context3.abrupt("return", this);
+
+ case 5:
+ case "end":
+ return _context3.stop();
+ }
+ }
+ }, _callee3, this);
+ }));
+
+ function updateSettings() {
+ return _updateSettings.apply(this, arguments);
+ }
+
+ return updateSettings;
+ }()
+ }, {
+ key: "currentReservoir",
+ value: function currentReservoir() {
+ return this._store.__currentReservoir__();
+ }
+ }, {
+ key: "incrementReservoir",
+ value: function incrementReservoir() {
+ var incr = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : 0;
+ return this._store.__incrementReservoir__(incr);
+ }
+ }]);
+
+ return Bottleneck;
+ }();
+ Bottleneck["default"] = Bottleneck;
+ Bottleneck.Events = Events$6;
+ Bottleneck.version = Bottleneck.prototype.version = require$$8.version;
+ Bottleneck.strategy = Bottleneck.prototype.strategy = {
+ LEAK: 1,
+ OVERFLOW: 2,
+ OVERFLOW_PRIORITY: 4,
+ BLOCK: 3
+ };
+ Bottleneck.BottleneckError = Bottleneck.prototype.BottleneckError = BottleneckError_1;
+ Bottleneck.Group = Bottleneck.prototype.Group = Group_1;
+ Bottleneck.RedisConnection = Bottleneck.prototype.RedisConnection = RedisConnection_1;
+ Bottleneck.IORedisConnection = Bottleneck.prototype.IORedisConnection = IORedisConnection_1;
+ Bottleneck.Batcher = Bottleneck.prototype.Batcher = Batcher_1;
+ Bottleneck.prototype.jobDefaults = {
+ priority: DEFAULT_PRIORITY$1,
+ weight: 1,
+ expiration: null,
+ id: ""
+ };
+ Bottleneck.prototype.storeDefaults = {
+ maxConcurrent: null,
+ minTime: 0,
+ highWater: null,
+ strategy: Bottleneck.prototype.strategy.LEAK,
+ penalty: null,
+ reservoir: null,
+ reservoirRefreshInterval: null,
+ reservoirRefreshAmount: null,
+ reservoirIncreaseInterval: null,
+ reservoirIncreaseAmount: null,
+ reservoirIncreaseMaximum: null
+ };
+ Bottleneck.prototype.localStoreDefaults = {
+ Promise: Promise,
+ timeout: null,
+ heartbeatInterval: 250
+ };
+ Bottleneck.prototype.redisStoreDefaults = {
+ Promise: Promise,
+ timeout: null,
+ heartbeatInterval: 5000,
+ clientTimeout: 10000,
+ Redis: null,
+ clientOptions: {},
+ clusterNodes: null,
+ clearDatastore: false,
+ connection: null
+ };
+ Bottleneck.prototype.instanceDefaults = {
+ datastore: "local",
+ connection: null,
+ id: "",
+ rejectOnDrop: true,
+ trackDoneStatus: false,
+ Promise: Promise
+ };
+ Bottleneck.prototype.stopDefaults = {
+ enqueueErrorMessage: "This limiter has been stopped and cannot accept new jobs.",
+ dropWaitingJobs: true,
+ dropErrorMessage: "This limiter has been stopped."
+ };
+ return Bottleneck;
+ }.call(commonjsGlobal);
+
+ var Bottleneck_1 = Bottleneck;
+
+ var es5 = Bottleneck_1;
+
+ return es5;
+
+})));
diff --git a/node_modules/bottleneck/lib/Batcher.js b/node_modules/bottleneck/lib/Batcher.js
new file mode 100644
index 000000000..f52892afc
--- /dev/null
+++ b/node_modules/bottleneck/lib/Batcher.js
@@ -0,0 +1,66 @@
+"use strict";
+
+var Batcher, Events, parser;
+parser = require("./parser");
+Events = require("./Events");
+
+Batcher = function () {
+ class Batcher {
+ constructor(options = {}) {
+ this.options = options;
+ parser.load(this.options, this.defaults, this);
+ this.Events = new Events(this);
+ this._arr = [];
+
+ this._resetPromise();
+
+ this._lastFlush = Date.now();
+ }
+
+ _resetPromise() {
+ return this._promise = new this.Promise((res, rej) => {
+ return this._resolve = res;
+ });
+ }
+
+ _flush() {
+ clearTimeout(this._timeout);
+ this._lastFlush = Date.now();
+
+ this._resolve();
+
+ this.Events.trigger("batch", this._arr);
+ this._arr = [];
+ return this._resetPromise();
+ }
+
+ add(data) {
+ var ret;
+
+ this._arr.push(data);
+
+ ret = this._promise;
+
+ if (this._arr.length === this.maxSize) {
+ this._flush();
+ } else if (this.maxTime != null && this._arr.length === 1) {
+ this._timeout = setTimeout(() => {
+ return this._flush();
+ }, this.maxTime);
+ }
+
+ return ret;
+ }
+
+ }
+
+ ;
+ Batcher.prototype.defaults = {
+ maxTime: null,
+ maxSize: null,
+ Promise: Promise
+ };
+ return Batcher;
+}.call(void 0);
+
+module.exports = Batcher;
\ No newline at end of file
diff --git a/node_modules/bottleneck/lib/Bottleneck.js b/node_modules/bottleneck/lib/Bottleneck.js
new file mode 100644
index 000000000..ff640a15a
--- /dev/null
+++ b/node_modules/bottleneck/lib/Bottleneck.js
@@ -0,0 +1,594 @@
+"use strict";
+
+function _slicedToArray(arr, i) { return _arrayWithHoles(arr) || _iterableToArrayLimit(arr, i) || _nonIterableRest(); }
+
+function _iterableToArrayLimit(arr, i) { var _arr = []; var _n = true; var _d = false; var _e = undefined; try { for (var _i = arr[Symbol.iterator](), _s; !(_n = (_s = _i.next()).done); _n = true) { _arr.push(_s.value); if (i && _arr.length === i) break; } } catch (err) { _d = true; _e = err; } finally { try { if (!_n && _i["return"] != null) _i["return"](); } finally { if (_d) throw _e; } } return _arr; }
+
+function _toArray(arr) { return _arrayWithHoles(arr) || _iterableToArray(arr) || _nonIterableRest(); }
+
+function _nonIterableRest() { throw new TypeError("Invalid attempt to destructure non-iterable instance"); }
+
+function _iterableToArray(iter) { if (Symbol.iterator in Object(iter) || Object.prototype.toString.call(iter) === "[object Arguments]") return Array.from(iter); }
+
+function _arrayWithHoles(arr) { if (Array.isArray(arr)) return arr; }
+
+function asyncGeneratorStep(gen, resolve, reject, _next, _throw, key, arg) { try { var info = gen[key](arg); var value = info.value; } catch (error) { reject(error); return; } if (info.done) { resolve(value); } else { Promise.resolve(value).then(_next, _throw); } }
+
+function _asyncToGenerator(fn) { return function () { var self = this, args = arguments; return new Promise(function (resolve, reject) { var gen = fn.apply(self, args); function _next(value) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "next", value); } function _throw(err) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "throw", err); } _next(undefined); }); }; }
+
+var Bottleneck,
+ DEFAULT_PRIORITY,
+ Events,
+ Job,
+ LocalDatastore,
+ NUM_PRIORITIES,
+ Queues,
+ RedisDatastore,
+ States,
+ Sync,
+ parser,
+ splice = [].splice;
+NUM_PRIORITIES = 10;
+DEFAULT_PRIORITY = 5;
+parser = require("./parser");
+Queues = require("./Queues");
+Job = require("./Job");
+LocalDatastore = require("./LocalDatastore");
+RedisDatastore = require("./RedisDatastore");
+Events = require("./Events");
+States = require("./States");
+Sync = require("./Sync");
+
+Bottleneck = function () {
+ class Bottleneck {
+ constructor(options = {}, ...invalid) {
+ var storeInstanceOptions, storeOptions;
+ this._addToQueue = this._addToQueue.bind(this);
+
+ this._validateOptions(options, invalid);
+
+ parser.load(options, this.instanceDefaults, this);
+ this._queues = new Queues(NUM_PRIORITIES);
+ this._scheduled = {};
+ this._states = new States(["RECEIVED", "QUEUED", "RUNNING", "EXECUTING"].concat(this.trackDoneStatus ? ["DONE"] : []));
+ this._limiter = null;
+ this.Events = new Events(this);
+ this._submitLock = new Sync("submit", this.Promise);
+ this._registerLock = new Sync("register", this.Promise);
+ storeOptions = parser.load(options, this.storeDefaults, {});
+
+ this._store = function () {
+ if (this.datastore === "redis" || this.datastore === "ioredis" || this.connection != null) {
+ storeInstanceOptions = parser.load(options, this.redisStoreDefaults, {});
+ return new RedisDatastore(this, storeOptions, storeInstanceOptions);
+ } else if (this.datastore === "local") {
+ storeInstanceOptions = parser.load(options, this.localStoreDefaults, {});
+ return new LocalDatastore(this, storeOptions, storeInstanceOptions);
+ } else {
+ throw new Bottleneck.prototype.BottleneckError(`Invalid datastore type: ${this.datastore}`);
+ }
+ }.call(this);
+
+ this._queues.on("leftzero", () => {
+ var ref;
+ return (ref = this._store.heartbeat) != null ? typeof ref.ref === "function" ? ref.ref() : void 0 : void 0;
+ });
+
+ this._queues.on("zero", () => {
+ var ref;
+ return (ref = this._store.heartbeat) != null ? typeof ref.unref === "function" ? ref.unref() : void 0 : void 0;
+ });
+ }
+
+ _validateOptions(options, invalid) {
+ if (!(options != null && typeof options === "object" && invalid.length === 0)) {
+ throw new Bottleneck.prototype.BottleneckError("Bottleneck v2 takes a single object argument. Refer to https://github.com/SGrondin/bottleneck#upgrading-to-v2 if you're upgrading from Bottleneck v1.");
+ }
+ }
+
+ ready() {
+ return this._store.ready;
+ }
+
+ clients() {
+ return this._store.clients;
+ }
+
+ channel() {
+ return `b_${this.id}`;
+ }
+
+ channel_client() {
+ return `b_${this.id}_${this._store.clientId}`;
+ }
+
+ publish(message) {
+ return this._store.__publish__(message);
+ }
+
+ disconnect(flush = true) {
+ return this._store.__disconnect__(flush);
+ }
+
+ chain(_limiter) {
+ this._limiter = _limiter;
+ return this;
+ }
+
+ queued(priority) {
+ return this._queues.queued(priority);
+ }
+
+ clusterQueued() {
+ return this._store.__queued__();
+ }
+
+ empty() {
+ return this.queued() === 0 && this._submitLock.isEmpty();
+ }
+
+ running() {
+ return this._store.__running__();
+ }
+
+ done() {
+ return this._store.__done__();
+ }
+
+ jobStatus(id) {
+ return this._states.jobStatus(id);
+ }
+
+ jobs(status) {
+ return this._states.statusJobs(status);
+ }
+
+ counts() {
+ return this._states.statusCounts();
+ }
+
+ _randomIndex() {
+ return Math.random().toString(36).slice(2);
+ }
+
+ check(weight = 1) {
+ return this._store.__check__(weight);
+ }
+
+ _clearGlobalState(index) {
+ if (this._scheduled[index] != null) {
+ clearTimeout(this._scheduled[index].expiration);
+ delete this._scheduled[index];
+ return true;
+ } else {
+ return false;
+ }
+ }
+
+ _free(index, job, options, eventInfo) {
+ var _this = this;
+
+ return _asyncToGenerator(function* () {
+ var e, running;
+
+ try {
+ var _ref = yield _this._store.__free__(index, options.weight);
+
+ running = _ref.running;
+
+ _this.Events.trigger("debug", `Freed ${options.id}`, eventInfo);
+
+ if (running === 0 && _this.empty()) {
+ return _this.Events.trigger("idle");
+ }
+ } catch (error1) {
+ e = error1;
+ return _this.Events.trigger("error", e);
+ }
+ })();
+ }
+
+ _run(index, job, wait) {
+ var clearGlobalState, free, run;
+ job.doRun();
+ clearGlobalState = this._clearGlobalState.bind(this, index);
+ run = this._run.bind(this, index, job);
+ free = this._free.bind(this, index, job);
+ return this._scheduled[index] = {
+ timeout: setTimeout(() => {
+ return job.doExecute(this._limiter, clearGlobalState, run, free);
+ }, wait),
+ expiration: job.options.expiration != null ? setTimeout(function () {
+ return job.doExpire(clearGlobalState, run, free);
+ }, wait + job.options.expiration) : void 0,
+ job: job
+ };
+ }
+
+ _drainOne(capacity) {
+ return this._registerLock.schedule(() => {
+ var args, index, next, options, queue;
+
+ if (this.queued() === 0) {
+ return this.Promise.resolve(null);
+ }
+
+ queue = this._queues.getFirst();
+
+ var _next2 = next = queue.first();
+
+ options = _next2.options;
+ args = _next2.args;
+
+ if (capacity != null && options.weight > capacity) {
+ return this.Promise.resolve(null);
+ }
+
+ this.Events.trigger("debug", `Draining ${options.id}`, {
+ args,
+ options
+ });
+ index = this._randomIndex();
+ return this._store.__register__(index, options.weight, options.expiration).then(({
+ success,
+ wait,
+ reservoir
+ }) => {
+ var empty;
+ this.Events.trigger("debug", `Drained ${options.id}`, {
+ success,
+ args,
+ options
+ });
+
+ if (success) {
+ queue.shift();
+ empty = this.empty();
+
+ if (empty) {
+ this.Events.trigger("empty");
+ }
+
+ if (reservoir === 0) {
+ this.Events.trigger("depleted", empty);
+ }
+
+ this._run(index, next, wait);
+
+ return this.Promise.resolve(options.weight);
+ } else {
+ return this.Promise.resolve(null);
+ }
+ });
+ });
+ }
+
+ _drainAll(capacity, total = 0) {
+ return this._drainOne(capacity).then(drained => {
+ var newCapacity;
+
+ if (drained != null) {
+ newCapacity = capacity != null ? capacity - drained : capacity;
+ return this._drainAll(newCapacity, total + drained);
+ } else {
+ return this.Promise.resolve(total);
+ }
+ }).catch(e => {
+ return this.Events.trigger("error", e);
+ });
+ }
+
+ _dropAllQueued(message) {
+ return this._queues.shiftAll(function (job) {
+ return job.doDrop({
+ message
+ });
+ });
+ }
+
+ stop(options = {}) {
+ var done, waitForExecuting;
+ options = parser.load(options, this.stopDefaults);
+
+ waitForExecuting = at => {
+ var finished;
+
+ finished = () => {
+ var counts;
+ counts = this._states.counts;
+ return counts[0] + counts[1] + counts[2] + counts[3] === at;
+ };
+
+ return new this.Promise((resolve, reject) => {
+ if (finished()) {
+ return resolve();
+ } else {
+ return this.on("done", () => {
+ if (finished()) {
+ this.removeAllListeners("done");
+ return resolve();
+ }
+ });
+ }
+ });
+ };
+
+ done = options.dropWaitingJobs ? (this._run = function (index, next) {
+ return next.doDrop({
+ message: options.dropErrorMessage
+ });
+ }, this._drainOne = () => {
+ return this.Promise.resolve(null);
+ }, this._registerLock.schedule(() => {
+ return this._submitLock.schedule(() => {
+ var k, ref, v;
+ ref = this._scheduled;
+
+ for (k in ref) {
+ v = ref[k];
+
+ if (this.jobStatus(v.job.options.id) === "RUNNING") {
+ clearTimeout(v.timeout);
+ clearTimeout(v.expiration);
+ v.job.doDrop({
+ message: options.dropErrorMessage
+ });
+ }
+ }
+
+ this._dropAllQueued(options.dropErrorMessage);
+
+ return waitForExecuting(0);
+ });
+ })) : this.schedule({
+ priority: NUM_PRIORITIES - 1,
+ weight: 0
+ }, () => {
+ return waitForExecuting(1);
+ });
+
+ this._receive = function (job) {
+ return job._reject(new Bottleneck.prototype.BottleneckError(options.enqueueErrorMessage));
+ };
+
+ this.stop = () => {
+ return this.Promise.reject(new Bottleneck.prototype.BottleneckError("stop() has already been called"));
+ };
+
+ return done;
+ }
+
+ _addToQueue(job) {
+ var _this2 = this;
+
+ return _asyncToGenerator(function* () {
+ var args, blocked, error, options, reachedHWM, shifted, strategy;
+ args = job.args;
+ options = job.options;
+
+ try {
+ var _ref2 = yield _this2._store.__submit__(_this2.queued(), options.weight);
+
+ reachedHWM = _ref2.reachedHWM;
+ blocked = _ref2.blocked;
+ strategy = _ref2.strategy;
+ } catch (error1) {
+ error = error1;
+
+ _this2.Events.trigger("debug", `Could not queue ${options.id}`, {
+ args,
+ options,
+ error
+ });
+
+ job.doDrop({
+ error
+ });
+ return false;
+ }
+
+ if (blocked) {
+ job.doDrop();
+ return true;
+ } else if (reachedHWM) {
+ shifted = strategy === Bottleneck.prototype.strategy.LEAK ? _this2._queues.shiftLastFrom(options.priority) : strategy === Bottleneck.prototype.strategy.OVERFLOW_PRIORITY ? _this2._queues.shiftLastFrom(options.priority + 1) : strategy === Bottleneck.prototype.strategy.OVERFLOW ? job : void 0;
+
+ if (shifted != null) {
+ shifted.doDrop();
+ }
+
+ if (shifted == null || strategy === Bottleneck.prototype.strategy.OVERFLOW) {
+ if (shifted == null) {
+ job.doDrop();
+ }
+
+ return reachedHWM;
+ }
+ }
+
+ job.doQueue(reachedHWM, blocked);
+
+ _this2._queues.push(job);
+
+ yield _this2._drainAll();
+ return reachedHWM;
+ })();
+ }
+
+ _receive(job) {
+ if (this._states.jobStatus(job.options.id) != null) {
+ job._reject(new Bottleneck.prototype.BottleneckError(`A job with the same id already exists (id=${job.options.id})`));
+
+ return false;
+ } else {
+ job.doReceive();
+ return this._submitLock.schedule(this._addToQueue, job);
+ }
+ }
+
+ submit(...args) {
+ var cb, fn, job, options, ref, ref1, task;
+
+ if (typeof args[0] === "function") {
+ var _ref3, _ref4, _splice$call, _splice$call2;
+
+ ref = args, (_ref3 = ref, _ref4 = _toArray(_ref3), fn = _ref4[0], args = _ref4.slice(1), _ref3), (_splice$call = splice.call(args, -1), _splice$call2 = _slicedToArray(_splice$call, 1), cb = _splice$call2[0], _splice$call);
+ options = parser.load({}, this.jobDefaults);
+ } else {
+ var _ref5, _ref6, _splice$call3, _splice$call4;
+
+ ref1 = args, (_ref5 = ref1, _ref6 = _toArray(_ref5), options = _ref6[0], fn = _ref6[1], args = _ref6.slice(2), _ref5), (_splice$call3 = splice.call(args, -1), _splice$call4 = _slicedToArray(_splice$call3, 1), cb = _splice$call4[0], _splice$call3);
+ options = parser.load(options, this.jobDefaults);
+ }
+
+ task = (...args) => {
+ return new this.Promise(function (resolve, reject) {
+ return fn(...args, function (...args) {
+ return (args[0] != null ? reject : resolve)(args);
+ });
+ });
+ };
+
+ job = new Job(task, args, options, this.jobDefaults, this.rejectOnDrop, this.Events, this._states, this.Promise);
+ job.promise.then(function (args) {
+ return typeof cb === "function" ? cb(...args) : void 0;
+ }).catch(function (args) {
+ if (Array.isArray(args)) {
+ return typeof cb === "function" ? cb(...args) : void 0;
+ } else {
+ return typeof cb === "function" ? cb(args) : void 0;
+ }
+ });
+ return this._receive(job);
+ }
+
+ schedule(...args) {
+ var job, options, task;
+
+ if (typeof args[0] === "function") {
+ var _args = args;
+
+ var _args2 = _toArray(_args);
+
+ task = _args2[0];
+ args = _args2.slice(1);
+ options = {};
+ } else {
+ var _args3 = args;
+
+ var _args4 = _toArray(_args3);
+
+ options = _args4[0];
+ task = _args4[1];
+ args = _args4.slice(2);
+ }
+
+ job = new Job(task, args, options, this.jobDefaults, this.rejectOnDrop, this.Events, this._states, this.Promise);
+
+ this._receive(job);
+
+ return job.promise;
+ }
+
+ wrap(fn) {
+ var schedule, wrapped;
+ schedule = this.schedule.bind(this);
+
+ wrapped = function wrapped(...args) {
+ return schedule(fn.bind(this), ...args);
+ };
+
+ wrapped.withOptions = function (options, ...args) {
+ return schedule(options, fn, ...args);
+ };
+
+ return wrapped;
+ }
+
+ updateSettings(options = {}) {
+ var _this3 = this;
+
+ return _asyncToGenerator(function* () {
+ yield _this3._store.__updateSettings__(parser.overwrite(options, _this3.storeDefaults));
+ parser.overwrite(options, _this3.instanceDefaults, _this3);
+ return _this3;
+ })();
+ }
+
+ currentReservoir() {
+ return this._store.__currentReservoir__();
+ }
+
+ incrementReservoir(incr = 0) {
+ return this._store.__incrementReservoir__(incr);
+ }
+
+ }
+
+ ;
+ Bottleneck.default = Bottleneck;
+ Bottleneck.Events = Events;
+ Bottleneck.version = Bottleneck.prototype.version = require("./version.json").version;
+ Bottleneck.strategy = Bottleneck.prototype.strategy = {
+ LEAK: 1,
+ OVERFLOW: 2,
+ OVERFLOW_PRIORITY: 4,
+ BLOCK: 3
+ };
+ Bottleneck.BottleneckError = Bottleneck.prototype.BottleneckError = require("./BottleneckError");
+ Bottleneck.Group = Bottleneck.prototype.Group = require("./Group");
+ Bottleneck.RedisConnection = Bottleneck.prototype.RedisConnection = require("./RedisConnection");
+ Bottleneck.IORedisConnection = Bottleneck.prototype.IORedisConnection = require("./IORedisConnection");
+ Bottleneck.Batcher = Bottleneck.prototype.Batcher = require("./Batcher");
+ Bottleneck.prototype.jobDefaults = {
+ priority: DEFAULT_PRIORITY,
+ weight: 1,
+ expiration: null,
+ id: ""
+ };
+ Bottleneck.prototype.storeDefaults = {
+ maxConcurrent: null,
+ minTime: 0,
+ highWater: null,
+ strategy: Bottleneck.prototype.strategy.LEAK,
+ penalty: null,
+ reservoir: null,
+ reservoirRefreshInterval: null,
+ reservoirRefreshAmount: null,
+ reservoirIncreaseInterval: null,
+ reservoirIncreaseAmount: null,
+ reservoirIncreaseMaximum: null
+ };
+ Bottleneck.prototype.localStoreDefaults = {
+ Promise: Promise,
+ timeout: null,
+ heartbeatInterval: 250
+ };
+ Bottleneck.prototype.redisStoreDefaults = {
+ Promise: Promise,
+ timeout: null,
+ heartbeatInterval: 5000,
+ clientTimeout: 10000,
+ Redis: null,
+ clientOptions: {},
+ clusterNodes: null,
+ clearDatastore: false,
+ connection: null
+ };
+ Bottleneck.prototype.instanceDefaults = {
+ datastore: "local",
+ connection: null,
+ id: "",
+ rejectOnDrop: true,
+ trackDoneStatus: false,
+ Promise: Promise
+ };
+ Bottleneck.prototype.stopDefaults = {
+ enqueueErrorMessage: "This limiter has been stopped and cannot accept new jobs.",
+ dropWaitingJobs: true,
+ dropErrorMessage: "This limiter has been stopped."
+ };
+ return Bottleneck;
+}.call(void 0);
+
+module.exports = Bottleneck;
\ No newline at end of file
diff --git a/node_modules/bottleneck/lib/BottleneckError.js b/node_modules/bottleneck/lib/BottleneckError.js
new file mode 100644
index 000000000..f8eeaff6e
--- /dev/null
+++ b/node_modules/bottleneck/lib/BottleneckError.js
@@ -0,0 +1,5 @@
+"use strict";
+
+var BottleneckError;
+BottleneckError = class BottleneckError extends Error {};
+module.exports = BottleneckError;
\ No newline at end of file
diff --git a/node_modules/bottleneck/lib/DLList.js b/node_modules/bottleneck/lib/DLList.js
new file mode 100644
index 000000000..b469a6549
--- /dev/null
+++ b/node_modules/bottleneck/lib/DLList.js
@@ -0,0 +1,107 @@
+"use strict";
+
+var DLList;
+DLList = class DLList {
+ constructor(incr, decr) {
+ this.incr = incr;
+ this.decr = decr;
+ this._first = null;
+ this._last = null;
+ this.length = 0;
+ }
+
+ push(value) {
+ var node;
+ this.length++;
+
+ if (typeof this.incr === "function") {
+ this.incr();
+ }
+
+ node = {
+ value,
+ prev: this._last,
+ next: null
+ };
+
+ if (this._last != null) {
+ this._last.next = node;
+ this._last = node;
+ } else {
+ this._first = this._last = node;
+ }
+
+ return void 0;
+ }
+
+ shift() {
+ var value;
+
+ if (this._first == null) {
+ return;
+ } else {
+ this.length--;
+
+ if (typeof this.decr === "function") {
+ this.decr();
+ }
+ }
+
+ value = this._first.value;
+
+ if ((this._first = this._first.next) != null) {
+ this._first.prev = null;
+ } else {
+ this._last = null;
+ }
+
+ return value;
+ }
+
+ first() {
+ if (this._first != null) {
+ return this._first.value;
+ }
+ }
+
+ getArray() {
+ var node, ref, results;
+ node = this._first;
+ results = [];
+
+ while (node != null) {
+ results.push((ref = node, node = node.next, ref.value));
+ }
+
+ return results;
+ }
+
+ forEachShift(cb) {
+ var node;
+ node = this.shift();
+
+ while (node != null) {
+ cb(node), node = this.shift();
+ }
+
+ return void 0;
+ }
+
+ debug() {
+ var node, ref, ref1, ref2, results;
+ node = this._first;
+ results = [];
+
+ while (node != null) {
+ results.push((ref = node, node = node.next, {
+ value: ref.value,
+ prev: (ref1 = ref.prev) != null ? ref1.value : void 0,
+ next: (ref2 = ref.next) != null ? ref2.value : void 0
+ }));
+ }
+
+ return results;
+ }
+
+};
+module.exports = DLList;
\ No newline at end of file
diff --git a/node_modules/bottleneck/lib/Events.js b/node_modules/bottleneck/lib/Events.js
new file mode 100644
index 000000000..e843257e9
--- /dev/null
+++ b/node_modules/bottleneck/lib/Events.js
@@ -0,0 +1,128 @@
+"use strict";
+
+function asyncGeneratorStep(gen, resolve, reject, _next, _throw, key, arg) { try { var info = gen[key](arg); var value = info.value; } catch (error) { reject(error); return; } if (info.done) { resolve(value); } else { Promise.resolve(value).then(_next, _throw); } }
+
+function _asyncToGenerator(fn) { return function () { var self = this, args = arguments; return new Promise(function (resolve, reject) { var gen = fn.apply(self, args); function _next(value) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "next", value); } function _throw(err) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "throw", err); } _next(undefined); }); }; }
+
+var Events;
+Events = class Events {
+ constructor(instance) {
+ this.instance = instance;
+ this._events = {};
+
+ if (this.instance.on != null || this.instance.once != null || this.instance.removeAllListeners != null) {
+ throw new Error("An Emitter already exists for this object");
+ }
+
+ this.instance.on = (name, cb) => {
+ return this._addListener(name, "many", cb);
+ };
+
+ this.instance.once = (name, cb) => {
+ return this._addListener(name, "once", cb);
+ };
+
+ this.instance.removeAllListeners = (name = null) => {
+ if (name != null) {
+ return delete this._events[name];
+ } else {
+ return this._events = {};
+ }
+ };
+ }
+
+ _addListener(name, status, cb) {
+ var base;
+
+ if ((base = this._events)[name] == null) {
+ base[name] = [];
+ }
+
+ this._events[name].push({
+ cb,
+ status
+ });
+
+ return this.instance;
+ }
+
+ listenerCount(name) {
+ if (this._events[name] != null) {
+ return this._events[name].length;
+ } else {
+ return 0;
+ }
+ }
+
+ trigger(name, ...args) {
+ var _this = this;
+
+ return _asyncToGenerator(function* () {
+ var e, promises;
+
+ try {
+ if (name !== "debug") {
+ _this.trigger("debug", `Event triggered: ${name}`, args);
+ }
+
+ if (_this._events[name] == null) {
+ return;
+ }
+
+ _this._events[name] = _this._events[name].filter(function (listener) {
+ return listener.status !== "none";
+ });
+ promises = _this._events[name].map(
+ /*#__PURE__*/
+ function () {
+ var _ref = _asyncToGenerator(function* (listener) {
+ var e, returned;
+
+ if (listener.status === "none") {
+ return;
+ }
+
+ if (listener.status === "once") {
+ listener.status = "none";
+ }
+
+ try {
+ returned = typeof listener.cb === "function" ? listener.cb(...args) : void 0;
+
+ if (typeof (returned != null ? returned.then : void 0) === "function") {
+ return yield returned;
+ } else {
+ return returned;
+ }
+ } catch (error) {
+ e = error;
+
+ if ("name" !== "error") {
+ _this.trigger("error", e);
+ }
+
+ return null;
+ }
+ });
+
+ return function (_x) {
+ return _ref.apply(this, arguments);
+ };
+ }());
+ return (yield Promise.all(promises)).find(function (x) {
+ return x != null;
+ });
+ } catch (error) {
+ e = error;
+
+ if ("name" !== "error") {
+ _this.trigger("error", e);
+ }
+
+ return null;
+ }
+ })();
+ }
+
+};
+module.exports = Events;
\ No newline at end of file
diff --git a/node_modules/bottleneck/lib/Group.js b/node_modules/bottleneck/lib/Group.js
new file mode 100644
index 000000000..39676a583
--- /dev/null
+++ b/node_modules/bottleneck/lib/Group.js
@@ -0,0 +1,198 @@
+"use strict";
+
+function _slicedToArray(arr, i) { return _arrayWithHoles(arr) || _iterableToArrayLimit(arr, i) || _nonIterableRest(); }
+
+function _nonIterableRest() { throw new TypeError("Invalid attempt to destructure non-iterable instance"); }
+
+function _iterableToArrayLimit(arr, i) { var _arr = []; var _n = true; var _d = false; var _e = undefined; try { for (var _i = arr[Symbol.iterator](), _s; !(_n = (_s = _i.next()).done); _n = true) { _arr.push(_s.value); if (i && _arr.length === i) break; } } catch (err) { _d = true; _e = err; } finally { try { if (!_n && _i["return"] != null) _i["return"](); } finally { if (_d) throw _e; } } return _arr; }
+
+function _arrayWithHoles(arr) { if (Array.isArray(arr)) return arr; }
+
+function asyncGeneratorStep(gen, resolve, reject, _next, _throw, key, arg) { try { var info = gen[key](arg); var value = info.value; } catch (error) { reject(error); return; } if (info.done) { resolve(value); } else { Promise.resolve(value).then(_next, _throw); } }
+
+function _asyncToGenerator(fn) { return function () { var self = this, args = arguments; return new Promise(function (resolve, reject) { var gen = fn.apply(self, args); function _next(value) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "next", value); } function _throw(err) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "throw", err); } _next(undefined); }); }; }
+
+var Events, Group, IORedisConnection, RedisConnection, Scripts, parser;
+parser = require("./parser");
+Events = require("./Events");
+RedisConnection = require("./RedisConnection");
+IORedisConnection = require("./IORedisConnection");
+Scripts = require("./Scripts");
+
+Group = function () {
+ class Group {
+ constructor(limiterOptions = {}) {
+ this.deleteKey = this.deleteKey.bind(this);
+ this.limiterOptions = limiterOptions;
+ parser.load(this.limiterOptions, this.defaults, this);
+ this.Events = new Events(this);
+ this.instances = {};
+ this.Bottleneck = require("./Bottleneck");
+
+ this._startAutoCleanup();
+
+ this.sharedConnection = this.connection != null;
+
+ if (this.connection == null) {
+ if (this.limiterOptions.datastore === "redis") {
+ this.connection = new RedisConnection(Object.assign({}, this.limiterOptions, {
+ Events: this.Events
+ }));
+ } else if (this.limiterOptions.datastore === "ioredis") {
+ this.connection = new IORedisConnection(Object.assign({}, this.limiterOptions, {
+ Events: this.Events
+ }));
+ }
+ }
+ }
+
+ key(key = "") {
+ var ref;
+ return (ref = this.instances[key]) != null ? ref : (() => {
+ var limiter;
+ limiter = this.instances[key] = new this.Bottleneck(Object.assign(this.limiterOptions, {
+ id: `${this.id}-${key}`,
+ timeout: this.timeout,
+ connection: this.connection
+ }));
+ this.Events.trigger("created", limiter, key);
+ return limiter;
+ })();
+ }
+
+ deleteKey(key = "") {
+ var _this = this;
+
+ return _asyncToGenerator(function* () {
+ var deleted, instance;
+ instance = _this.instances[key];
+
+ if (_this.connection) {
+ deleted = yield _this.connection.__runCommand__(['del', ...Scripts.allKeys(`${_this.id}-${key}`)]);
+ }
+
+ if (instance != null) {
+ delete _this.instances[key];
+ yield instance.disconnect();
+ }
+
+ return instance != null || deleted > 0;
+ })();
+ }
+
+ limiters() {
+ var k, ref, results, v;
+ ref = this.instances;
+ results = [];
+
+ for (k in ref) {
+ v = ref[k];
+ results.push({
+ key: k,
+ limiter: v
+ });
+ }
+
+ return results;
+ }
+
+ keys() {
+ return Object.keys(this.instances);
+ }
+
+ clusterKeys() {
+ var _this2 = this;
+
+ return _asyncToGenerator(function* () {
+ var cursor, end, found, i, k, keys, len, next, start;
+
+ if (_this2.connection == null) {
+ return _this2.Promise.resolve(_this2.keys());
+ }
+
+ keys = [];
+ cursor = null;
+ start = `b_${_this2.id}-`.length;
+ end = "_settings".length;
+
+ while (cursor !== 0) {
+ var _ref = yield _this2.connection.__runCommand__(["scan", cursor != null ? cursor : 0, "match", `b_${_this2.id}-*_settings`, "count", 10000]);
+
+ var _ref2 = _slicedToArray(_ref, 2);
+
+ next = _ref2[0];
+ found = _ref2[1];
+ cursor = ~~next;
+
+ for (i = 0, len = found.length; i < len; i++) {
+ k = found[i];
+ keys.push(k.slice(start, -end));
+ }
+ }
+
+ return keys;
+ })();
+ }
+
+ _startAutoCleanup() {
+ var _this3 = this;
+
+ var base;
+ clearInterval(this.interval);
+ return typeof (base = this.interval = setInterval(
+ /*#__PURE__*/
+ _asyncToGenerator(function* () {
+ var e, k, ref, results, time, v;
+ time = Date.now();
+ ref = _this3.instances;
+ results = [];
+
+ for (k in ref) {
+ v = ref[k];
+
+ try {
+ if (yield v._store.__groupCheck__(time)) {
+ results.push(_this3.deleteKey(k));
+ } else {
+ results.push(void 0);
+ }
+ } catch (error) {
+ e = error;
+ results.push(v.Events.trigger("error", e));
+ }
+ }
+
+ return results;
+ }), this.timeout / 2)).unref === "function" ? base.unref() : void 0;
+ }
+
+ updateSettings(options = {}) {
+ parser.overwrite(options, this.defaults, this);
+ parser.overwrite(options, options, this.limiterOptions);
+
+ if (options.timeout != null) {
+ return this._startAutoCleanup();
+ }
+ }
+
+ disconnect(flush = true) {
+ var ref;
+
+ if (!this.sharedConnection) {
+ return (ref = this.connection) != null ? ref.disconnect(flush) : void 0;
+ }
+ }
+
+ }
+
+ ;
+ Group.prototype.defaults = {
+ timeout: 1000 * 60 * 5,
+ connection: null,
+ Promise: Promise,
+ id: "group-key"
+ };
+ return Group;
+}.call(void 0);
+
+module.exports = Group;
\ No newline at end of file
diff --git a/node_modules/bottleneck/lib/IORedisConnection.js b/node_modules/bottleneck/lib/IORedisConnection.js
new file mode 100644
index 000000000..52b28da43
--- /dev/null
+++ b/node_modules/bottleneck/lib/IORedisConnection.js
@@ -0,0 +1,186 @@
+"use strict";
+
+function _slicedToArray(arr, i) { return _arrayWithHoles(arr) || _iterableToArrayLimit(arr, i) || _nonIterableRest(); }
+
+function _nonIterableRest() { throw new TypeError("Invalid attempt to destructure non-iterable instance"); }
+
+function _iterableToArrayLimit(arr, i) { var _arr = []; var _n = true; var _d = false; var _e = undefined; try { for (var _i = arr[Symbol.iterator](), _s; !(_n = (_s = _i.next()).done); _n = true) { _arr.push(_s.value); if (i && _arr.length === i) break; } } catch (err) { _d = true; _e = err; } finally { try { if (!_n && _i["return"] != null) _i["return"](); } finally { if (_d) throw _e; } } return _arr; }
+
+function _arrayWithHoles(arr) { if (Array.isArray(arr)) return arr; }
+
+function asyncGeneratorStep(gen, resolve, reject, _next, _throw, key, arg) { try { var info = gen[key](arg); var value = info.value; } catch (error) { reject(error); return; } if (info.done) { resolve(value); } else { Promise.resolve(value).then(_next, _throw); } }
+
+function _asyncToGenerator(fn) { return function () { var self = this, args = arguments; return new Promise(function (resolve, reject) { var gen = fn.apply(self, args); function _next(value) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "next", value); } function _throw(err) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "throw", err); } _next(undefined); }); }; }
+
+var Events, IORedisConnection, Scripts, parser;
+parser = require("./parser");
+Events = require("./Events");
+Scripts = require("./Scripts");
+
+IORedisConnection = function () {
+ class IORedisConnection {
+ constructor(options = {}) {
+ parser.load(options, this.defaults, this);
+
+ if (this.Redis == null) {
+ this.Redis = eval("require")("ioredis"); // Obfuscated or else Webpack/Angular will try to inline the optional ioredis module. To override this behavior: pass the ioredis module to Bottleneck as the 'Redis' option.
+ }
+
+ if (this.Events == null) {
+ this.Events = new Events(this);
+ }
+
+ this.terminated = false;
+
+ if (this.clusterNodes != null) {
+ this.client = new this.Redis.Cluster(this.clusterNodes, this.clientOptions);
+ this.subscriber = new this.Redis.Cluster(this.clusterNodes, this.clientOptions);
+ } else if (this.client != null && this.client.duplicate == null) {
+ this.subscriber = new this.Redis.Cluster(this.client.startupNodes, this.client.options);
+ } else {
+ if (this.client == null) {
+ this.client = new this.Redis(this.clientOptions);
+ }
+
+ this.subscriber = this.client.duplicate();
+ }
+
+ this.limiters = {};
+ this.ready = this.Promise.all([this._setup(this.client, false), this._setup(this.subscriber, true)]).then(() => {
+ this._loadScripts();
+
+ return {
+ client: this.client,
+ subscriber: this.subscriber
+ };
+ });
+ }
+
+ _setup(client, sub) {
+ client.setMaxListeners(0);
+ return new this.Promise((resolve, reject) => {
+ client.on("error", e => {
+ return this.Events.trigger("error", e);
+ });
+
+ if (sub) {
+ client.on("message", (channel, message) => {
+ var ref;
+ return (ref = this.limiters[channel]) != null ? ref._store.onMessage(channel, message) : void 0;
+ });
+ }
+
+ if (client.status === "ready") {
+ return resolve();
+ } else {
+ return client.once("ready", resolve);
+ }
+ });
+ }
+
+ _loadScripts() {
+ return Scripts.names.forEach(name => {
+ return this.client.defineCommand(name, {
+ lua: Scripts.payload(name)
+ });
+ });
+ }
+
+ __runCommand__(cmd) {
+ var _this = this;
+
+ return _asyncToGenerator(function* () {
+ var _, deleted;
+
+ yield _this.ready;
+
+ var _ref = yield _this.client.pipeline([cmd]).exec();
+
+ var _ref2 = _slicedToArray(_ref, 1);
+
+ var _ref2$ = _slicedToArray(_ref2[0], 2);
+
+ _ = _ref2$[0];
+ deleted = _ref2$[1];
+ return deleted;
+ })();
+ }
+
+ __addLimiter__(instance) {
+ return this.Promise.all([instance.channel(), instance.channel_client()].map(channel => {
+ return new this.Promise((resolve, reject) => {
+ return this.subscriber.subscribe(channel, () => {
+ this.limiters[channel] = instance;
+ return resolve();
+ });
+ });
+ }));
+ }
+
+ __removeLimiter__(instance) {
+ var _this2 = this;
+
+ return [instance.channel(), instance.channel_client()].forEach(
+ /*#__PURE__*/
+ function () {
+ var _ref3 = _asyncToGenerator(function* (channel) {
+ if (!_this2.terminated) {
+ yield _this2.subscriber.unsubscribe(channel);
+ }
+
+ return delete _this2.limiters[channel];
+ });
+
+ return function (_x) {
+ return _ref3.apply(this, arguments);
+ };
+ }());
+ }
+
+ __scriptArgs__(name, id, args, cb) {
+ var keys;
+ keys = Scripts.keys(name, id);
+ return [keys.length].concat(keys, args, cb);
+ }
+
+ __scriptFn__(name) {
+ return this.client[name].bind(this.client);
+ }
+
+ disconnect(flush = true) {
+ var i, k, len, ref;
+ ref = Object.keys(this.limiters);
+
+ for (i = 0, len = ref.length; i < len; i++) {
+ k = ref[i];
+ clearInterval(this.limiters[k]._store.heartbeat);
+ }
+
+ this.limiters = {};
+ this.terminated = true;
+
+ if (flush) {
+ return this.Promise.all([this.client.quit(), this.subscriber.quit()]);
+ } else {
+ this.client.disconnect();
+ this.subscriber.disconnect();
+ return this.Promise.resolve();
+ }
+ }
+
+ }
+
+ ;
+ IORedisConnection.prototype.datastore = "ioredis";
+ IORedisConnection.prototype.defaults = {
+ Redis: null,
+ clientOptions: {},
+ clusterNodes: null,
+ client: null,
+ Promise: Promise,
+ Events: null
+ };
+ return IORedisConnection;
+}.call(void 0);
+
+module.exports = IORedisConnection;
\ No newline at end of file
diff --git a/node_modules/bottleneck/lib/Job.js b/node_modules/bottleneck/lib/Job.js
new file mode 100644
index 000000000..09ff6ca80
--- /dev/null
+++ b/node_modules/bottleneck/lib/Job.js
@@ -0,0 +1,215 @@
+"use strict";
+
+function asyncGeneratorStep(gen, resolve, reject, _next, _throw, key, arg) { try { var info = gen[key](arg); var value = info.value; } catch (error) { reject(error); return; } if (info.done) { resolve(value); } else { Promise.resolve(value).then(_next, _throw); } }
+
+function _asyncToGenerator(fn) { return function () { var self = this, args = arguments; return new Promise(function (resolve, reject) { var gen = fn.apply(self, args); function _next(value) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "next", value); } function _throw(err) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "throw", err); } _next(undefined); }); }; }
+
+var BottleneckError, DEFAULT_PRIORITY, Job, NUM_PRIORITIES, parser;
+NUM_PRIORITIES = 10;
+DEFAULT_PRIORITY = 5;
+parser = require("./parser");
+BottleneckError = require("./BottleneckError");
+Job = class Job {
+ constructor(task, args, options, jobDefaults, rejectOnDrop, Events, _states, Promise) {
+ this.task = task;
+ this.args = args;
+ this.rejectOnDrop = rejectOnDrop;
+ this.Events = Events;
+ this._states = _states;
+ this.Promise = Promise;
+ this.options = parser.load(options, jobDefaults);
+ this.options.priority = this._sanitizePriority(this.options.priority);
+
+ if (this.options.id === jobDefaults.id) {
+ this.options.id = `${this.options.id}-${this._randomIndex()}`;
+ }
+
+ this.promise = new this.Promise((_resolve, _reject) => {
+ this._resolve = _resolve;
+ this._reject = _reject;
+ });
+ this.retryCount = 0;
+ }
+
+ _sanitizePriority(priority) {
+ var sProperty;
+ sProperty = ~~priority !== priority ? DEFAULT_PRIORITY : priority;
+
+ if (sProperty < 0) {
+ return 0;
+ } else if (sProperty > NUM_PRIORITIES - 1) {
+ return NUM_PRIORITIES - 1;
+ } else {
+ return sProperty;
+ }
+ }
+
+ _randomIndex() {
+ return Math.random().toString(36).slice(2);
+ }
+
+ doDrop({
+ error,
+ message = "This job has been dropped by Bottleneck"
+ } = {}) {
+ if (this._states.remove(this.options.id)) {
+ if (this.rejectOnDrop) {
+ this._reject(error != null ? error : new BottleneckError(message));
+ }
+
+ this.Events.trigger("dropped", {
+ args: this.args,
+ options: this.options,
+ task: this.task,
+ promise: this.promise
+ });
+ return true;
+ } else {
+ return false;
+ }
+ }
+
+ _assertStatus(expected) {
+ var status;
+ status = this._states.jobStatus(this.options.id);
+
+ if (!(status === expected || expected === "DONE" && status === null)) {
+ throw new BottleneckError(`Invalid job status ${status}, expected ${expected}. Please open an issue at https://github.com/SGrondin/bottleneck/issues`);
+ }
+ }
+
+ doReceive() {
+ this._states.start(this.options.id);
+
+ return this.Events.trigger("received", {
+ args: this.args,
+ options: this.options
+ });
+ }
+
+ doQueue(reachedHWM, blocked) {
+ this._assertStatus("RECEIVED");
+
+ this._states.next(this.options.id);
+
+ return this.Events.trigger("queued", {
+ args: this.args,
+ options: this.options,
+ reachedHWM,
+ blocked
+ });
+ }
+
+ doRun() {
+ if (this.retryCount === 0) {
+ this._assertStatus("QUEUED");
+
+ this._states.next(this.options.id);
+ } else {
+ this._assertStatus("EXECUTING");
+ }
+
+ return this.Events.trigger("scheduled", {
+ args: this.args,
+ options: this.options
+ });
+ }
+
+ doExecute(chained, clearGlobalState, run, free) {
+ var _this = this;
+
+ return _asyncToGenerator(function* () {
+ var error, eventInfo, passed;
+
+ if (_this.retryCount === 0) {
+ _this._assertStatus("RUNNING");
+
+ _this._states.next(_this.options.id);
+ } else {
+ _this._assertStatus("EXECUTING");
+ }
+
+ eventInfo = {
+ args: _this.args,
+ options: _this.options,
+ retryCount: _this.retryCount
+ };
+
+ _this.Events.trigger("executing", eventInfo);
+
+ try {
+ passed = yield chained != null ? chained.schedule(_this.options, _this.task, ..._this.args) : _this.task(..._this.args);
+
+ if (clearGlobalState()) {
+ _this.doDone(eventInfo);
+
+ yield free(_this.options, eventInfo);
+
+ _this._assertStatus("DONE");
+
+ return _this._resolve(passed);
+ }
+ } catch (error1) {
+ error = error1;
+ return _this._onFailure(error, eventInfo, clearGlobalState, run, free);
+ }
+ })();
+ }
+
+ doExpire(clearGlobalState, run, free) {
+ var error, eventInfo;
+
+ if (this._states.jobStatus(this.options.id === "RUNNING")) {
+ this._states.next(this.options.id);
+ }
+
+ this._assertStatus("EXECUTING");
+
+ eventInfo = {
+ args: this.args,
+ options: this.options,
+ retryCount: this.retryCount
+ };
+ error = new BottleneckError(`This job timed out after ${this.options.expiration} ms.`);
+ return this._onFailure(error, eventInfo, clearGlobalState, run, free);
+ }
+
+ _onFailure(error, eventInfo, clearGlobalState, run, free) {
+ var _this2 = this;
+
+ return _asyncToGenerator(function* () {
+ var retry, retryAfter;
+
+ if (clearGlobalState()) {
+ retry = yield _this2.Events.trigger("failed", error, eventInfo);
+
+ if (retry != null) {
+ retryAfter = ~~retry;
+
+ _this2.Events.trigger("retry", `Retrying ${_this2.options.id} after ${retryAfter} ms`, eventInfo);
+
+ _this2.retryCount++;
+ return run(retryAfter);
+ } else {
+ _this2.doDone(eventInfo);
+
+ yield free(_this2.options, eventInfo);
+
+ _this2._assertStatus("DONE");
+
+ return _this2._reject(error);
+ }
+ }
+ })();
+ }
+
+ doDone(eventInfo) {
+ this._assertStatus("EXECUTING");
+
+ this._states.next(this.options.id);
+
+ return this.Events.trigger("done", eventInfo);
+ }
+
+};
+module.exports = Job;
\ No newline at end of file
diff --git a/node_modules/bottleneck/lib/LocalDatastore.js b/node_modules/bottleneck/lib/LocalDatastore.js
new file mode 100644
index 000000000..119849eda
--- /dev/null
+++ b/node_modules/bottleneck/lib/LocalDatastore.js
@@ -0,0 +1,287 @@
+"use strict";
+
+function asyncGeneratorStep(gen, resolve, reject, _next, _throw, key, arg) { try { var info = gen[key](arg); var value = info.value; } catch (error) { reject(error); return; } if (info.done) { resolve(value); } else { Promise.resolve(value).then(_next, _throw); } }
+
+function _asyncToGenerator(fn) { return function () { var self = this, args = arguments; return new Promise(function (resolve, reject) { var gen = fn.apply(self, args); function _next(value) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "next", value); } function _throw(err) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "throw", err); } _next(undefined); }); }; }
+
+var BottleneckError, LocalDatastore, parser;
+parser = require("./parser");
+BottleneckError = require("./BottleneckError");
+LocalDatastore = class LocalDatastore {
+ constructor(instance, storeOptions, storeInstanceOptions) {
+ this.instance = instance;
+ this.storeOptions = storeOptions;
+ this.clientId = this.instance._randomIndex();
+ parser.load(storeInstanceOptions, storeInstanceOptions, this);
+ this._nextRequest = this._lastReservoirRefresh = this._lastReservoirIncrease = Date.now();
+ this._running = 0;
+ this._done = 0;
+ this._unblockTime = 0;
+ this.ready = this.Promise.resolve();
+ this.clients = {};
+
+ this._startHeartbeat();
+ }
+
+ _startHeartbeat() {
+ var base;
+
+ if (this.heartbeat == null && (this.storeOptions.reservoirRefreshInterval != null && this.storeOptions.reservoirRefreshAmount != null || this.storeOptions.reservoirIncreaseInterval != null && this.storeOptions.reservoirIncreaseAmount != null)) {
+ return typeof (base = this.heartbeat = setInterval(() => {
+ var amount, incr, maximum, now, reservoir;
+ now = Date.now();
+
+ if (this.storeOptions.reservoirRefreshInterval != null && now >= this._lastReservoirRefresh + this.storeOptions.reservoirRefreshInterval) {
+ this._lastReservoirRefresh = now;
+ this.storeOptions.reservoir = this.storeOptions.reservoirRefreshAmount;
+
+ this.instance._drainAll(this.computeCapacity());
+ }
+
+ if (this.storeOptions.reservoirIncreaseInterval != null && now >= this._lastReservoirIncrease + this.storeOptions.reservoirIncreaseInterval) {
+ var _this$storeOptions = this.storeOptions;
+ amount = _this$storeOptions.reservoirIncreaseAmount;
+ maximum = _this$storeOptions.reservoirIncreaseMaximum;
+ reservoir = _this$storeOptions.reservoir;
+ this._lastReservoirIncrease = now;
+ incr = maximum != null ? Math.min(amount, maximum - reservoir) : amount;
+
+ if (incr > 0) {
+ this.storeOptions.reservoir += incr;
+ return this.instance._drainAll(this.computeCapacity());
+ }
+ }
+ }, this.heartbeatInterval)).unref === "function" ? base.unref() : void 0;
+ } else {
+ return clearInterval(this.heartbeat);
+ }
+ }
+
+ __publish__(message) {
+ var _this = this;
+
+ return _asyncToGenerator(function* () {
+ yield _this.yieldLoop();
+ return _this.instance.Events.trigger("message", message.toString());
+ })();
+ }
+
+ __disconnect__(flush) {
+ var _this2 = this;
+
+ return _asyncToGenerator(function* () {
+ yield _this2.yieldLoop();
+ clearInterval(_this2.heartbeat);
+ return _this2.Promise.resolve();
+ })();
+ }
+
+ yieldLoop(t = 0) {
+ return new this.Promise(function (resolve, reject) {
+ return setTimeout(resolve, t);
+ });
+ }
+
+ computePenalty() {
+ var ref;
+ return (ref = this.storeOptions.penalty) != null ? ref : 15 * this.storeOptions.minTime || 5000;
+ }
+
+ __updateSettings__(options) {
+ var _this3 = this;
+
+ return _asyncToGenerator(function* () {
+ yield _this3.yieldLoop();
+ parser.overwrite(options, options, _this3.storeOptions);
+
+ _this3._startHeartbeat();
+
+ _this3.instance._drainAll(_this3.computeCapacity());
+
+ return true;
+ })();
+ }
+
+ __running__() {
+ var _this4 = this;
+
+ return _asyncToGenerator(function* () {
+ yield _this4.yieldLoop();
+ return _this4._running;
+ })();
+ }
+
+ __queued__() {
+ var _this5 = this;
+
+ return _asyncToGenerator(function* () {
+ yield _this5.yieldLoop();
+ return _this5.instance.queued();
+ })();
+ }
+
+ __done__() {
+ var _this6 = this;
+
+ return _asyncToGenerator(function* () {
+ yield _this6.yieldLoop();
+ return _this6._done;
+ })();
+ }
+
+ __groupCheck__(time) {
+ var _this7 = this;
+
+ return _asyncToGenerator(function* () {
+ yield _this7.yieldLoop();
+ return _this7._nextRequest + _this7.timeout < time;
+ })();
+ }
+
+ computeCapacity() {
+ var maxConcurrent, reservoir;
+ var _this$storeOptions2 = this.storeOptions;
+ maxConcurrent = _this$storeOptions2.maxConcurrent;
+ reservoir = _this$storeOptions2.reservoir;
+
+ if (maxConcurrent != null && reservoir != null) {
+ return Math.min(maxConcurrent - this._running, reservoir);
+ } else if (maxConcurrent != null) {
+ return maxConcurrent - this._running;
+ } else if (reservoir != null) {
+ return reservoir;
+ } else {
+ return null;
+ }
+ }
+
+ conditionsCheck(weight) {
+ var capacity;
+ capacity = this.computeCapacity();
+ return capacity == null || weight <= capacity;
+ }
+
+ __incrementReservoir__(incr) {
+ var _this8 = this;
+
+ return _asyncToGenerator(function* () {
+ var reservoir;
+ yield _this8.yieldLoop();
+ reservoir = _this8.storeOptions.reservoir += incr;
+
+ _this8.instance._drainAll(_this8.computeCapacity());
+
+ return reservoir;
+ })();
+ }
+
+ __currentReservoir__() {
+ var _this9 = this;
+
+ return _asyncToGenerator(function* () {
+ yield _this9.yieldLoop();
+ return _this9.storeOptions.reservoir;
+ })();
+ }
+
+ isBlocked(now) {
+ return this._unblockTime >= now;
+ }
+
+ check(weight, now) {
+ return this.conditionsCheck(weight) && this._nextRequest - now <= 0;
+ }
+
+ __check__(weight) {
+ var _this10 = this;
+
+ return _asyncToGenerator(function* () {
+ var now;
+ yield _this10.yieldLoop();
+ now = Date.now();
+ return _this10.check(weight, now);
+ })();
+ }
+
+ __register__(index, weight, expiration) {
+ var _this11 = this;
+
+ return _asyncToGenerator(function* () {
+ var now, wait;
+ yield _this11.yieldLoop();
+ now = Date.now();
+
+ if (_this11.conditionsCheck(weight)) {
+ _this11._running += weight;
+
+ if (_this11.storeOptions.reservoir != null) {
+ _this11.storeOptions.reservoir -= weight;
+ }
+
+ wait = Math.max(_this11._nextRequest - now, 0);
+ _this11._nextRequest = now + wait + _this11.storeOptions.minTime;
+ return {
+ success: true,
+ wait,
+ reservoir: _this11.storeOptions.reservoir
+ };
+ } else {
+ return {
+ success: false
+ };
+ }
+ })();
+ }
+
+ strategyIsBlock() {
+ return this.storeOptions.strategy === 3;
+ }
+
+ __submit__(queueLength, weight) {
+ var _this12 = this;
+
+ return _asyncToGenerator(function* () {
+ var blocked, now, reachedHWM;
+ yield _this12.yieldLoop();
+
+ if (_this12.storeOptions.maxConcurrent != null && weight > _this12.storeOptions.maxConcurrent) {
+ throw new BottleneckError(`Impossible to add a job having a weight of ${weight} to a limiter having a maxConcurrent setting of ${_this12.storeOptions.maxConcurrent}`);
+ }
+
+ now = Date.now();
+ reachedHWM = _this12.storeOptions.highWater != null && queueLength === _this12.storeOptions.highWater && !_this12.check(weight, now);
+ blocked = _this12.strategyIsBlock() && (reachedHWM || _this12.isBlocked(now));
+
+ if (blocked) {
+ _this12._unblockTime = now + _this12.computePenalty();
+ _this12._nextRequest = _this12._unblockTime + _this12.storeOptions.minTime;
+
+ _this12.instance._dropAllQueued();
+ }
+
+ return {
+ reachedHWM,
+ blocked,
+ strategy: _this12.storeOptions.strategy
+ };
+ })();
+ }
+
+ __free__(index, weight) {
+ var _this13 = this;
+
+ return _asyncToGenerator(function* () {
+ yield _this13.yieldLoop();
+ _this13._running -= weight;
+ _this13._done += weight;
+
+ _this13.instance._drainAll(_this13.computeCapacity());
+
+ return {
+ running: _this13._running
+ };
+ })();
+ }
+
+};
+module.exports = LocalDatastore;
\ No newline at end of file
diff --git a/node_modules/bottleneck/lib/Queues.js b/node_modules/bottleneck/lib/Queues.js
new file mode 100644
index 000000000..1e4129ac0
--- /dev/null
+++ b/node_modules/bottleneck/lib/Queues.js
@@ -0,0 +1,77 @@
+"use strict";
+
+var DLList, Events, Queues;
+DLList = require("./DLList");
+Events = require("./Events");
+Queues = class Queues {
+ constructor(num_priorities) {
+ var i;
+ this.Events = new Events(this);
+ this._length = 0;
+
+ this._lists = function () {
+ var j, ref, results;
+ results = [];
+
+ for (i = j = 1, ref = num_priorities; 1 <= ref ? j <= ref : j >= ref; i = 1 <= ref ? ++j : --j) {
+ results.push(new DLList(() => {
+ return this.incr();
+ }, () => {
+ return this.decr();
+ }));
+ }
+
+ return results;
+ }.call(this);
+ }
+
+ incr() {
+ if (this._length++ === 0) {
+ return this.Events.trigger("leftzero");
+ }
+ }
+
+ decr() {
+ if (--this._length === 0) {
+ return this.Events.trigger("zero");
+ }
+ }
+
+ push(job) {
+ return this._lists[job.options.priority].push(job);
+ }
+
+ queued(priority) {
+ if (priority != null) {
+ return this._lists[priority].length;
+ } else {
+ return this._length;
+ }
+ }
+
+ shiftAll(fn) {
+ return this._lists.forEach(function (list) {
+ return list.forEachShift(fn);
+ });
+ }
+
+ getFirst(arr = this._lists) {
+ var j, len, list;
+
+ for (j = 0, len = arr.length; j < len; j++) {
+ list = arr[j];
+
+ if (list.length > 0) {
+ return list;
+ }
+ }
+
+ return [];
+ }
+
+ shiftLastFrom(priority) {
+ return this.getFirst(this._lists.slice(priority).reverse()).shift();
+ }
+
+};
+module.exports = Queues;
\ No newline at end of file
diff --git a/node_modules/bottleneck/lib/RedisConnection.js b/node_modules/bottleneck/lib/RedisConnection.js
new file mode 100644
index 000000000..b110704ea
--- /dev/null
+++ b/node_modules/bottleneck/lib/RedisConnection.js
@@ -0,0 +1,193 @@
+"use strict";
+
+function asyncGeneratorStep(gen, resolve, reject, _next, _throw, key, arg) { try { var info = gen[key](arg); var value = info.value; } catch (error) { reject(error); return; } if (info.done) { resolve(value); } else { Promise.resolve(value).then(_next, _throw); } }
+
+function _asyncToGenerator(fn) { return function () { var self = this, args = arguments; return new Promise(function (resolve, reject) { var gen = fn.apply(self, args); function _next(value) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "next", value); } function _throw(err) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "throw", err); } _next(undefined); }); }; }
+
+var Events, RedisConnection, Scripts, parser;
+parser = require("./parser");
+Events = require("./Events");
+Scripts = require("./Scripts");
+
+RedisConnection = function () {
+ class RedisConnection {
+ constructor(options = {}) {
+ parser.load(options, this.defaults, this);
+
+ if (this.Redis == null) {
+ this.Redis = eval("require")("redis"); // Obfuscated or else Webpack/Angular will try to inline the optional redis module. To override this behavior: pass the redis module to Bottleneck as the 'Redis' option.
+ }
+
+ if (this.Events == null) {
+ this.Events = new Events(this);
+ }
+
+ this.terminated = false;
+
+ if (this.client == null) {
+ this.client = this.Redis.createClient(this.clientOptions);
+ }
+
+ this.subscriber = this.client.duplicate();
+ this.limiters = {};
+ this.shas = {};
+ this.ready = this.Promise.all([this._setup(this.client, false), this._setup(this.subscriber, true)]).then(() => {
+ return this._loadScripts();
+ }).then(() => {
+ return {
+ client: this.client,
+ subscriber: this.subscriber
+ };
+ });
+ }
+
+ _setup(client, sub) {
+ client.setMaxListeners(0);
+ return new this.Promise((resolve, reject) => {
+ client.on("error", e => {
+ return this.Events.trigger("error", e);
+ });
+
+ if (sub) {
+ client.on("message", (channel, message) => {
+ var ref;
+ return (ref = this.limiters[channel]) != null ? ref._store.onMessage(channel, message) : void 0;
+ });
+ }
+
+ if (client.ready) {
+ return resolve();
+ } else {
+ return client.once("ready", resolve);
+ }
+ });
+ }
+
+ _loadScript(name) {
+ return new this.Promise((resolve, reject) => {
+ var payload;
+ payload = Scripts.payload(name);
+ return this.client.multi([["script", "load", payload]]).exec((err, replies) => {
+ if (err != null) {
+ return reject(err);
+ }
+
+ this.shas[name] = replies[0];
+ return resolve(replies[0]);
+ });
+ });
+ }
+
+ _loadScripts() {
+ return this.Promise.all(Scripts.names.map(k => {
+ return this._loadScript(k);
+ }));
+ }
+
+ __runCommand__(cmd) {
+ var _this = this;
+
+ return _asyncToGenerator(function* () {
+ yield _this.ready;
+ return new _this.Promise((resolve, reject) => {
+ return _this.client.multi([cmd]).exec_atomic(function (err, replies) {
+ if (err != null) {
+ return reject(err);
+ } else {
+ return resolve(replies[0]);
+ }
+ });
+ });
+ })();
+ }
+
+ __addLimiter__(instance) {
+ return this.Promise.all([instance.channel(), instance.channel_client()].map(channel => {
+ return new this.Promise((resolve, reject) => {
+ var handler;
+
+ handler = chan => {
+ if (chan === channel) {
+ this.subscriber.removeListener("subscribe", handler);
+ this.limiters[channel] = instance;
+ return resolve();
+ }
+ };
+
+ this.subscriber.on("subscribe", handler);
+ return this.subscriber.subscribe(channel);
+ });
+ }));
+ }
+
+ __removeLimiter__(instance) {
+ var _this2 = this;
+
+ return this.Promise.all([instance.channel(), instance.channel_client()].map(
+ /*#__PURE__*/
+ function () {
+ var _ref = _asyncToGenerator(function* (channel) {
+ if (!_this2.terminated) {
+ yield new _this2.Promise((resolve, reject) => {
+ return _this2.subscriber.unsubscribe(channel, function (err, chan) {
+ if (err != null) {
+ return reject(err);
+ }
+
+ if (chan === channel) {
+ return resolve();
+ }
+ });
+ });
+ }
+
+ return delete _this2.limiters[channel];
+ });
+
+ return function (_x) {
+ return _ref.apply(this, arguments);
+ };
+ }()));
+ }
+
+ __scriptArgs__(name, id, args, cb) {
+ var keys;
+ keys = Scripts.keys(name, id);
+ return [this.shas[name], keys.length].concat(keys, args, cb);
+ }
+
+ __scriptFn__(name) {
+ return this.client.evalsha.bind(this.client);
+ }
+
+ disconnect(flush = true) {
+ var i, k, len, ref;
+ ref = Object.keys(this.limiters);
+
+ for (i = 0, len = ref.length; i < len; i++) {
+ k = ref[i];
+ clearInterval(this.limiters[k]._store.heartbeat);
+ }
+
+ this.limiters = {};
+ this.terminated = true;
+ this.client.end(flush);
+ this.subscriber.end(flush);
+ return this.Promise.resolve();
+ }
+
+ }
+
+ ;
+ RedisConnection.prototype.datastore = "redis";
+ RedisConnection.prototype.defaults = {
+ Redis: null,
+ clientOptions: {},
+ client: null,
+ Promise: Promise,
+ Events: null
+ };
+ return RedisConnection;
+}.call(void 0);
+
+module.exports = RedisConnection;
\ No newline at end of file
diff --git a/node_modules/bottleneck/lib/RedisDatastore.js b/node_modules/bottleneck/lib/RedisDatastore.js
new file mode 100644
index 000000000..dc5943e8a
--- /dev/null
+++ b/node_modules/bottleneck/lib/RedisDatastore.js
@@ -0,0 +1,352 @@
+"use strict";
+
+function _slicedToArray(arr, i) { return _arrayWithHoles(arr) || _iterableToArrayLimit(arr, i) || _nonIterableRest(); }
+
+function _nonIterableRest() { throw new TypeError("Invalid attempt to destructure non-iterable instance"); }
+
+function _iterableToArrayLimit(arr, i) { var _arr = []; var _n = true; var _d = false; var _e = undefined; try { for (var _i = arr[Symbol.iterator](), _s; !(_n = (_s = _i.next()).done); _n = true) { _arr.push(_s.value); if (i && _arr.length === i) break; } } catch (err) { _d = true; _e = err; } finally { try { if (!_n && _i["return"] != null) _i["return"](); } finally { if (_d) throw _e; } } return _arr; }
+
+function _arrayWithHoles(arr) { if (Array.isArray(arr)) return arr; }
+
+function asyncGeneratorStep(gen, resolve, reject, _next, _throw, key, arg) { try { var info = gen[key](arg); var value = info.value; } catch (error) { reject(error); return; } if (info.done) { resolve(value); } else { Promise.resolve(value).then(_next, _throw); } }
+
+function _asyncToGenerator(fn) { return function () { var self = this, args = arguments; return new Promise(function (resolve, reject) { var gen = fn.apply(self, args); function _next(value) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "next", value); } function _throw(err) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "throw", err); } _next(undefined); }); }; }
+
+var BottleneckError, IORedisConnection, RedisConnection, RedisDatastore, parser;
+parser = require("./parser");
+BottleneckError = require("./BottleneckError");
+RedisConnection = require("./RedisConnection");
+IORedisConnection = require("./IORedisConnection");
+RedisDatastore = class RedisDatastore {
+ constructor(instance, storeOptions, storeInstanceOptions) {
+ this.instance = instance;
+ this.storeOptions = storeOptions;
+ this.originalId = this.instance.id;
+ this.clientId = this.instance._randomIndex();
+ parser.load(storeInstanceOptions, storeInstanceOptions, this);
+ this.clients = {};
+ this.capacityPriorityCounters = {};
+ this.sharedConnection = this.connection != null;
+
+ if (this.connection == null) {
+ this.connection = this.instance.datastore === "redis" ? new RedisConnection({
+ Redis: this.Redis,
+ clientOptions: this.clientOptions,
+ Promise: this.Promise,
+ Events: this.instance.Events
+ }) : this.instance.datastore === "ioredis" ? new IORedisConnection({
+ Redis: this.Redis,
+ clientOptions: this.clientOptions,
+ clusterNodes: this.clusterNodes,
+ Promise: this.Promise,
+ Events: this.instance.Events
+ }) : void 0;
+ }
+
+ this.instance.connection = this.connection;
+ this.instance.datastore = this.connection.datastore;
+ this.ready = this.connection.ready.then(clients => {
+ this.clients = clients;
+ return this.runScript("init", this.prepareInitSettings(this.clearDatastore));
+ }).then(() => {
+ return this.connection.__addLimiter__(this.instance);
+ }).then(() => {
+ return this.runScript("register_client", [this.instance.queued()]);
+ }).then(() => {
+ var base;
+
+ if (typeof (base = this.heartbeat = setInterval(() => {
+ return this.runScript("heartbeat", []).catch(e => {
+ return this.instance.Events.trigger("error", e);
+ });
+ }, this.heartbeatInterval)).unref === "function") {
+ base.unref();
+ }
+
+ return this.clients;
+ });
+ }
+
+ __publish__(message) {
+ var _this = this;
+
+ return _asyncToGenerator(function* () {
+ var client;
+
+ var _ref = yield _this.ready;
+
+ client = _ref.client;
+ return client.publish(_this.instance.channel(), `message:${message.toString()}`);
+ })();
+ }
+
+ onMessage(channel, message) {
+ var _this2 = this;
+
+ return _asyncToGenerator(function* () {
+ var capacity, counter, data, drained, e, newCapacity, pos, priorityClient, rawCapacity, type;
+
+ try {
+ pos = message.indexOf(":");
+ var _ref2 = [message.slice(0, pos), message.slice(pos + 1)];
+ type = _ref2[0];
+ data = _ref2[1];
+
+ if (type === "capacity") {
+ return yield _this2.instance._drainAll(data.length > 0 ? ~~data : void 0);
+ } else if (type === "capacity-priority") {
+ var _data$split = data.split(":");
+
+ var _data$split2 = _slicedToArray(_data$split, 3);
+
+ rawCapacity = _data$split2[0];
+ priorityClient = _data$split2[1];
+ counter = _data$split2[2];
+ capacity = rawCapacity.length > 0 ? ~~rawCapacity : void 0;
+
+ if (priorityClient === _this2.clientId) {
+ drained = yield _this2.instance._drainAll(capacity);
+ newCapacity = capacity != null ? capacity - (drained || 0) : "";
+ return yield _this2.clients.client.publish(_this2.instance.channel(), `capacity-priority:${newCapacity}::${counter}`);
+ } else if (priorityClient === "") {
+ clearTimeout(_this2.capacityPriorityCounters[counter]);
+ delete _this2.capacityPriorityCounters[counter];
+ return _this2.instance._drainAll(capacity);
+ } else {
+ return _this2.capacityPriorityCounters[counter] = setTimeout(
+ /*#__PURE__*/
+ _asyncToGenerator(function* () {
+ var e;
+
+ try {
+ delete _this2.capacityPriorityCounters[counter];
+ yield _this2.runScript("blacklist_client", [priorityClient]);
+ return yield _this2.instance._drainAll(capacity);
+ } catch (error) {
+ e = error;
+ return _this2.instance.Events.trigger("error", e);
+ }
+ }), 1000);
+ }
+ } else if (type === "message") {
+ return _this2.instance.Events.trigger("message", data);
+ } else if (type === "blocked") {
+ return yield _this2.instance._dropAllQueued();
+ }
+ } catch (error) {
+ e = error;
+ return _this2.instance.Events.trigger("error", e);
+ }
+ })();
+ }
+
+ __disconnect__(flush) {
+ clearInterval(this.heartbeat);
+
+ if (this.sharedConnection) {
+ return this.connection.__removeLimiter__(this.instance);
+ } else {
+ return this.connection.disconnect(flush);
+ }
+ }
+
+ runScript(name, args) {
+ var _this3 = this;
+
+ return _asyncToGenerator(function* () {
+ if (!(name === "init" || name === "register_client")) {
+ yield _this3.ready;
+ }
+
+ return new _this3.Promise((resolve, reject) => {
+ var all_args, arr;
+ all_args = [Date.now(), _this3.clientId].concat(args);
+
+ _this3.instance.Events.trigger("debug", `Calling Redis script: ${name}.lua`, all_args);
+
+ arr = _this3.connection.__scriptArgs__(name, _this3.originalId, all_args, function (err, replies) {
+ if (err != null) {
+ return reject(err);
+ }
+
+ return resolve(replies);
+ });
+ return _this3.connection.__scriptFn__(name)(...arr);
+ }).catch(e => {
+ if (e.message === "SETTINGS_KEY_NOT_FOUND") {
+ if (name === "heartbeat") {
+ return _this3.Promise.resolve();
+ } else {
+ return _this3.runScript("init", _this3.prepareInitSettings(false)).then(() => {
+ return _this3.runScript(name, args);
+ });
+ }
+ } else if (e.message === "UNKNOWN_CLIENT") {
+ return _this3.runScript("register_client", [_this3.instance.queued()]).then(() => {
+ return _this3.runScript(name, args);
+ });
+ } else {
+ return _this3.Promise.reject(e);
+ }
+ });
+ })();
+ }
+
+ prepareArray(arr) {
+ var i, len, results, x;
+ results = [];
+
+ for (i = 0, len = arr.length; i < len; i++) {
+ x = arr[i];
+ results.push(x != null ? x.toString() : "");
+ }
+
+ return results;
+ }
+
+ prepareObject(obj) {
+ var arr, k, v;
+ arr = [];
+
+ for (k in obj) {
+ v = obj[k];
+ arr.push(k, v != null ? v.toString() : "");
+ }
+
+ return arr;
+ }
+
+ prepareInitSettings(clear) {
+ var args;
+ args = this.prepareObject(Object.assign({}, this.storeOptions, {
+ id: this.originalId,
+ version: this.instance.version,
+ groupTimeout: this.timeout,
+ clientTimeout: this.clientTimeout
+ }));
+ args.unshift(clear ? 1 : 0, this.instance.version);
+ return args;
+ }
+
+ convertBool(b) {
+ return !!b;
+ }
+
+ __updateSettings__(options) {
+ var _this4 = this;
+
+ return _asyncToGenerator(function* () {
+ yield _this4.runScript("update_settings", _this4.prepareObject(options));
+ return parser.overwrite(options, options, _this4.storeOptions);
+ })();
+ }
+
+ __running__() {
+ return this.runScript("running", []);
+ }
+
+ __queued__() {
+ return this.runScript("queued", []);
+ }
+
+ __done__() {
+ return this.runScript("done", []);
+ }
+
+ __groupCheck__() {
+ var _this5 = this;
+
+ return _asyncToGenerator(function* () {
+ return _this5.convertBool((yield _this5.runScript("group_check", [])));
+ })();
+ }
+
+ __incrementReservoir__(incr) {
+ return this.runScript("increment_reservoir", [incr]);
+ }
+
+ __currentReservoir__() {
+ return this.runScript("current_reservoir", []);
+ }
+
+ __check__(weight) {
+ var _this6 = this;
+
+ return _asyncToGenerator(function* () {
+ return _this6.convertBool((yield _this6.runScript("check", _this6.prepareArray([weight]))));
+ })();
+ }
+
+ __register__(index, weight, expiration) {
+ var _this7 = this;
+
+ return _asyncToGenerator(function* () {
+ var reservoir, success, wait;
+
+ var _ref4 = yield _this7.runScript("register", _this7.prepareArray([index, weight, expiration]));
+
+ var _ref5 = _slicedToArray(_ref4, 3);
+
+ success = _ref5[0];
+ wait = _ref5[1];
+ reservoir = _ref5[2];
+ return {
+ success: _this7.convertBool(success),
+ wait,
+ reservoir
+ };
+ })();
+ }
+
+ __submit__(queueLength, weight) {
+ var _this8 = this;
+
+ return _asyncToGenerator(function* () {
+ var blocked, e, maxConcurrent, overweight, reachedHWM, strategy;
+
+ try {
+ var _ref6 = yield _this8.runScript("submit", _this8.prepareArray([queueLength, weight]));
+
+ var _ref7 = _slicedToArray(_ref6, 3);
+
+ reachedHWM = _ref7[0];
+ blocked = _ref7[1];
+ strategy = _ref7[2];
+ return {
+ reachedHWM: _this8.convertBool(reachedHWM),
+ blocked: _this8.convertBool(blocked),
+ strategy
+ };
+ } catch (error) {
+ e = error;
+
+ if (e.message.indexOf("OVERWEIGHT") === 0) {
+ var _e$message$split = e.message.split(":");
+
+ var _e$message$split2 = _slicedToArray(_e$message$split, 3);
+
+ overweight = _e$message$split2[0];
+ weight = _e$message$split2[1];
+ maxConcurrent = _e$message$split2[2];
+ throw new BottleneckError(`Impossible to add a job having a weight of ${weight} to a limiter having a maxConcurrent setting of ${maxConcurrent}`);
+ } else {
+ throw e;
+ }
+ }
+ })();
+ }
+
+ __free__(index, weight) {
+ var _this9 = this;
+
+ return _asyncToGenerator(function* () {
+ var running;
+ running = yield _this9.runScript("free", _this9.prepareArray([index]));
+ return {
+ running
+ };
+ })();
+ }
+
+};
+module.exports = RedisDatastore;
\ No newline at end of file
diff --git a/node_modules/bottleneck/lib/Scripts.js b/node_modules/bottleneck/lib/Scripts.js
new file mode 100644
index 000000000..96467eb1e
--- /dev/null
+++ b/node_modules/bottleneck/lib/Scripts.js
@@ -0,0 +1,162 @@
+"use strict";
+
+var headers, lua, templates;
+lua = require("./lua.json");
+headers = {
+ refs: lua["refs.lua"],
+ validate_keys: lua["validate_keys.lua"],
+ validate_client: lua["validate_client.lua"],
+ refresh_expiration: lua["refresh_expiration.lua"],
+ process_tick: lua["process_tick.lua"],
+ conditions_check: lua["conditions_check.lua"],
+ get_time: lua["get_time.lua"]
+};
+
+exports.allKeys = function (id) {
+ return [
+ /*
+ HASH
+ */
+ `b_${id}_settings`,
+ /*
+ HASH
+ job index -> weight
+ */
+ `b_${id}_job_weights`,
+ /*
+ ZSET
+ job index -> expiration
+ */
+ `b_${id}_job_expirations`,
+ /*
+ HASH
+ job index -> client
+ */
+ `b_${id}_job_clients`,
+ /*
+ ZSET
+ client -> sum running
+ */
+ `b_${id}_client_running`,
+ /*
+ HASH
+ client -> num queued
+ */
+ `b_${id}_client_num_queued`,
+ /*
+ ZSET
+ client -> last job registered
+ */
+ `b_${id}_client_last_registered`,
+ /*
+ ZSET
+ client -> last seen
+ */
+ `b_${id}_client_last_seen`];
+};
+
+templates = {
+ init: {
+ keys: exports.allKeys,
+ headers: ["process_tick"],
+ refresh_expiration: true,
+ code: lua["init.lua"]
+ },
+ group_check: {
+ keys: exports.allKeys,
+ headers: [],
+ refresh_expiration: false,
+ code: lua["group_check.lua"]
+ },
+ register_client: {
+ keys: exports.allKeys,
+ headers: ["validate_keys"],
+ refresh_expiration: false,
+ code: lua["register_client.lua"]
+ },
+ blacklist_client: {
+ keys: exports.allKeys,
+ headers: ["validate_keys", "validate_client"],
+ refresh_expiration: false,
+ code: lua["blacklist_client.lua"]
+ },
+ heartbeat: {
+ keys: exports.allKeys,
+ headers: ["validate_keys", "validate_client", "process_tick"],
+ refresh_expiration: false,
+ code: lua["heartbeat.lua"]
+ },
+ update_settings: {
+ keys: exports.allKeys,
+ headers: ["validate_keys", "validate_client", "process_tick"],
+ refresh_expiration: true,
+ code: lua["update_settings.lua"]
+ },
+ running: {
+ keys: exports.allKeys,
+ headers: ["validate_keys", "validate_client", "process_tick"],
+ refresh_expiration: false,
+ code: lua["running.lua"]
+ },
+ queued: {
+ keys: exports.allKeys,
+ headers: ["validate_keys", "validate_client"],
+ refresh_expiration: false,
+ code: lua["queued.lua"]
+ },
+ done: {
+ keys: exports.allKeys,
+ headers: ["validate_keys", "validate_client", "process_tick"],
+ refresh_expiration: false,
+ code: lua["done.lua"]
+ },
+ check: {
+ keys: exports.allKeys,
+ headers: ["validate_keys", "validate_client", "process_tick", "conditions_check"],
+ refresh_expiration: false,
+ code: lua["check.lua"]
+ },
+ submit: {
+ keys: exports.allKeys,
+ headers: ["validate_keys", "validate_client", "process_tick", "conditions_check"],
+ refresh_expiration: true,
+ code: lua["submit.lua"]
+ },
+ register: {
+ keys: exports.allKeys,
+ headers: ["validate_keys", "validate_client", "process_tick", "conditions_check"],
+ refresh_expiration: true,
+ code: lua["register.lua"]
+ },
+ free: {
+ keys: exports.allKeys,
+ headers: ["validate_keys", "validate_client", "process_tick"],
+ refresh_expiration: true,
+ code: lua["free.lua"]
+ },
+ current_reservoir: {
+ keys: exports.allKeys,
+ headers: ["validate_keys", "validate_client", "process_tick"],
+ refresh_expiration: false,
+ code: lua["current_reservoir.lua"]
+ },
+ increment_reservoir: {
+ keys: exports.allKeys,
+ headers: ["validate_keys", "validate_client", "process_tick"],
+ refresh_expiration: true,
+ code: lua["increment_reservoir.lua"]
+ }
+};
+exports.names = Object.keys(templates);
+
+exports.keys = function (name, id) {
+ return templates[name].keys(id);
+};
+
+exports.payload = function (name) {
+ var template;
+ template = templates[name];
+ return Array.prototype.concat(headers.refs, template.headers.map(function (h) {
+ return headers[h];
+ }), template.refresh_expiration ? headers.refresh_expiration : "", template.code).join("\n");
+};
\ No newline at end of file
diff --git a/node_modules/bottleneck/lib/States.js b/node_modules/bottleneck/lib/States.js
new file mode 100644
index 000000000..9b8ac1422
--- /dev/null
+++ b/node_modules/bottleneck/lib/States.js
@@ -0,0 +1,88 @@
+"use strict";
+
+var BottleneckError, States;
+BottleneckError = require("./BottleneckError");
+States = class States {
+ constructor(status1) {
+ this.status = status1;
+ this._jobs = {};
+ this.counts = this.status.map(function () {
+ return 0;
+ });
+ }
+
+ next(id) {
+ var current, next;
+ current = this._jobs[id];
+ next = current + 1;
+
+ if (current != null && next < this.status.length) {
+ this.counts[current]--;
+ this.counts[next]++;
+ return this._jobs[id]++;
+ } else if (current != null) {
+ this.counts[current]--;
+ return delete this._jobs[id];
+ }
+ }
+
+ start(id) {
+ var initial;
+ initial = 0;
+ this._jobs[id] = initial;
+ return this.counts[initial]++;
+ }
+
+ remove(id) {
+ var current;
+ current = this._jobs[id];
+
+ if (current != null) {
+ this.counts[current]--;
+ delete this._jobs[id];
+ }
+
+ return current != null;
+ }
+
+ jobStatus(id) {
+ var ref;
+ return (ref = this.status[this._jobs[id]]) != null ? ref : null;
+ }
+
+ statusJobs(status) {
+ var k, pos, ref, results, v;
+
+ if (status != null) {
+ pos = this.status.indexOf(status);
+
+ if (pos < 0) {
+ throw new BottleneckError(`status must be one of ${this.status.join(', ')}`);
+ }
+
+ ref = this._jobs;
+ results = [];
+
+ for (k in ref) {
+ v = ref[k];
+
+ if (v === pos) {
+ results.push(k);
+ }
+ }
+
+ return results;
+ } else {
+ return Object.keys(this._jobs);
+ }
+ }
+
+ statusCounts() {
+ return this.counts.reduce((acc, v, i) => {
+ acc[this.status[i]] = v;
+ return acc;
+ }, {});
+ }
+
+};
+module.exports = States;
\ No newline at end of file
diff --git a/node_modules/bottleneck/lib/Sync.js b/node_modules/bottleneck/lib/Sync.js
new file mode 100644
index 000000000..f51eee4a0
--- /dev/null
+++ b/node_modules/bottleneck/lib/Sync.js
@@ -0,0 +1,80 @@
+"use strict";
+
+function asyncGeneratorStep(gen, resolve, reject, _next, _throw, key, arg) { try { var info = gen[key](arg); var value = info.value; } catch (error) { reject(error); return; } if (info.done) { resolve(value); } else { Promise.resolve(value).then(_next, _throw); } }
+
+function _asyncToGenerator(fn) { return function () { var self = this, args = arguments; return new Promise(function (resolve, reject) { var gen = fn.apply(self, args); function _next(value) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "next", value); } function _throw(err) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "throw", err); } _next(undefined); }); }; }
+
+var DLList, Sync;
+DLList = require("./DLList");
+Sync = class Sync {
+ constructor(name, Promise) {
+ this.schedule = this.schedule.bind(this);
+ this.name = name;
+ this.Promise = Promise;
+ this._running = 0;
+ this._queue = new DLList();
+ }
+
+ isEmpty() {
+ return this._queue.length === 0;
+ }
+
+ _tryToRun() {
+ var _this = this;
+
+ return _asyncToGenerator(function* () {
+ var args, cb, error, reject, resolve, returned, task;
+
+ if (_this._running < 1 && _this._queue.length > 0) {
+ _this._running++;
+
+ var _this$_queue$shift = _this._queue.shift();
+
+ task = _this$_queue$shift.task;
+ args = _this$_queue$shift.args;
+ resolve = _this$_queue$shift.resolve;
+ reject = _this$_queue$shift.reject;
+ cb = yield _asyncToGenerator(function* () {
+ try {
+ returned = yield task(...args);
+ return function () {
+ return resolve(returned);
+ };
+ } catch (error1) {
+ error = error1;
+ return function () {
+ return reject(error);
+ };
+ }
+ })();
+ _this._running--;
+
+ _this._tryToRun();
+
+ return cb();
+ }
+ })();
+ }
+
+ schedule(task, ...args) {
+ var promise, reject, resolve;
+ resolve = reject = null;
+ promise = new this.Promise(function (_resolve, _reject) {
+ resolve = _resolve;
+ return reject = _reject;
+ });
+
+ this._queue.push({
+ task,
+ args,
+ resolve,
+ reject
+ });
+
+ this._tryToRun();
+
+ return promise;
+ }
+
+};
+module.exports = Sync;
\ No newline at end of file
diff --git a/node_modules/bottleneck/lib/es5.js b/node_modules/bottleneck/lib/es5.js
new file mode 100644
index 000000000..822a26d8c
--- /dev/null
+++ b/node_modules/bottleneck/lib/es5.js
@@ -0,0 +1,5 @@
+"use strict";
+
+require("regenerator-runtime/runtime");
+
+module.exports = require("./Bottleneck");
\ No newline at end of file
diff --git a/node_modules/bottleneck/lib/index.js b/node_modules/bottleneck/lib/index.js
new file mode 100644
index 000000000..3d447c13b
--- /dev/null
+++ b/node_modules/bottleneck/lib/index.js
@@ -0,0 +1,3 @@
+"use strict";
+
+module.exports = require("./Bottleneck");
\ No newline at end of file
diff --git a/node_modules/bottleneck/lib/lua.json b/node_modules/bottleneck/lib/lua.json
new file mode 100644
index 000000000..c17cc4990
--- /dev/null
+++ b/node_modules/bottleneck/lib/lua.json
@@ -0,0 +1,24 @@
+{
+ "blacklist_client.lua": "local blacklist = ARGV[num_static_argv + 1]\n\nif redis.call('zscore', client_last_seen_key, blacklist) then\n redis.call('zadd', client_last_seen_key, 0, blacklist)\nend\n\n\nreturn {}\n",
+ "check.lua": "local weight = tonumber(ARGV[num_static_argv + 1])\n\nlocal capacity = process_tick(now, false)['capacity']\nlocal nextRequest = tonumber(redis.call('hget', settings_key, 'nextRequest'))\n\nreturn conditions_check(capacity, weight) and nextRequest - now <= 0\n",
+ "conditions_check.lua": "local conditions_check = function (capacity, weight)\n return capacity == nil or weight <= capacity\nend\n",
+ "current_reservoir.lua": "return process_tick(now, false)['reservoir']\n",
+ "done.lua": "process_tick(now, false)\n\nreturn tonumber(redis.call('hget', settings_key, 'done'))\n",
+ "free.lua": "local index = ARGV[num_static_argv + 1]\n\nredis.call('zadd', job_expirations_key, 0, index)\n\nreturn process_tick(now, false)['running']\n",
+ "get_time.lua": "redis.replicate_commands()\n\nlocal get_time = function ()\n local time = redis.call('time')\n\n return tonumber(time[1]..string.sub(time[2], 1, 3))\nend\n",
+ "group_check.lua": "return not (redis.call('exists', settings_key) == 1)\n",
+ "heartbeat.lua": "process_tick(now, true)\n",
+ "increment_reservoir.lua": "local incr = tonumber(ARGV[num_static_argv + 1])\n\nredis.call('hincrby', settings_key, 'reservoir', incr)\n\nlocal reservoir = process_tick(now, true)['reservoir']\n\nlocal groupTimeout = tonumber(redis.call('hget', settings_key, 'groupTimeout'))\nrefresh_expiration(0, 0, groupTimeout)\n\nreturn reservoir\n",
+ "init.lua": "local clear = tonumber(ARGV[num_static_argv + 1])\nlocal limiter_version = ARGV[num_static_argv + 2]\nlocal num_local_argv = num_static_argv + 2\n\nif clear == 1 then\n redis.call('del', unpack(KEYS))\nend\n\nif redis.call('exists', settings_key) == 0 then\n -- Create\n local args = {'hmset', settings_key}\n\n for i = num_local_argv + 1, #ARGV do\n table.insert(args, ARGV[i])\n end\n\n redis.call(unpack(args))\n redis.call('hmset', settings_key,\n 'nextRequest', now,\n 'lastReservoirRefresh', now,\n 'lastReservoirIncrease', now,\n 'running', 0,\n 'done', 0,\n 'unblockTime', 0,\n 'capacityPriorityCounter', 0\n )\n\nelse\n -- Apply migrations\n local settings = redis.call('hmget', settings_key,\n 'id',\n 'version'\n )\n local id = settings[1]\n local current_version = settings[2]\n\n if current_version ~= limiter_version then\n local version_digits = {}\n for k, v in string.gmatch(current_version, \"([^.]+)\") do\n table.insert(version_digits, tonumber(k))\n end\n\n -- 2.10.0\n if version_digits[2] < 10 then\n redis.call('hsetnx', settings_key, 'reservoirRefreshInterval', '')\n redis.call('hsetnx', settings_key, 'reservoirRefreshAmount', '')\n redis.call('hsetnx', settings_key, 'lastReservoirRefresh', '')\n redis.call('hsetnx', settings_key, 'done', 0)\n redis.call('hset', settings_key, 'version', '2.10.0')\n end\n\n -- 2.11.1\n if version_digits[2] < 11 or (version_digits[2] == 11 and version_digits[3] < 1) then\n if redis.call('hstrlen', settings_key, 'lastReservoirRefresh') == 0 then\n redis.call('hmset', settings_key,\n 'lastReservoirRefresh', now,\n 'version', '2.11.1'\n )\n end\n end\n\n -- 2.14.0\n if version_digits[2] < 14 then\n local old_running_key = 'b_'..id..'_running'\n local old_executing_key = 'b_'..id..'_executing'\n\n if redis.call('exists', old_running_key) == 1 then\n redis.call('rename', old_running_key, job_weights_key)\n end\n if redis.call('exists', old_executing_key) == 1 then\n redis.call('rename', old_executing_key, 
job_expirations_key)\n end\n redis.call('hset', settings_key, 'version', '2.14.0')\n end\n\n -- 2.15.2\n if version_digits[2] < 15 or (version_digits[2] == 15 and version_digits[3] < 2) then\n redis.call('hsetnx', settings_key, 'capacityPriorityCounter', 0)\n redis.call('hset', settings_key, 'version', '2.15.2')\n end\n\n -- 2.17.0\n if version_digits[2] < 17 then\n redis.call('hsetnx', settings_key, 'clientTimeout', 10000)\n redis.call('hset', settings_key, 'version', '2.17.0')\n end\n\n -- 2.18.0\n if version_digits[2] < 18 then\n redis.call('hsetnx', settings_key, 'reservoirIncreaseInterval', '')\n redis.call('hsetnx', settings_key, 'reservoirIncreaseAmount', '')\n redis.call('hsetnx', settings_key, 'reservoirIncreaseMaximum', '')\n redis.call('hsetnx', settings_key, 'lastReservoirIncrease', now)\n redis.call('hset', settings_key, 'version', '2.18.0')\n end\n\n end\n\n process_tick(now, false)\nend\n\nlocal groupTimeout = tonumber(redis.call('hget', settings_key, 'groupTimeout'))\nrefresh_expiration(0, 0, groupTimeout)\n\nreturn {}\n",
+ "process_tick.lua": "local process_tick = function (now, always_publish)\n\n local compute_capacity = function (maxConcurrent, running, reservoir)\n if maxConcurrent ~= nil and reservoir ~= nil then\n return math.min((maxConcurrent - running), reservoir)\n elseif maxConcurrent ~= nil then\n return maxConcurrent - running\n elseif reservoir ~= nil then\n return reservoir\n else\n return nil\n end\n end\n\n local settings = redis.call('hmget', settings_key,\n 'id',\n 'maxConcurrent',\n 'running',\n 'reservoir',\n 'reservoirRefreshInterval',\n 'reservoirRefreshAmount',\n 'lastReservoirRefresh',\n 'reservoirIncreaseInterval',\n 'reservoirIncreaseAmount',\n 'reservoirIncreaseMaximum',\n 'lastReservoirIncrease',\n 'capacityPriorityCounter',\n 'clientTimeout'\n )\n local id = settings[1]\n local maxConcurrent = tonumber(settings[2])\n local running = tonumber(settings[3])\n local reservoir = tonumber(settings[4])\n local reservoirRefreshInterval = tonumber(settings[5])\n local reservoirRefreshAmount = tonumber(settings[6])\n local lastReservoirRefresh = tonumber(settings[7])\n local reservoirIncreaseInterval = tonumber(settings[8])\n local reservoirIncreaseAmount = tonumber(settings[9])\n local reservoirIncreaseMaximum = tonumber(settings[10])\n local lastReservoirIncrease = tonumber(settings[11])\n local capacityPriorityCounter = tonumber(settings[12])\n local clientTimeout = tonumber(settings[13])\n\n local initial_capacity = compute_capacity(maxConcurrent, running, reservoir)\n\n --\n -- Process 'running' changes\n --\n local expired = redis.call('zrangebyscore', job_expirations_key, '-inf', '('..now)\n\n if #expired > 0 then\n redis.call('zremrangebyscore', job_expirations_key, '-inf', '('..now)\n\n local flush_batch = function (batch, acc)\n local weights = redis.call('hmget', job_weights_key, unpack(batch))\n redis.call('hdel', job_weights_key, unpack(batch))\n local clients = redis.call('hmget', job_clients_key, unpack(batch))\n redis.call('hdel', 
job_clients_key, unpack(batch))\n\n -- Calculate sum of removed weights\n for i = 1, #weights do\n acc['total'] = acc['total'] + (tonumber(weights[i]) or 0)\n end\n\n -- Calculate sum of removed weights by client\n local client_weights = {}\n for i = 1, #clients do\n local removed = tonumber(weights[i]) or 0\n if removed > 0 then\n acc['client_weights'][clients[i]] = (acc['client_weights'][clients[i]] or 0) + removed\n end\n end\n end\n\n local acc = {\n ['total'] = 0,\n ['client_weights'] = {}\n }\n local batch_size = 1000\n\n -- Compute changes to Zsets and apply changes to Hashes\n for i = 1, #expired, batch_size do\n local batch = {}\n for j = i, math.min(i + batch_size - 1, #expired) do\n table.insert(batch, expired[j])\n end\n\n flush_batch(batch, acc)\n end\n\n -- Apply changes to Zsets\n if acc['total'] > 0 then\n redis.call('hincrby', settings_key, 'done', acc['total'])\n running = tonumber(redis.call('hincrby', settings_key, 'running', -acc['total']))\n end\n\n for client, weight in pairs(acc['client_weights']) do\n redis.call('zincrby', client_running_key, -weight, client)\n end\n end\n\n --\n -- Process 'reservoir' changes\n --\n local reservoirRefreshActive = reservoirRefreshInterval ~= nil and reservoirRefreshAmount ~= nil\n if reservoirRefreshActive and now >= lastReservoirRefresh + reservoirRefreshInterval then\n reservoir = reservoirRefreshAmount\n redis.call('hmset', settings_key,\n 'reservoir', reservoir,\n 'lastReservoirRefresh', now\n )\n end\n\n local reservoirIncreaseActive = reservoirIncreaseInterval ~= nil and reservoirIncreaseAmount ~= nil\n if reservoirIncreaseActive and now >= lastReservoirIncrease + reservoirIncreaseInterval then\n local num_intervals = math.floor((now - lastReservoirIncrease) / reservoirIncreaseInterval)\n local incr = reservoirIncreaseAmount * num_intervals\n if reservoirIncreaseMaximum ~= nil then\n incr = math.min(incr, reservoirIncreaseMaximum - (reservoir or 0))\n end\n if incr > 0 then\n reservoir = (reservoir or 
0) + incr\n end\n redis.call('hmset', settings_key,\n 'reservoir', reservoir,\n 'lastReservoirIncrease', lastReservoirIncrease + (num_intervals * reservoirIncreaseInterval)\n )\n end\n\n --\n -- Clear unresponsive clients\n --\n local unresponsive = redis.call('zrangebyscore', client_last_seen_key, '-inf', (now - clientTimeout))\n local unresponsive_lookup = {}\n local terminated_clients = {}\n for i = 1, #unresponsive do\n unresponsive_lookup[unresponsive[i]] = true\n if tonumber(redis.call('zscore', client_running_key, unresponsive[i])) == 0 then\n table.insert(terminated_clients, unresponsive[i])\n end\n end\n if #terminated_clients > 0 then\n redis.call('zrem', client_running_key, unpack(terminated_clients))\n redis.call('hdel', client_num_queued_key, unpack(terminated_clients))\n redis.call('zrem', client_last_registered_key, unpack(terminated_clients))\n redis.call('zrem', client_last_seen_key, unpack(terminated_clients))\n end\n\n --\n -- Broadcast capacity changes\n --\n local final_capacity = compute_capacity(maxConcurrent, running, reservoir)\n\n if always_publish or (initial_capacity ~= nil and final_capacity == nil) then\n -- always_publish or was not unlimited, now unlimited\n redis.call('publish', 'b_'..id, 'capacity:'..(final_capacity or ''))\n\n elseif initial_capacity ~= nil and final_capacity ~= nil and final_capacity > initial_capacity then\n -- capacity was increased\n -- send the capacity message to the limiter having the lowest number of running jobs\n -- the tiebreaker is the limiter having not registered a job in the longest time\n\n local lowest_concurrency_value = nil\n local lowest_concurrency_clients = {}\n local lowest_concurrency_last_registered = {}\n local client_concurrencies = redis.call('zrange', client_running_key, 0, -1, 'withscores')\n\n for i = 1, #client_concurrencies, 2 do\n local client = client_concurrencies[i]\n local concurrency = tonumber(client_concurrencies[i+1])\n\n if (\n lowest_concurrency_value == nil or 
lowest_concurrency_value == concurrency\n ) and (\n not unresponsive_lookup[client]\n ) and (\n tonumber(redis.call('hget', client_num_queued_key, client)) > 0\n ) then\n lowest_concurrency_value = concurrency\n table.insert(lowest_concurrency_clients, client)\n local last_registered = tonumber(redis.call('zscore', client_last_registered_key, client))\n table.insert(lowest_concurrency_last_registered, last_registered)\n end\n end\n\n if #lowest_concurrency_clients > 0 then\n local position = 1\n local earliest = lowest_concurrency_last_registered[1]\n\n for i,v in ipairs(lowest_concurrency_last_registered) do\n if v < earliest then\n position = i\n earliest = v\n end\n end\n\n local next_client = lowest_concurrency_clients[position]\n redis.call('publish', 'b_'..id,\n 'capacity-priority:'..(final_capacity or '')..\n ':'..next_client..\n ':'..capacityPriorityCounter\n )\n redis.call('hincrby', settings_key, 'capacityPriorityCounter', '1')\n else\n redis.call('publish', 'b_'..id, 'capacity:'..(final_capacity or ''))\n end\n end\n\n return {\n ['capacity'] = final_capacity,\n ['running'] = running,\n ['reservoir'] = reservoir\n }\nend\n",
+ "queued.lua": "local clientTimeout = tonumber(redis.call('hget', settings_key, 'clientTimeout'))\nlocal valid_clients = redis.call('zrangebyscore', client_last_seen_key, (now - clientTimeout), 'inf')\nlocal client_queued = redis.call('hmget', client_num_queued_key, unpack(valid_clients))\n\nlocal sum = 0\nfor i = 1, #client_queued do\n sum = sum + tonumber(client_queued[i])\nend\n\nreturn sum\n",
+ "refresh_expiration.lua": "local refresh_expiration = function (now, nextRequest, groupTimeout)\n\n if groupTimeout ~= nil then\n local ttl = (nextRequest + groupTimeout) - now\n\n for i = 1, #KEYS do\n redis.call('pexpire', KEYS[i], ttl)\n end\n end\n\nend\n",
+ "refs.lua": "local settings_key = KEYS[1]\nlocal job_weights_key = KEYS[2]\nlocal job_expirations_key = KEYS[3]\nlocal job_clients_key = KEYS[4]\nlocal client_running_key = KEYS[5]\nlocal client_num_queued_key = KEYS[6]\nlocal client_last_registered_key = KEYS[7]\nlocal client_last_seen_key = KEYS[8]\n\nlocal now = tonumber(ARGV[1])\nlocal client = ARGV[2]\n\nlocal num_static_argv = 2\n",
+ "register.lua": "local index = ARGV[num_static_argv + 1]\nlocal weight = tonumber(ARGV[num_static_argv + 2])\nlocal expiration = tonumber(ARGV[num_static_argv + 3])\n\nlocal state = process_tick(now, false)\nlocal capacity = state['capacity']\nlocal reservoir = state['reservoir']\n\nlocal settings = redis.call('hmget', settings_key,\n 'nextRequest',\n 'minTime',\n 'groupTimeout'\n)\nlocal nextRequest = tonumber(settings[1])\nlocal minTime = tonumber(settings[2])\nlocal groupTimeout = tonumber(settings[3])\n\nif conditions_check(capacity, weight) then\n\n redis.call('hincrby', settings_key, 'running', weight)\n redis.call('hset', job_weights_key, index, weight)\n if expiration ~= nil then\n redis.call('zadd', job_expirations_key, now + expiration, index)\n end\n redis.call('hset', job_clients_key, index, client)\n redis.call('zincrby', client_running_key, weight, client)\n redis.call('hincrby', client_num_queued_key, client, -1)\n redis.call('zadd', client_last_registered_key, now, client)\n\n local wait = math.max(nextRequest - now, 0)\n local newNextRequest = now + wait + minTime\n\n if reservoir == nil then\n redis.call('hset', settings_key,\n 'nextRequest', newNextRequest\n )\n else\n reservoir = reservoir - weight\n redis.call('hmset', settings_key,\n 'reservoir', reservoir,\n 'nextRequest', newNextRequest\n )\n end\n\n refresh_expiration(now, newNextRequest, groupTimeout)\n\n return {true, wait, reservoir}\n\nelse\n return {false}\nend\n",
+ "register_client.lua": "local queued = tonumber(ARGV[num_static_argv + 1])\n\n-- Could have been re-registered concurrently\nif not redis.call('zscore', client_last_seen_key, client) then\n redis.call('zadd', client_running_key, 0, client)\n redis.call('hset', client_num_queued_key, client, queued)\n redis.call('zadd', client_last_registered_key, 0, client)\nend\n\nredis.call('zadd', client_last_seen_key, now, client)\n\nreturn {}\n",
+ "running.lua": "return process_tick(now, false)['running']\n",
+ "submit.lua": "local queueLength = tonumber(ARGV[num_static_argv + 1])\nlocal weight = tonumber(ARGV[num_static_argv + 2])\n\nlocal capacity = process_tick(now, false)['capacity']\n\nlocal settings = redis.call('hmget', settings_key,\n 'id',\n 'maxConcurrent',\n 'highWater',\n 'nextRequest',\n 'strategy',\n 'unblockTime',\n 'penalty',\n 'minTime',\n 'groupTimeout'\n)\nlocal id = settings[1]\nlocal maxConcurrent = tonumber(settings[2])\nlocal highWater = tonumber(settings[3])\nlocal nextRequest = tonumber(settings[4])\nlocal strategy = tonumber(settings[5])\nlocal unblockTime = tonumber(settings[6])\nlocal penalty = tonumber(settings[7])\nlocal minTime = tonumber(settings[8])\nlocal groupTimeout = tonumber(settings[9])\n\nif maxConcurrent ~= nil and weight > maxConcurrent then\n return redis.error_reply('OVERWEIGHT:'..weight..':'..maxConcurrent)\nend\n\nlocal reachedHWM = (highWater ~= nil and queueLength == highWater\n and not (\n conditions_check(capacity, weight)\n and nextRequest - now <= 0\n )\n)\n\nlocal blocked = strategy == 3 and (reachedHWM or unblockTime >= now)\n\nif blocked then\n local computedPenalty = penalty\n if computedPenalty == nil then\n if minTime == 0 then\n computedPenalty = 5000\n else\n computedPenalty = 15 * minTime\n end\n end\n\n local newNextRequest = now + computedPenalty + minTime\n\n redis.call('hmset', settings_key,\n 'unblockTime', now + computedPenalty,\n 'nextRequest', newNextRequest\n )\n\n local clients_queued_reset = redis.call('hkeys', client_num_queued_key)\n local queued_reset = {}\n for i = 1, #clients_queued_reset do\n table.insert(queued_reset, clients_queued_reset[i])\n table.insert(queued_reset, 0)\n end\n redis.call('hmset', client_num_queued_key, unpack(queued_reset))\n\n redis.call('publish', 'b_'..id, 'blocked:')\n\n refresh_expiration(now, newNextRequest, groupTimeout)\nend\n\nif not blocked and not reachedHWM then\n redis.call('hincrby', client_num_queued_key, client, 1)\nend\n\nreturn {reachedHWM, blocked, 
strategy}\n",
+ "update_settings.lua": "local args = {'hmset', settings_key}\n\nfor i = num_static_argv + 1, #ARGV do\n table.insert(args, ARGV[i])\nend\n\nredis.call(unpack(args))\n\nprocess_tick(now, true)\n\nlocal groupTimeout = tonumber(redis.call('hget', settings_key, 'groupTimeout'))\nrefresh_expiration(0, 0, groupTimeout)\n\nreturn {}\n",
+ "validate_client.lua": "if not redis.call('zscore', client_last_seen_key, client) then\n return redis.error_reply('UNKNOWN_CLIENT')\nend\n\nredis.call('zadd', client_last_seen_key, now, client)\n",
+ "validate_keys.lua": "if not (redis.call('exists', settings_key) == 1) then\n return redis.error_reply('SETTINGS_KEY_NOT_FOUND')\nend\n"
+}
diff --git a/node_modules/bottleneck/lib/parser.js b/node_modules/bottleneck/lib/parser.js
new file mode 100644
index 000000000..8686191f0
--- /dev/null
+++ b/node_modules/bottleneck/lib/parser.js
@@ -0,0 +1,26 @@
+"use strict";
+
+exports.load = function (received, defaults, onto = {}) {
+ var k, ref, v;
+
+ for (k in defaults) {
+ v = defaults[k];
+ onto[k] = (ref = received[k]) != null ? ref : v;
+ }
+
+ return onto;
+};
+
+exports.overwrite = function (received, defaults, onto = {}) {
+ var k, v;
+
+ for (k in received) {
+ v = received[k];
+
+ if (defaults[k] !== void 0) {
+ onto[k] = v;
+ }
+ }
+
+ return onto;
+};
\ No newline at end of file
diff --git a/node_modules/bottleneck/lib/version.json b/node_modules/bottleneck/lib/version.json
new file mode 100644
index 000000000..578a219cf
--- /dev/null
+++ b/node_modules/bottleneck/lib/version.json
@@ -0,0 +1 @@
+{"version":"2.19.5"}
diff --git a/node_modules/bottleneck/light.js b/node_modules/bottleneck/light.js
new file mode 100644
index 000000000..c4aa26537
--- /dev/null
+++ b/node_modules/bottleneck/light.js
@@ -0,0 +1,1524 @@
+/**
+ * This file contains the Bottleneck library (MIT), compiled to ES2017, and without Clustering support.
+ * https://github.com/SGrondin/bottleneck
+ */
+(function (global, factory) {
+ typeof exports === 'object' && typeof module !== 'undefined' ? module.exports = factory() :
+ typeof define === 'function' && define.amd ? define(factory) :
+ (global.Bottleneck = factory());
+}(this, (function () { 'use strict';
+
+ var commonjsGlobal = typeof globalThis !== 'undefined' ? globalThis : typeof window !== 'undefined' ? window : typeof global !== 'undefined' ? global : typeof self !== 'undefined' ? self : {};
+
+ function getCjsExportFromNamespace (n) {
+ return n && n['default'] || n;
+ }
+
+ var load = function(received, defaults, onto = {}) {
+ var k, ref, v;
+ for (k in defaults) {
+ v = defaults[k];
+ onto[k] = (ref = received[k]) != null ? ref : v;
+ }
+ return onto;
+ };
+
+ var overwrite = function(received, defaults, onto = {}) {
+ var k, v;
+ for (k in received) {
+ v = received[k];
+ if (defaults[k] !== void 0) {
+ onto[k] = v;
+ }
+ }
+ return onto;
+ };
+
+ var parser = {
+ load: load,
+ overwrite: overwrite
+ };
+
+ var DLList;
+
+ DLList = class DLList {
+ constructor(incr, decr) {
+ this.incr = incr;
+ this.decr = decr;
+ this._first = null;
+ this._last = null;
+ this.length = 0;
+ }
+
+ push(value) {
+ var node;
+ this.length++;
+ if (typeof this.incr === "function") {
+ this.incr();
+ }
+ node = {
+ value,
+ prev: this._last,
+ next: null
+ };
+ if (this._last != null) {
+ this._last.next = node;
+ this._last = node;
+ } else {
+ this._first = this._last = node;
+ }
+ return void 0;
+ }
+
+ shift() {
+ var value;
+ if (this._first == null) {
+ return;
+ } else {
+ this.length--;
+ if (typeof this.decr === "function") {
+ this.decr();
+ }
+ }
+ value = this._first.value;
+ if ((this._first = this._first.next) != null) {
+ this._first.prev = null;
+ } else {
+ this._last = null;
+ }
+ return value;
+ }
+
+ first() {
+ if (this._first != null) {
+ return this._first.value;
+ }
+ }
+
+ getArray() {
+ var node, ref, results;
+ node = this._first;
+ results = [];
+ while (node != null) {
+ results.push((ref = node, node = node.next, ref.value));
+ }
+ return results;
+ }
+
+ forEachShift(cb) {
+ var node;
+ node = this.shift();
+ while (node != null) {
+ (cb(node), node = this.shift());
+ }
+ return void 0;
+ }
+
+ debug() {
+ var node, ref, ref1, ref2, results;
+ node = this._first;
+ results = [];
+ while (node != null) {
+ results.push((ref = node, node = node.next, {
+ value: ref.value,
+ prev: (ref1 = ref.prev) != null ? ref1.value : void 0,
+ next: (ref2 = ref.next) != null ? ref2.value : void 0
+ }));
+ }
+ return results;
+ }
+
+ };
+
+ var DLList_1 = DLList;
+
+ var Events;
+
+ Events = class Events {
+ constructor(instance) {
+ this.instance = instance;
+ this._events = {};
+ if ((this.instance.on != null) || (this.instance.once != null) || (this.instance.removeAllListeners != null)) {
+ throw new Error("An Emitter already exists for this object");
+ }
+ this.instance.on = (name, cb) => {
+ return this._addListener(name, "many", cb);
+ };
+ this.instance.once = (name, cb) => {
+ return this._addListener(name, "once", cb);
+ };
+ this.instance.removeAllListeners = (name = null) => {
+ if (name != null) {
+ return delete this._events[name];
+ } else {
+ return this._events = {};
+ }
+ };
+ }
+
+ _addListener(name, status, cb) {
+ var base;
+ if ((base = this._events)[name] == null) {
+ base[name] = [];
+ }
+ this._events[name].push({cb, status});
+ return this.instance;
+ }
+
+ listenerCount(name) {
+ if (this._events[name] != null) {
+ return this._events[name].length;
+ } else {
+ return 0;
+ }
+ }
+
+ async trigger(name, ...args) {
+ var e, promises;
+ try {
+ if (name !== "debug") {
+ this.trigger("debug", `Event triggered: ${name}`, args);
+ }
+ if (this._events[name] == null) {
+ return;
+ }
+ this._events[name] = this._events[name].filter(function(listener) {
+ return listener.status !== "none";
+ });
+ promises = this._events[name].map(async(listener) => {
+ var e, returned;
+ if (listener.status === "none") {
+ return;
+ }
+ if (listener.status === "once") {
+ listener.status = "none";
+ }
+ try {
+ returned = typeof listener.cb === "function" ? listener.cb(...args) : void 0;
+ if (typeof (returned != null ? returned.then : void 0) === "function") {
+ return (await returned);
+ } else {
+ return returned;
+ }
+ } catch (error) {
+ e = error;
+ {
+ this.trigger("error", e);
+ }
+ return null;
+ }
+ });
+ return ((await Promise.all(promises))).find(function(x) {
+ return x != null;
+ });
+ } catch (error) {
+ e = error;
+ {
+ this.trigger("error", e);
+ }
+ return null;
+ }
+ }
+
+ };
+
+ var Events_1 = Events;
+
+ var DLList$1, Events$1, Queues;
+
+ DLList$1 = DLList_1;
+
+ Events$1 = Events_1;
+
+ Queues = class Queues {
+ constructor(num_priorities) {
+ var i;
+ this.Events = new Events$1(this);
+ this._length = 0;
+ this._lists = (function() {
+ var j, ref, results;
+ results = [];
+ for (i = j = 1, ref = num_priorities; (1 <= ref ? j <= ref : j >= ref); i = 1 <= ref ? ++j : --j) {
+ results.push(new DLList$1((() => {
+ return this.incr();
+ }), (() => {
+ return this.decr();
+ })));
+ }
+ return results;
+ }).call(this);
+ }
+
+ incr() {
+ if (this._length++ === 0) {
+ return this.Events.trigger("leftzero");
+ }
+ }
+
+ decr() {
+ if (--this._length === 0) {
+ return this.Events.trigger("zero");
+ }
+ }
+
+ push(job) {
+ return this._lists[job.options.priority].push(job);
+ }
+
+ queued(priority) {
+ if (priority != null) {
+ return this._lists[priority].length;
+ } else {
+ return this._length;
+ }
+ }
+
+ shiftAll(fn) {
+ return this._lists.forEach(function(list) {
+ return list.forEachShift(fn);
+ });
+ }
+
+ getFirst(arr = this._lists) {
+ var j, len, list;
+ for (j = 0, len = arr.length; j < len; j++) {
+ list = arr[j];
+ if (list.length > 0) {
+ return list;
+ }
+ }
+ return [];
+ }
+
+ shiftLastFrom(priority) {
+ return this.getFirst(this._lists.slice(priority).reverse()).shift();
+ }
+
+ };
+
+ var Queues_1 = Queues;
+
+ var BottleneckError;
+
+ BottleneckError = class BottleneckError extends Error {};
+
+ var BottleneckError_1 = BottleneckError;
+
+ var BottleneckError$1, DEFAULT_PRIORITY, Job, NUM_PRIORITIES, parser$1;
+
+ NUM_PRIORITIES = 10;
+
+ DEFAULT_PRIORITY = 5;
+
+ parser$1 = parser;
+
+ BottleneckError$1 = BottleneckError_1;
+
+ Job = class Job {
+ constructor(task, args, options, jobDefaults, rejectOnDrop, Events, _states, Promise) {
+ this.task = task;
+ this.args = args;
+ this.rejectOnDrop = rejectOnDrop;
+ this.Events = Events;
+ this._states = _states;
+ this.Promise = Promise;
+ this.options = parser$1.load(options, jobDefaults);
+ this.options.priority = this._sanitizePriority(this.options.priority);
+ if (this.options.id === jobDefaults.id) {
+ this.options.id = `${this.options.id}-${this._randomIndex()}`;
+ }
+ this.promise = new this.Promise((_resolve, _reject) => {
+ this._resolve = _resolve;
+ this._reject = _reject;
+ });
+ this.retryCount = 0;
+ }
+
+ _sanitizePriority(priority) {
+ var sProperty;
+ sProperty = ~~priority !== priority ? DEFAULT_PRIORITY : priority;
+ if (sProperty < 0) {
+ return 0;
+ } else if (sProperty > NUM_PRIORITIES - 1) {
+ return NUM_PRIORITIES - 1;
+ } else {
+ return sProperty;
+ }
+ }
+
+ _randomIndex() {
+ return Math.random().toString(36).slice(2);
+ }
+
+ doDrop({error, message = "This job has been dropped by Bottleneck"} = {}) {
+ if (this._states.remove(this.options.id)) {
+ if (this.rejectOnDrop) {
+ this._reject(error != null ? error : new BottleneckError$1(message));
+ }
+ this.Events.trigger("dropped", {args: this.args, options: this.options, task: this.task, promise: this.promise});
+ return true;
+ } else {
+ return false;
+ }
+ }
+
+ _assertStatus(expected) {
+ var status;
+ status = this._states.jobStatus(this.options.id);
+ if (!(status === expected || (expected === "DONE" && status === null))) {
+ throw new BottleneckError$1(`Invalid job status ${status}, expected ${expected}. Please open an issue at https://github.com/SGrondin/bottleneck/issues`);
+ }
+ }
+
+ doReceive() {
+ this._states.start(this.options.id);
+ return this.Events.trigger("received", {args: this.args, options: this.options});
+ }
+
+ doQueue(reachedHWM, blocked) {
+ this._assertStatus("RECEIVED");
+ this._states.next(this.options.id);
+ return this.Events.trigger("queued", {args: this.args, options: this.options, reachedHWM, blocked});
+ }
+
+ doRun() {
+ if (this.retryCount === 0) {
+ this._assertStatus("QUEUED");
+ this._states.next(this.options.id);
+ } else {
+ this._assertStatus("EXECUTING");
+ }
+ return this.Events.trigger("scheduled", {args: this.args, options: this.options});
+ }
+
+ async doExecute(chained, clearGlobalState, run, free) {
+ var error, eventInfo, passed;
+ if (this.retryCount === 0) {
+ this._assertStatus("RUNNING");
+ this._states.next(this.options.id);
+ } else {
+ this._assertStatus("EXECUTING");
+ }
+ eventInfo = {args: this.args, options: this.options, retryCount: this.retryCount};
+ this.Events.trigger("executing", eventInfo);
+ try {
+ passed = (await (chained != null ? chained.schedule(this.options, this.task, ...this.args) : this.task(...this.args)));
+ if (clearGlobalState()) {
+ this.doDone(eventInfo);
+ await free(this.options, eventInfo);
+ this._assertStatus("DONE");
+ return this._resolve(passed);
+ }
+ } catch (error1) {
+ error = error1;
+ return this._onFailure(error, eventInfo, clearGlobalState, run, free);
+ }
+ }
+
+ doExpire(clearGlobalState, run, free) {
+ var error, eventInfo;
+ if (this._states.jobStatus(this.options.id === "RUNNING")) {
+ this._states.next(this.options.id);
+ }
+ this._assertStatus("EXECUTING");
+ eventInfo = {args: this.args, options: this.options, retryCount: this.retryCount};
+ error = new BottleneckError$1(`This job timed out after ${this.options.expiration} ms.`);
+ return this._onFailure(error, eventInfo, clearGlobalState, run, free);
+ }
+
+ async _onFailure(error, eventInfo, clearGlobalState, run, free) {
+ var retry, retryAfter;
+ if (clearGlobalState()) {
+ retry = (await this.Events.trigger("failed", error, eventInfo));
+ if (retry != null) {
+ retryAfter = ~~retry;
+ this.Events.trigger("retry", `Retrying ${this.options.id} after ${retryAfter} ms`, eventInfo);
+ this.retryCount++;
+ return run(retryAfter);
+ } else {
+ this.doDone(eventInfo);
+ await free(this.options, eventInfo);
+ this._assertStatus("DONE");
+ return this._reject(error);
+ }
+ }
+ }
+
+ doDone(eventInfo) {
+ this._assertStatus("EXECUTING");
+ this._states.next(this.options.id);
+ return this.Events.trigger("done", eventInfo);
+ }
+
+ };
+
+ var Job_1 = Job;
+
+ var BottleneckError$2, LocalDatastore, parser$2;
+
+ parser$2 = parser;
+
+ BottleneckError$2 = BottleneckError_1;
+
+ LocalDatastore = class LocalDatastore {
+ constructor(instance, storeOptions, storeInstanceOptions) {
+ this.instance = instance;
+ this.storeOptions = storeOptions;
+ this.clientId = this.instance._randomIndex();
+ parser$2.load(storeInstanceOptions, storeInstanceOptions, this);
+ this._nextRequest = this._lastReservoirRefresh = this._lastReservoirIncrease = Date.now();
+ this._running = 0;
+ this._done = 0;
+ this._unblockTime = 0;
+ this.ready = this.Promise.resolve();
+ this.clients = {};
+ this._startHeartbeat();
+ }
+
+ _startHeartbeat() {
+ var base;
+ if ((this.heartbeat == null) && (((this.storeOptions.reservoirRefreshInterval != null) && (this.storeOptions.reservoirRefreshAmount != null)) || ((this.storeOptions.reservoirIncreaseInterval != null) && (this.storeOptions.reservoirIncreaseAmount != null)))) {
+ return typeof (base = (this.heartbeat = setInterval(() => {
+ var amount, incr, maximum, now, reservoir;
+ now = Date.now();
+ if ((this.storeOptions.reservoirRefreshInterval != null) && now >= this._lastReservoirRefresh + this.storeOptions.reservoirRefreshInterval) {
+ this._lastReservoirRefresh = now;
+ this.storeOptions.reservoir = this.storeOptions.reservoirRefreshAmount;
+ this.instance._drainAll(this.computeCapacity());
+ }
+ if ((this.storeOptions.reservoirIncreaseInterval != null) && now >= this._lastReservoirIncrease + this.storeOptions.reservoirIncreaseInterval) {
+ ({
+ reservoirIncreaseAmount: amount,
+ reservoirIncreaseMaximum: maximum,
+ reservoir
+ } = this.storeOptions);
+ this._lastReservoirIncrease = now;
+ incr = maximum != null ? Math.min(amount, maximum - reservoir) : amount;
+ if (incr > 0) {
+ this.storeOptions.reservoir += incr;
+ return this.instance._drainAll(this.computeCapacity());
+ }
+ }
+ }, this.heartbeatInterval))).unref === "function" ? base.unref() : void 0;
+ } else {
+ return clearInterval(this.heartbeat);
+ }
+ }
+
+ async __publish__(message) {
+ await this.yieldLoop();
+ return this.instance.Events.trigger("message", message.toString());
+ }
+
+ async __disconnect__(flush) {
+ await this.yieldLoop();
+ clearInterval(this.heartbeat);
+ return this.Promise.resolve();
+ }
+
+ yieldLoop(t = 0) {
+ return new this.Promise(function(resolve, reject) {
+ return setTimeout(resolve, t);
+ });
+ }
+
+ computePenalty() {
+ var ref;
+ return (ref = this.storeOptions.penalty) != null ? ref : (15 * this.storeOptions.minTime) || 5000;
+ }
+
+ async __updateSettings__(options) {
+ await this.yieldLoop();
+ parser$2.overwrite(options, options, this.storeOptions);
+ this._startHeartbeat();
+ this.instance._drainAll(this.computeCapacity());
+ return true;
+ }
+
+ async __running__() {
+ await this.yieldLoop();
+ return this._running;
+ }
+
+ async __queued__() {
+ await this.yieldLoop();
+ return this.instance.queued();
+ }
+
+ async __done__() {
+ await this.yieldLoop();
+ return this._done;
+ }
+
+ async __groupCheck__(time) {
+ await this.yieldLoop();
+ return (this._nextRequest + this.timeout) < time;
+ }
+
+ computeCapacity() {
+ var maxConcurrent, reservoir;
+ ({maxConcurrent, reservoir} = this.storeOptions);
+ if ((maxConcurrent != null) && (reservoir != null)) {
+ return Math.min(maxConcurrent - this._running, reservoir);
+ } else if (maxConcurrent != null) {
+ return maxConcurrent - this._running;
+ } else if (reservoir != null) {
+ return reservoir;
+ } else {
+ return null;
+ }
+ }
+
+ conditionsCheck(weight) {
+ var capacity;
+ capacity = this.computeCapacity();
+ return (capacity == null) || weight <= capacity;
+ }
+
+ async __incrementReservoir__(incr) {
+ var reservoir;
+ await this.yieldLoop();
+ reservoir = this.storeOptions.reservoir += incr;
+ this.instance._drainAll(this.computeCapacity());
+ return reservoir;
+ }
+
+ async __currentReservoir__() {
+ await this.yieldLoop();
+ return this.storeOptions.reservoir;
+ }
+
+ isBlocked(now) {
+ return this._unblockTime >= now;
+ }
+
+ check(weight, now) {
+ return this.conditionsCheck(weight) && (this._nextRequest - now) <= 0;
+ }
+
+ async __check__(weight) {
+ var now;
+ await this.yieldLoop();
+ now = Date.now();
+ return this.check(weight, now);
+ }
+
+ async __register__(index, weight, expiration) {
+ var now, wait;
+ await this.yieldLoop();
+ now = Date.now();
+ if (this.conditionsCheck(weight)) {
+ this._running += weight;
+ if (this.storeOptions.reservoir != null) {
+ this.storeOptions.reservoir -= weight;
+ }
+ wait = Math.max(this._nextRequest - now, 0);
+ this._nextRequest = now + wait + this.storeOptions.minTime;
+ return {
+ success: true,
+ wait,
+ reservoir: this.storeOptions.reservoir
+ };
+ } else {
+ return {
+ success: false
+ };
+ }
+ }
+
+ strategyIsBlock() {
+ return this.storeOptions.strategy === 3;
+ }
+
+ async __submit__(queueLength, weight) {
+ var blocked, now, reachedHWM;
+ await this.yieldLoop();
+ if ((this.storeOptions.maxConcurrent != null) && weight > this.storeOptions.maxConcurrent) {
+ throw new BottleneckError$2(`Impossible to add a job having a weight of ${weight} to a limiter having a maxConcurrent setting of ${this.storeOptions.maxConcurrent}`);
+ }
+ now = Date.now();
+ reachedHWM = (this.storeOptions.highWater != null) && queueLength === this.storeOptions.highWater && !this.check(weight, now);
+ blocked = this.strategyIsBlock() && (reachedHWM || this.isBlocked(now));
+ if (blocked) {
+ this._unblockTime = now + this.computePenalty();
+ this._nextRequest = this._unblockTime + this.storeOptions.minTime;
+ this.instance._dropAllQueued();
+ }
+ return {
+ reachedHWM,
+ blocked,
+ strategy: this.storeOptions.strategy
+ };
+ }
+
+ async __free__(index, weight) {
+ await this.yieldLoop();
+ this._running -= weight;
+ this._done += weight;
+ this.instance._drainAll(this.computeCapacity());
+ return {
+ running: this._running
+ };
+ }
+
+ };
+
+ var LocalDatastore_1 = LocalDatastore;
+
+ var BottleneckError$3, States;
+
+ BottleneckError$3 = BottleneckError_1;
+
+ States = class States {
+ constructor(status1) {
+ this.status = status1;
+ this._jobs = {};
+ this.counts = this.status.map(function() {
+ return 0;
+ });
+ }
+
+ next(id) {
+ var current, next;
+ current = this._jobs[id];
+ next = current + 1;
+ if ((current != null) && next < this.status.length) {
+ this.counts[current]--;
+ this.counts[next]++;
+ return this._jobs[id]++;
+ } else if (current != null) {
+ this.counts[current]--;
+ return delete this._jobs[id];
+ }
+ }
+
+ start(id) {
+ var initial;
+ initial = 0;
+ this._jobs[id] = initial;
+ return this.counts[initial]++;
+ }
+
+ remove(id) {
+ var current;
+ current = this._jobs[id];
+ if (current != null) {
+ this.counts[current]--;
+ delete this._jobs[id];
+ }
+ return current != null;
+ }
+
+ jobStatus(id) {
+ var ref;
+ return (ref = this.status[this._jobs[id]]) != null ? ref : null;
+ }
+
+ statusJobs(status) {
+ var k, pos, ref, results, v;
+ if (status != null) {
+ pos = this.status.indexOf(status);
+ if (pos < 0) {
+ throw new BottleneckError$3(`status must be one of ${this.status.join(', ')}`);
+ }
+ ref = this._jobs;
+ results = [];
+ for (k in ref) {
+ v = ref[k];
+ if (v === pos) {
+ results.push(k);
+ }
+ }
+ return results;
+ } else {
+ return Object.keys(this._jobs);
+ }
+ }
+
+ statusCounts() {
+ return this.counts.reduce(((acc, v, i) => {
+ acc[this.status[i]] = v;
+ return acc;
+ }), {});
+ }
+
+ };
+
+ var States_1 = States;
+
+ var DLList$2, Sync;
+
+ DLList$2 = DLList_1;
+
+ Sync = class Sync {
+ constructor(name, Promise) {
+ this.schedule = this.schedule.bind(this);
+ this.name = name;
+ this.Promise = Promise;
+ this._running = 0;
+ this._queue = new DLList$2();
+ }
+
+ isEmpty() {
+ return this._queue.length === 0;
+ }
+
+ async _tryToRun() {
+ var args, cb, error, reject, resolve, returned, task;
+ if ((this._running < 1) && this._queue.length > 0) {
+ this._running++;
+ ({task, args, resolve, reject} = this._queue.shift());
+ cb = (await (async function() {
+ try {
+ returned = (await task(...args));
+ return function() {
+ return resolve(returned);
+ };
+ } catch (error1) {
+ error = error1;
+ return function() {
+ return reject(error);
+ };
+ }
+ })());
+ this._running--;
+ this._tryToRun();
+ return cb();
+ }
+ }
+
+ schedule(task, ...args) {
+ var promise, reject, resolve;
+ resolve = reject = null;
+ promise = new this.Promise(function(_resolve, _reject) {
+ resolve = _resolve;
+ return reject = _reject;
+ });
+ this._queue.push({task, args, resolve, reject});
+ this._tryToRun();
+ return promise;
+ }
+
+ };
+
+ var Sync_1 = Sync;
+
+ var version = "2.19.5";
+ var version$1 = {
+ version: version
+ };
+
+ var version$2 = /*#__PURE__*/Object.freeze({
+ version: version,
+ default: version$1
+ });
+
+ var require$$2 = () => console.log('You must import the full version of Bottleneck in order to use this feature.');
+
+ var require$$3 = () => console.log('You must import the full version of Bottleneck in order to use this feature.');
+
+ var require$$4 = () => console.log('You must import the full version of Bottleneck in order to use this feature.');
+
+ var Events$2, Group, IORedisConnection$1, RedisConnection$1, Scripts$1, parser$3;
+
+ parser$3 = parser;
+
+ Events$2 = Events_1;
+
+ RedisConnection$1 = require$$2;
+
+ IORedisConnection$1 = require$$3;
+
+ Scripts$1 = require$$4;
+
+ Group = (function() {
+ class Group {
+ constructor(limiterOptions = {}) {
+ this.deleteKey = this.deleteKey.bind(this);
+ this.limiterOptions = limiterOptions;
+ parser$3.load(this.limiterOptions, this.defaults, this);
+ this.Events = new Events$2(this);
+ this.instances = {};
+ this.Bottleneck = Bottleneck_1;
+ this._startAutoCleanup();
+ this.sharedConnection = this.connection != null;
+ if (this.connection == null) {
+ if (this.limiterOptions.datastore === "redis") {
+ this.connection = new RedisConnection$1(Object.assign({}, this.limiterOptions, {Events: this.Events}));
+ } else if (this.limiterOptions.datastore === "ioredis") {
+ this.connection = new IORedisConnection$1(Object.assign({}, this.limiterOptions, {Events: this.Events}));
+ }
+ }
+ }
+
+ key(key = "") {
+ var ref;
+ return (ref = this.instances[key]) != null ? ref : (() => {
+ var limiter;
+ limiter = this.instances[key] = new this.Bottleneck(Object.assign(this.limiterOptions, {
+ id: `${this.id}-${key}`,
+ timeout: this.timeout,
+ connection: this.connection
+ }));
+ this.Events.trigger("created", limiter, key);
+ return limiter;
+ })();
+ }
+
+ async deleteKey(key = "") {
+ var deleted, instance;
+ instance = this.instances[key];
+ if (this.connection) {
+ deleted = (await this.connection.__runCommand__(['del', ...Scripts$1.allKeys(`${this.id}-${key}`)]));
+ }
+ if (instance != null) {
+ delete this.instances[key];
+ await instance.disconnect();
+ }
+ return (instance != null) || deleted > 0;
+ }
+
+ limiters() {
+ var k, ref, results, v;
+ ref = this.instances;
+ results = [];
+ for (k in ref) {
+ v = ref[k];
+ results.push({
+ key: k,
+ limiter: v
+ });
+ }
+ return results;
+ }
+
+ keys() {
+ return Object.keys(this.instances);
+ }
+
+ async clusterKeys() {
+ var cursor, end, found, i, k, keys, len, next, start;
+ if (this.connection == null) {
+ return this.Promise.resolve(this.keys());
+ }
+ keys = [];
+ cursor = null;
+ start = `b_${this.id}-`.length;
+ end = "_settings".length;
+ while (cursor !== 0) {
+ [next, found] = (await this.connection.__runCommand__(["scan", cursor != null ? cursor : 0, "match", `b_${this.id}-*_settings`, "count", 10000]));
+ cursor = ~~next;
+ for (i = 0, len = found.length; i < len; i++) {
+ k = found[i];
+ keys.push(k.slice(start, -end));
+ }
+ }
+ return keys;
+ }
+
+ _startAutoCleanup() {
+ var base;
+ clearInterval(this.interval);
+ return typeof (base = (this.interval = setInterval(async() => {
+ var e, k, ref, results, time, v;
+ time = Date.now();
+ ref = this.instances;
+ results = [];
+ for (k in ref) {
+ v = ref[k];
+ try {
+ if ((await v._store.__groupCheck__(time))) {
+ results.push(this.deleteKey(k));
+ } else {
+ results.push(void 0);
+ }
+ } catch (error) {
+ e = error;
+ results.push(v.Events.trigger("error", e));
+ }
+ }
+ return results;
+ }, this.timeout / 2))).unref === "function" ? base.unref() : void 0;
+ }
+
+ updateSettings(options = {}) {
+ parser$3.overwrite(options, this.defaults, this);
+ parser$3.overwrite(options, options, this.limiterOptions);
+ if (options.timeout != null) {
+ return this._startAutoCleanup();
+ }
+ }
+
+ disconnect(flush = true) {
+ var ref;
+ if (!this.sharedConnection) {
+ return (ref = this.connection) != null ? ref.disconnect(flush) : void 0;
+ }
+ }
+
+ }
+ Group.prototype.defaults = {
+ timeout: 1000 * 60 * 5,
+ connection: null,
+ Promise: Promise,
+ id: "group-key"
+ };
+
+ return Group;
+
+ }).call(commonjsGlobal);
+
+ var Group_1 = Group;
+
+ var Batcher, Events$3, parser$4;
+
+ parser$4 = parser;
+
+ Events$3 = Events_1;
+
+ Batcher = (function() {
+ class Batcher {
+ constructor(options = {}) {
+ this.options = options;
+ parser$4.load(this.options, this.defaults, this);
+ this.Events = new Events$3(this);
+ this._arr = [];
+ this._resetPromise();
+ this._lastFlush = Date.now();
+ }
+
+ _resetPromise() {
+ return this._promise = new this.Promise((res, rej) => {
+ return this._resolve = res;
+ });
+ }
+
+ _flush() {
+ clearTimeout(this._timeout);
+ this._lastFlush = Date.now();
+ this._resolve();
+ this.Events.trigger("batch", this._arr);
+ this._arr = [];
+ return this._resetPromise();
+ }
+
+ add(data) {
+ var ret;
+ this._arr.push(data);
+ ret = this._promise;
+ if (this._arr.length === this.maxSize) {
+ this._flush();
+ } else if ((this.maxTime != null) && this._arr.length === 1) {
+ this._timeout = setTimeout(() => {
+ return this._flush();
+ }, this.maxTime);
+ }
+ return ret;
+ }
+
+ }
+ Batcher.prototype.defaults = {
+ maxTime: null,
+ maxSize: null,
+ Promise: Promise
+ };
+
+ return Batcher;
+
+ }).call(commonjsGlobal);
+
+ var Batcher_1 = Batcher;
+
+ var require$$4$1 = () => console.log('You must import the full version of Bottleneck in order to use this feature.');
+
+ var require$$8 = getCjsExportFromNamespace(version$2);
+
+ var Bottleneck, DEFAULT_PRIORITY$1, Events$4, Job$1, LocalDatastore$1, NUM_PRIORITIES$1, Queues$1, RedisDatastore$1, States$1, Sync$1, parser$5,
+ splice = [].splice;
+
+ NUM_PRIORITIES$1 = 10;
+
+ DEFAULT_PRIORITY$1 = 5;
+
+ parser$5 = parser;
+
+ Queues$1 = Queues_1;
+
+ Job$1 = Job_1;
+
+ LocalDatastore$1 = LocalDatastore_1;
+
+ RedisDatastore$1 = require$$4$1;
+
+ Events$4 = Events_1;
+
+ States$1 = States_1;
+
+ Sync$1 = Sync_1;
+
+ Bottleneck = (function() {
+ class Bottleneck {
+ constructor(options = {}, ...invalid) {
+ var storeInstanceOptions, storeOptions;
+ this._addToQueue = this._addToQueue.bind(this);
+ this._validateOptions(options, invalid);
+ parser$5.load(options, this.instanceDefaults, this);
+ this._queues = new Queues$1(NUM_PRIORITIES$1);
+ this._scheduled = {};
+ this._states = new States$1(["RECEIVED", "QUEUED", "RUNNING", "EXECUTING"].concat(this.trackDoneStatus ? ["DONE"] : []));
+ this._limiter = null;
+ this.Events = new Events$4(this);
+ this._submitLock = new Sync$1("submit", this.Promise);
+ this._registerLock = new Sync$1("register", this.Promise);
+ storeOptions = parser$5.load(options, this.storeDefaults, {});
+ this._store = (function() {
+ if (this.datastore === "redis" || this.datastore === "ioredis" || (this.connection != null)) {
+ storeInstanceOptions = parser$5.load(options, this.redisStoreDefaults, {});
+ return new RedisDatastore$1(this, storeOptions, storeInstanceOptions);
+ } else if (this.datastore === "local") {
+ storeInstanceOptions = parser$5.load(options, this.localStoreDefaults, {});
+ return new LocalDatastore$1(this, storeOptions, storeInstanceOptions);
+ } else {
+ throw new Bottleneck.prototype.BottleneckError(`Invalid datastore type: ${this.datastore}`);
+ }
+ }).call(this);
+ this._queues.on("leftzero", () => {
+ var ref;
+ return (ref = this._store.heartbeat) != null ? typeof ref.ref === "function" ? ref.ref() : void 0 : void 0;
+ });
+ this._queues.on("zero", () => {
+ var ref;
+ return (ref = this._store.heartbeat) != null ? typeof ref.unref === "function" ? ref.unref() : void 0 : void 0;
+ });
+ }
+
+ _validateOptions(options, invalid) {
+ if (!((options != null) && typeof options === "object" && invalid.length === 0)) {
+ throw new Bottleneck.prototype.BottleneckError("Bottleneck v2 takes a single object argument. Refer to https://github.com/SGrondin/bottleneck#upgrading-to-v2 if you're upgrading from Bottleneck v1.");
+ }
+ }
+
+ ready() {
+ return this._store.ready;
+ }
+
+ clients() {
+ return this._store.clients;
+ }
+
+ channel() {
+ return `b_${this.id}`;
+ }
+
+ channel_client() {
+ return `b_${this.id}_${this._store.clientId}`;
+ }
+
+ publish(message) {
+ return this._store.__publish__(message);
+ }
+
+ disconnect(flush = true) {
+ return this._store.__disconnect__(flush);
+ }
+
+ chain(_limiter) {
+ this._limiter = _limiter;
+ return this;
+ }
+
+ queued(priority) {
+ return this._queues.queued(priority);
+ }
+
+ clusterQueued() {
+ return this._store.__queued__();
+ }
+
+ empty() {
+ return this.queued() === 0 && this._submitLock.isEmpty();
+ }
+
+ running() {
+ return this._store.__running__();
+ }
+
+ done() {
+ return this._store.__done__();
+ }
+
+ jobStatus(id) {
+ return this._states.jobStatus(id);
+ }
+
+ jobs(status) {
+ return this._states.statusJobs(status);
+ }
+
+ counts() {
+ return this._states.statusCounts();
+ }
+
+ _randomIndex() {
+ return Math.random().toString(36).slice(2);
+ }
+
+ check(weight = 1) {
+ return this._store.__check__(weight);
+ }
+
+ _clearGlobalState(index) {
+ if (this._scheduled[index] != null) {
+ clearTimeout(this._scheduled[index].expiration);
+ delete this._scheduled[index];
+ return true;
+ } else {
+ return false;
+ }
+ }
+
+ async _free(index, job, options, eventInfo) {
+ var e, running;
+ try {
+ ({running} = (await this._store.__free__(index, options.weight)));
+ this.Events.trigger("debug", `Freed ${options.id}`, eventInfo);
+ if (running === 0 && this.empty()) {
+ return this.Events.trigger("idle");
+ }
+ } catch (error1) {
+ e = error1;
+ return this.Events.trigger("error", e);
+ }
+ }
+
+ _run(index, job, wait) {
+ var clearGlobalState, free, run;
+ job.doRun();
+ clearGlobalState = this._clearGlobalState.bind(this, index);
+ run = this._run.bind(this, index, job);
+ free = this._free.bind(this, index, job);
+ return this._scheduled[index] = {
+ timeout: setTimeout(() => {
+ return job.doExecute(this._limiter, clearGlobalState, run, free);
+ }, wait),
+ expiration: job.options.expiration != null ? setTimeout(function() {
+ return job.doExpire(clearGlobalState, run, free);
+ }, wait + job.options.expiration) : void 0,
+ job: job
+ };
+ }
+
+ _drainOne(capacity) {
+ return this._registerLock.schedule(() => {
+ var args, index, next, options, queue;
+ if (this.queued() === 0) {
+ return this.Promise.resolve(null);
+ }
+ queue = this._queues.getFirst();
+ ({options, args} = next = queue.first());
+ if ((capacity != null) && options.weight > capacity) {
+ return this.Promise.resolve(null);
+ }
+ this.Events.trigger("debug", `Draining ${options.id}`, {args, options});
+ index = this._randomIndex();
+ return this._store.__register__(index, options.weight, options.expiration).then(({success, wait, reservoir}) => {
+ var empty;
+ this.Events.trigger("debug", `Drained ${options.id}`, {success, args, options});
+ if (success) {
+ queue.shift();
+ empty = this.empty();
+ if (empty) {
+ this.Events.trigger("empty");
+ }
+ if (reservoir === 0) {
+ this.Events.trigger("depleted", empty);
+ }
+ this._run(index, next, wait);
+ return this.Promise.resolve(options.weight);
+ } else {
+ return this.Promise.resolve(null);
+ }
+ });
+ });
+ }
+
+ _drainAll(capacity, total = 0) {
+ return this._drainOne(capacity).then((drained) => {
+ var newCapacity;
+ if (drained != null) {
+ newCapacity = capacity != null ? capacity - drained : capacity;
+ return this._drainAll(newCapacity, total + drained);
+ } else {
+ return this.Promise.resolve(total);
+ }
+ }).catch((e) => {
+ return this.Events.trigger("error", e);
+ });
+ }
+
+ _dropAllQueued(message) {
+ return this._queues.shiftAll(function(job) {
+ return job.doDrop({message});
+ });
+ }
+
+ stop(options = {}) {
+ var done, waitForExecuting;
+ options = parser$5.load(options, this.stopDefaults);
+ waitForExecuting = (at) => {
+ var finished;
+ finished = () => {
+ var counts;
+ counts = this._states.counts;
+ return (counts[0] + counts[1] + counts[2] + counts[3]) === at;
+ };
+ return new this.Promise((resolve, reject) => {
+ if (finished()) {
+ return resolve();
+ } else {
+ return this.on("done", () => {
+ if (finished()) {
+ this.removeAllListeners("done");
+ return resolve();
+ }
+ });
+ }
+ });
+ };
+ done = options.dropWaitingJobs ? (this._run = function(index, next) {
+ return next.doDrop({
+ message: options.dropErrorMessage
+ });
+ }, this._drainOne = () => {
+ return this.Promise.resolve(null);
+ }, this._registerLock.schedule(() => {
+ return this._submitLock.schedule(() => {
+ var k, ref, v;
+ ref = this._scheduled;
+ for (k in ref) {
+ v = ref[k];
+ if (this.jobStatus(v.job.options.id) === "RUNNING") {
+ clearTimeout(v.timeout);
+ clearTimeout(v.expiration);
+ v.job.doDrop({
+ message: options.dropErrorMessage
+ });
+ }
+ }
+ this._dropAllQueued(options.dropErrorMessage);
+ return waitForExecuting(0);
+ });
+ })) : this.schedule({
+ priority: NUM_PRIORITIES$1 - 1,
+ weight: 0
+ }, () => {
+ return waitForExecuting(1);
+ });
+ this._receive = function(job) {
+ return job._reject(new Bottleneck.prototype.BottleneckError(options.enqueueErrorMessage));
+ };
+ this.stop = () => {
+ return this.Promise.reject(new Bottleneck.prototype.BottleneckError("stop() has already been called"));
+ };
+ return done;
+ }
+
+ async _addToQueue(job) {
+ var args, blocked, error, options, reachedHWM, shifted, strategy;
+ ({args, options} = job);
+ try {
+ ({reachedHWM, blocked, strategy} = (await this._store.__submit__(this.queued(), options.weight)));
+ } catch (error1) {
+ error = error1;
+ this.Events.trigger("debug", `Could not queue ${options.id}`, {args, options, error});
+ job.doDrop({error});
+ return false;
+ }
+ if (blocked) {
+ job.doDrop();
+ return true;
+ } else if (reachedHWM) {
+ shifted = strategy === Bottleneck.prototype.strategy.LEAK ? this._queues.shiftLastFrom(options.priority) : strategy === Bottleneck.prototype.strategy.OVERFLOW_PRIORITY ? this._queues.shiftLastFrom(options.priority + 1) : strategy === Bottleneck.prototype.strategy.OVERFLOW ? job : void 0;
+ if (shifted != null) {
+ shifted.doDrop();
+ }
+ if ((shifted == null) || strategy === Bottleneck.prototype.strategy.OVERFLOW) {
+ if (shifted == null) {
+ job.doDrop();
+ }
+ return reachedHWM;
+ }
+ }
+ job.doQueue(reachedHWM, blocked);
+ this._queues.push(job);
+ await this._drainAll();
+ return reachedHWM;
+ }
+
+ _receive(job) {
+ if (this._states.jobStatus(job.options.id) != null) {
+ job._reject(new Bottleneck.prototype.BottleneckError(`A job with the same id already exists (id=${job.options.id})`));
+ return false;
+ } else {
+ job.doReceive();
+ return this._submitLock.schedule(this._addToQueue, job);
+ }
+ }
+
+ submit(...args) {
+ var cb, fn, job, options, ref, ref1, task;
+ if (typeof args[0] === "function") {
+ ref = args, [fn, ...args] = ref, [cb] = splice.call(args, -1);
+ options = parser$5.load({}, this.jobDefaults);
+ } else {
+ ref1 = args, [options, fn, ...args] = ref1, [cb] = splice.call(args, -1);
+ options = parser$5.load(options, this.jobDefaults);
+ }
+ task = (...args) => {
+ return new this.Promise(function(resolve, reject) {
+ return fn(...args, function(...args) {
+ return (args[0] != null ? reject : resolve)(args);
+ });
+ });
+ };
+ job = new Job$1(task, args, options, this.jobDefaults, this.rejectOnDrop, this.Events, this._states, this.Promise);
+ job.promise.then(function(args) {
+ return typeof cb === "function" ? cb(...args) : void 0;
+ }).catch(function(args) {
+ if (Array.isArray(args)) {
+ return typeof cb === "function" ? cb(...args) : void 0;
+ } else {
+ return typeof cb === "function" ? cb(args) : void 0;
+ }
+ });
+ return this._receive(job);
+ }
+
+ schedule(...args) {
+ var job, options, task;
+ if (typeof args[0] === "function") {
+ [task, ...args] = args;
+ options = {};
+ } else {
+ [options, task, ...args] = args;
+ }
+ job = new Job$1(task, args, options, this.jobDefaults, this.rejectOnDrop, this.Events, this._states, this.Promise);
+ this._receive(job);
+ return job.promise;
+ }
+
+ wrap(fn) {
+ var schedule, wrapped;
+ schedule = this.schedule.bind(this);
+ wrapped = function(...args) {
+ return schedule(fn.bind(this), ...args);
+ };
+ wrapped.withOptions = function(options, ...args) {
+ return schedule(options, fn, ...args);
+ };
+ return wrapped;
+ }
+
+ async updateSettings(options = {}) {
+ await this._store.__updateSettings__(parser$5.overwrite(options, this.storeDefaults));
+ parser$5.overwrite(options, this.instanceDefaults, this);
+ return this;
+ }
+
+ currentReservoir() {
+ return this._store.__currentReservoir__();
+ }
+
+ incrementReservoir(incr = 0) {
+ return this._store.__incrementReservoir__(incr);
+ }
+
+ }
+ Bottleneck.default = Bottleneck;
+
+ Bottleneck.Events = Events$4;
+
+ Bottleneck.version = Bottleneck.prototype.version = require$$8.version;
+
+ Bottleneck.strategy = Bottleneck.prototype.strategy = {
+ LEAK: 1,
+ OVERFLOW: 2,
+ OVERFLOW_PRIORITY: 4,
+ BLOCK: 3
+ };
+
+ Bottleneck.BottleneckError = Bottleneck.prototype.BottleneckError = BottleneckError_1;
+
+ Bottleneck.Group = Bottleneck.prototype.Group = Group_1;
+
+ Bottleneck.RedisConnection = Bottleneck.prototype.RedisConnection = require$$2;
+
+ Bottleneck.IORedisConnection = Bottleneck.prototype.IORedisConnection = require$$3;
+
+ Bottleneck.Batcher = Bottleneck.prototype.Batcher = Batcher_1;
+
+ Bottleneck.prototype.jobDefaults = {
+ priority: DEFAULT_PRIORITY$1,
+ weight: 1,
+ expiration: null,
+ id: ""
+ };
+
+ Bottleneck.prototype.storeDefaults = {
+ maxConcurrent: null,
+ minTime: 0,
+ highWater: null,
+ strategy: Bottleneck.prototype.strategy.LEAK,
+ penalty: null,
+ reservoir: null,
+ reservoirRefreshInterval: null,
+ reservoirRefreshAmount: null,
+ reservoirIncreaseInterval: null,
+ reservoirIncreaseAmount: null,
+ reservoirIncreaseMaximum: null
+ };
+
+ Bottleneck.prototype.localStoreDefaults = {
+ Promise: Promise,
+ timeout: null,
+ heartbeatInterval: 250
+ };
+
+ Bottleneck.prototype.redisStoreDefaults = {
+ Promise: Promise,
+ timeout: null,
+ heartbeatInterval: 5000,
+ clientTimeout: 10000,
+ Redis: null,
+ clientOptions: {},
+ clusterNodes: null,
+ clearDatastore: false,
+ connection: null
+ };
+
+ Bottleneck.prototype.instanceDefaults = {
+ datastore: "local",
+ connection: null,
+ id: "",
+ rejectOnDrop: true,
+ trackDoneStatus: false,
+ Promise: Promise
+ };
+
+ Bottleneck.prototype.stopDefaults = {
+ enqueueErrorMessage: "This limiter has been stopped and cannot accept new jobs.",
+ dropWaitingJobs: true,
+ dropErrorMessage: "This limiter has been stopped."
+ };
+
+ return Bottleneck;
+
+ }).call(commonjsGlobal);
+
+ var Bottleneck_1 = Bottleneck;
+
+ var lib = Bottleneck_1;
+
+ return lib;
+
+})));
diff --git a/node_modules/bottleneck/package.json b/node_modules/bottleneck/package.json
new file mode 100644
index 000000000..8abb77421
--- /dev/null
+++ b/node_modules/bottleneck/package.json
@@ -0,0 +1,56 @@
+{
+ "name": "bottleneck",
+ "version": "2.19.5",
+ "description": "Distributed task scheduler and rate limiter",
+ "main": "lib/index.js",
+ "typings": "bottleneck.d.ts",
+ "scripts": {
+ "test": "mocha test",
+ "test-all": "./scripts/test_all.sh"
+ },
+ "repository": {
+ "type": "git",
+ "url": "https://github.com/SGrondin/bottleneck"
+ },
+ "keywords": [
+ "async rate limiter",
+ "rate limiter",
+ "rate limiting",
+ "async",
+ "rate",
+ "limiting",
+ "limiter",
+ "throttle",
+ "throttling",
+ "throttler",
+ "load",
+ "clustering"
+ ],
+ "author": {
+ "name": "Simon Grondin"
+ },
+ "license": "MIT",
+ "bugs": {
+ "url": "https://github.com/SGrondin/bottleneck/issues"
+ },
+ "devDependencies": {
+ "@babel/core": "^7.5.0",
+ "@babel/preset-env": "^7.5.0",
+ "@types/es6-promise": "0.0.33",
+ "assert": "^1.5.0",
+ "coffeescript": "2.4.x",
+ "ejs-cli": "github:SGrondin/ejs-cli#master",
+ "ioredis": "^4.11.1",
+ "leakage": "^0.4.0",
+ "mocha": "^6.1.4",
+ "redis": "^2.8.0",
+ "regenerator-runtime": "^0.12.1",
+ "rollup": "^0.66.6",
+ "rollup-plugin-babel": "^4.3.3",
+ "rollup-plugin-commonjs": "^9.3.4",
+ "rollup-plugin-json": "^3.1.0",
+ "rollup-plugin-node-resolve": "^3.4.0",
+ "typescript": "^2.6.2"
+ },
+ "dependencies": {}
+}
\ No newline at end of file
diff --git a/node_modules/bottleneck/rollup.config.es5.js b/node_modules/bottleneck/rollup.config.es5.js
new file mode 100644
index 000000000..8b0483e27
--- /dev/null
+++ b/node_modules/bottleneck/rollup.config.es5.js
@@ -0,0 +1,34 @@
+import json from 'rollup-plugin-json';
+import resolve from 'rollup-plugin-node-resolve';
+import commonjs from 'rollup-plugin-commonjs';
+import babel from 'rollup-plugin-babel';
+
+const bannerLines = [
+ 'This file contains the full Bottleneck library (MIT) compiled to ES5.',
+ 'https://github.com/SGrondin/bottleneck',
+ 'It also contains the regenerator-runtime (MIT), necessary for Babel-generated ES5 code to execute promise and async/await code.',
+ 'See the following link for Copyright and License information:',
+ 'https://github.com/facebook/regenerator/blob/master/packages/regenerator-runtime/runtime.js',
+].map(x => ` * ${x}`).join('\n');
+const banner = `/**\n${bannerLines}\n */`;
+
+export default {
+ input: 'lib/es5.js',
+ output: {
+ name: 'Bottleneck',
+ file: 'es5.js',
+ sourcemap: false,
+ globals: {},
+ format: 'umd',
+ banner
+ },
+ external: [],
+ plugins: [
+ json(),
+ resolve(),
+ commonjs(),
+ babel({
+ exclude: 'node_modules/**'
+ })
+ ]
+};
diff --git a/node_modules/bottleneck/rollup.config.light.js b/node_modules/bottleneck/rollup.config.light.js
new file mode 100644
index 000000000..6a72c709e
--- /dev/null
+++ b/node_modules/bottleneck/rollup.config.light.js
@@ -0,0 +1,44 @@
+import commonjs from 'rollup-plugin-commonjs';
+import json from 'rollup-plugin-json';
+import resolve from 'rollup-plugin-node-resolve';
+
+const bannerLines = [
+ 'This file contains the Bottleneck library (MIT), compiled to ES2017, and without Clustering support.',
+ 'https://github.com/SGrondin/bottleneck',
+].map(x => ` * ${x}`).join('\n');
+const banner = `/**\n${bannerLines}\n */`;
+
+const missing = `export default () => console.log('You must import the full version of Bottleneck in order to use this feature.');`;
+const exclude = [
+ 'RedisDatastore.js',
+ 'RedisConnection.js',
+ 'IORedisConnection.js',
+ 'Scripts.js'
+];
+
+export default {
+ input: 'lib/index.js',
+ output: {
+ name: 'Bottleneck',
+ file: 'light.js',
+ sourcemap: false,
+ globals: {},
+ format: 'umd',
+ banner
+ },
+ external: [],
+ plugins: [
+ json(),
+ {
+ load: id => {
+ const chunks = id.split('/');
+ const file = chunks[chunks.length - 1];
+ if (exclude.indexOf(file) >= 0) {
+ return missing
+ }
+ }
+ },
+ resolve(),
+ commonjs()
+ ]
+};
diff --git a/node_modules/bottleneck/scripts/assemble_lua.js b/node_modules/bottleneck/scripts/assemble_lua.js
new file mode 100644
index 000000000..eb7a93b79
--- /dev/null
+++ b/node_modules/bottleneck/scripts/assemble_lua.js
@@ -0,0 +1,25 @@
+var fs = require('fs')
+
+var input = __dirname + '/../src/redis'
+var loaded = {}
+
+var promises = fs.readdirSync(input).map(function (file) {
+ return new Promise(function (resolve, reject) {
+ fs.readFile(input + '/' + file, function (err, data) {
+ if (err != null) {
+ return reject(err)
+ }
+ loaded[file] = data.toString('utf8')
+ return resolve()
+ })
+ })
+})
+
+Promise.all(promises)
+.then(function () {
+ console.log(JSON.stringify(loaded, Object.keys(loaded).sort(), 2))
+})
+.catch(function (err) {
+ console.error(err)
+ process.exit(1)
+})
diff --git a/node_modules/bottleneck/scripts/build.sh b/node_modules/bottleneck/scripts/build.sh
new file mode 100755
index 000000000..4aadfc659
--- /dev/null
+++ b/node_modules/bottleneck/scripts/build.sh
@@ -0,0 +1,82 @@
+#!/usr/bin/env bash
+
+set -e
+
+if [ ! -d node_modules ]; then
+ echo "[B] Run 'npm install' first"
+ exit 1
+fi
+
+
+clean() {
+ rm -f .babelrc
+ rm -rf lib/*
+ node scripts/version.js > lib/version.json
+ node scripts/assemble_lua.js > lib/lua.json
+}
+
+makeLib10() {
+ echo '[B] Compiling Bottleneck to Node 10+...'
+ npx coffee --compile --bare --no-header src/*.coffee
+ mv src/*.js lib/
+}
+
+makeLib6() {
+ echo '[B] Compiling Bottleneck to Node 6+...'
+ ln -s .babelrc.lib .babelrc
+ npx coffee --compile --bare --no-header --transpile src/*.coffee
+ mv src/*.js lib/
+}
+
+makeES5() {
+ echo '[B] Compiling Bottleneck to ES5...'
+ ln -s .babelrc.es5 .babelrc
+ npx coffee --compile --bare --no-header src/*.coffee
+ mv src/*.js lib/
+
+ echo '[B] Assembling ES5 bundle...'
+ npx rollup -c rollup.config.es5.js
+}
+
+makeLight() {
+ makeLib10
+
+ echo '[B] Assembling light bundle...'
+ npx rollup -c rollup.config.light.js
+}
+
+makeTypings() {
+ echo '[B] Compiling and testing TS typings...'
+ npx ejs-cli bottleneck.d.ts.ejs > bottleneck.d.ts
+ npx tsc --noEmit --strict test.ts
+}
+
+if [ "$1" = 'dev' ]; then
+ clean
+ makeLib10
+elif [ "$1" = 'bench' ]; then
+ clean
+ makeLib6
+elif [ "$1" = 'es5' ]; then
+ clean
+ makeES5
+elif [ "$1" = 'light' ]; then
+ clean
+ makeLight
+elif [ "$1" = 'typings' ]; then
+ makeTypings
+else
+ clean
+ makeES5
+
+ clean
+ makeLight
+
+ clean
+ makeLib6
+ makeTypings
+fi
+
+rm -f .babelrc
+
+echo '[B] Done!'
diff --git a/node_modules/bottleneck/scripts/test_all.sh b/node_modules/bottleneck/scripts/test_all.sh
new file mode 100755
index 000000000..afc689292
--- /dev/null
+++ b/node_modules/bottleneck/scripts/test_all.sh
@@ -0,0 +1,20 @@
+#!/usr/bin/env bash
+
+set -e
+
+source .env
+
+echo 'ioredis tests'
+DATASTORE=ioredis npm test
+
+echo 'NodeRedis tests'
+DATASTORE=redis npm test
+
+echo 'ES5 bundle tests'
+BUILD=es5 npm test
+
+echo 'Light bundle tests'
+BUILD=light npm test
+
+echo 'Local tests'
+npm test
diff --git a/node_modules/bottleneck/scripts/version.js b/node_modules/bottleneck/scripts/version.js
new file mode 100644
index 000000000..75671dab0
--- /dev/null
+++ b/node_modules/bottleneck/scripts/version.js
@@ -0,0 +1,3 @@
+const packagejson = require('../package.json')
+
+console.log(JSON.stringify({version: packagejson.version}))
diff --git a/node_modules/bottleneck/src/Batcher.coffee b/node_modules/bottleneck/src/Batcher.coffee
new file mode 100644
index 000000000..5ddd66dcd
--- /dev/null
+++ b/node_modules/bottleneck/src/Batcher.coffee
@@ -0,0 +1,39 @@
+parser = require "./parser"
+Events = require "./Events"
+
+class Batcher
+ defaults:
+ maxTime: null
+ maxSize: null
+ Promise: Promise
+
+ constructor: (@options={}) ->
+ parser.load @options, @defaults, @
+ @Events = new Events @
+ @_arr = []
+ @_resetPromise()
+ @_lastFlush = Date.now()
+
+ _resetPromise: ->
+ @_promise = new @Promise (res, rej) => @_resolve = res
+
+ _flush: ->
+ clearTimeout @_timeout
+ @_lastFlush = Date.now()
+ @_resolve()
+ @Events.trigger "batch", @_arr
+ @_arr = []
+ @_resetPromise()
+
+ add: (data) ->
+ @_arr.push data
+ ret = @_promise
+ if @_arr.length == @maxSize
+ @_flush()
+ else if @maxTime? and @_arr.length == 1
+ @_timeout = setTimeout =>
+ @_flush()
+ , @maxTime
+ ret
+
+module.exports = Batcher
diff --git a/node_modules/bottleneck/src/Bottleneck.coffee b/node_modules/bottleneck/src/Bottleneck.coffee
new file mode 100644
index 000000000..37db2befc
--- /dev/null
+++ b/node_modules/bottleneck/src/Bottleneck.coffee
@@ -0,0 +1,298 @@
+NUM_PRIORITIES = 10
+DEFAULT_PRIORITY = 5
+
+parser = require "./parser"
+Queues = require "./Queues"
+Job = require "./Job"
+LocalDatastore = require "./LocalDatastore"
+RedisDatastore = require "./RedisDatastore"
+Events = require "./Events"
+States = require "./States"
+Sync = require "./Sync"
+
+class Bottleneck
+ Bottleneck.default = Bottleneck
+ Bottleneck.Events = Events
+ Bottleneck.version = Bottleneck::version = require("./version.json").version
+ Bottleneck.strategy = Bottleneck::strategy = { LEAK:1, OVERFLOW:2, OVERFLOW_PRIORITY:4, BLOCK:3 }
+ Bottleneck.BottleneckError = Bottleneck::BottleneckError = require "./BottleneckError"
+ Bottleneck.Group = Bottleneck::Group = require "./Group"
+ Bottleneck.RedisConnection = Bottleneck::RedisConnection = require "./RedisConnection"
+ Bottleneck.IORedisConnection = Bottleneck::IORedisConnection = require "./IORedisConnection"
+ Bottleneck.Batcher = Bottleneck::Batcher = require "./Batcher"
+ jobDefaults:
+ priority: DEFAULT_PRIORITY
+ weight: 1
+ expiration: null
+ id: "