From 31c2eca1679239ae31da47af68da4523490e914c Mon Sep 17 00:00:00 2001 From: Chris Gavin Date: Mon, 21 Sep 2020 11:06:21 +0100 Subject: [PATCH] Fix retrying uploads by using Octokit retry plugin. --- lib/api-client.js | 8 +- lib/api-client.js.map | 2 +- lib/upload-lib.js | 53 +- lib/upload-lib.js.map | 2 +- node_modules/@octokit/plugin-retry/LICENSE | 21 + node_modules/@octokit/plugin-retry/README.md | 105 + .../@octokit/plugin-retry/dist-node/index.js | 75 + .../plugin-retry/dist-node/index.js.map | 1 + .../plugin-retry/dist-src/error-request.js | 15 + .../@octokit/plugin-retry/dist-src/index.js | 26 + .../@octokit/plugin-retry/dist-src/version.js | 1 + .../plugin-retry/dist-src/wrap-request.js | 18 + .../dist-types/error-request.d.ts | 1 + .../plugin-retry/dist-types/index.d.ts | 6 + .../plugin-retry/dist-types/version.d.ts | 1 + .../plugin-retry/dist-types/wrap-request.d.ts | 1 + .../@octokit/plugin-retry/dist-web/index.js | 63 + .../plugin-retry/dist-web/index.js.map | 1 + .../@octokit/plugin-retry/package.json | 49 + node_modules/bottleneck/.babelrc.es5 | 5 + node_modules/bottleneck/.babelrc.lib | 9 + node_modules/bottleneck/.env | 2 + node_modules/bottleneck/.travis.yml | 25 + node_modules/bottleneck/LICENSE | 20 + node_modules/bottleneck/README.md | 1027 ++++ node_modules/bottleneck/bottleneck.d.ts | 629 ++ node_modules/bottleneck/bottleneck.d.ts.ejs | 588 ++ node_modules/bottleneck/bower.json | 30 + node_modules/bottleneck/es5.js | 5064 +++++++++++++++++ node_modules/bottleneck/lib/Batcher.js | 66 + node_modules/bottleneck/lib/Bottleneck.js | 594 ++ .../bottleneck/lib/BottleneckError.js | 5 + node_modules/bottleneck/lib/DLList.js | 107 + node_modules/bottleneck/lib/Events.js | 128 + node_modules/bottleneck/lib/Group.js | 198 + .../bottleneck/lib/IORedisConnection.js | 186 + node_modules/bottleneck/lib/Job.js | 215 + node_modules/bottleneck/lib/LocalDatastore.js | 287 + node_modules/bottleneck/lib/Queues.js | 77 + .../bottleneck/lib/RedisConnection.js | 193 + node_modules/bottleneck/lib/RedisDatastore.js | 352 ++ node_modules/bottleneck/lib/Scripts.js | 162 + node_modules/bottleneck/lib/States.js | 88 + node_modules/bottleneck/lib/Sync.js | 80 + node_modules/bottleneck/lib/es5.js | 5 + node_modules/bottleneck/lib/index.js | 3 + node_modules/bottleneck/lib/lua.json | 24 + node_modules/bottleneck/lib/parser.js | 26 + node_modules/bottleneck/lib/version.json | 1 + node_modules/bottleneck/light.js | 1524 +++++ node_modules/bottleneck/package.json | 56 + node_modules/bottleneck/rollup.config.es5.js | 34 + .../bottleneck/rollup.config.light.js | 44 + .../bottleneck/scripts/assemble_lua.js | 25 + node_modules/bottleneck/scripts/build.sh | 82 + node_modules/bottleneck/scripts/test_all.sh | 20 + node_modules/bottleneck/scripts/version.js | 3 + node_modules/bottleneck/src/Batcher.coffee | 39 + node_modules/bottleneck/src/Bottleneck.coffee | 298 + .../bottleneck/src/BottleneckError.coffee | 3 + node_modules/bottleneck/src/DLList.coffee | 38 + node_modules/bottleneck/src/Events.coffee | 38 + node_modules/bottleneck/src/Group.coffee | 80 + .../bottleneck/src/IORedisConnection.coffee | 84 + node_modules/bottleneck/src/Job.coffee | 98 + .../bottleneck/src/LocalDatastore.coffee | 140 + node_modules/bottleneck/src/Queues.coffee | 28 + .../bottleneck/src/RedisConnection.coffee | 91 + .../bottleneck/src/RedisDatastore.coffee | 158 + node_modules/bottleneck/src/Scripts.coffee | 151 + node_modules/bottleneck/src/States.coffee | 43 + node_modules/bottleneck/src/Sync.coffee | 28 + 
node_modules/bottleneck/src/es5.coffee | 3 + node_modules/bottleneck/src/index.coffee | 1 + node_modules/bottleneck/src/parser.coffee | 10 + .../bottleneck/src/redis/blacklist_client.lua | 8 + node_modules/bottleneck/src/redis/check.lua | 6 + .../bottleneck/src/redis/conditions_check.lua | 3 + .../src/redis/current_reservoir.lua | 1 + node_modules/bottleneck/src/redis/done.lua | 3 + node_modules/bottleneck/src/redis/free.lua | 5 + .../bottleneck/src/redis/get_time.lua | 7 + .../bottleneck/src/redis/group_check.lua | 1 + .../bottleneck/src/redis/heartbeat.lua | 1 + .../src/redis/increment_reservoir.lua | 10 + node_modules/bottleneck/src/redis/init.lua | 105 + .../bottleneck/src/redis/process_tick.lua | 214 + node_modules/bottleneck/src/redis/queued.lua | 10 + .../src/redis/refresh_expiration.lua | 11 + node_modules/bottleneck/src/redis/refs.lua | 13 + .../bottleneck/src/redis/register.lua | 51 + .../bottleneck/src/redis/register_client.lua | 12 + node_modules/bottleneck/src/redis/running.lua | 1 + node_modules/bottleneck/src/redis/submit.lua | 74 + .../bottleneck/src/redis/update_settings.lua | 14 + .../bottleneck/src/redis/validate_client.lua | 5 + .../bottleneck/src/redis/validate_keys.lua | 3 + node_modules/bottleneck/test.ts | 335 ++ node_modules/bottleneck/test/DLList.js | 148 + node_modules/bottleneck/test/batcher.js | 209 + node_modules/bottleneck/test/bottleneck.js | 7 + node_modules/bottleneck/test/cluster.js | 1549 +++++ node_modules/bottleneck/test/context.js | 142 + node_modules/bottleneck/test/general.js | 867 +++ node_modules/bottleneck/test/group.js | 255 + node_modules/bottleneck/test/ioredis.js | 135 + node_modules/bottleneck/test/node_redis.js | 100 + node_modules/bottleneck/test/priority.js | 184 + node_modules/bottleneck/test/promises.js | 202 + node_modules/bottleneck/test/retries.js | 237 + .../test/spawn/increaseKeepAlive.js | 17 + .../bottleneck/test/spawn/refreshKeepAlive.js | 17 + node_modules/bottleneck/test/states.js | 103 + node_modules/bottleneck/test/stop.js | 208 + package-lock.json | 14 + package.json | 1 + src/api-client.ts | 16 +- src/upload-lib.ts | 76 +- 118 files changed, 19026 insertions(+), 118 deletions(-) create mode 100644 node_modules/@octokit/plugin-retry/LICENSE create mode 100644 node_modules/@octokit/plugin-retry/README.md create mode 100644 node_modules/@octokit/plugin-retry/dist-node/index.js create mode 100644 node_modules/@octokit/plugin-retry/dist-node/index.js.map create mode 100644 node_modules/@octokit/plugin-retry/dist-src/error-request.js create mode 100644 node_modules/@octokit/plugin-retry/dist-src/index.js create mode 100644 node_modules/@octokit/plugin-retry/dist-src/version.js create mode 100644 node_modules/@octokit/plugin-retry/dist-src/wrap-request.js create mode 100644 node_modules/@octokit/plugin-retry/dist-types/error-request.d.ts create mode 100644 node_modules/@octokit/plugin-retry/dist-types/index.d.ts create mode 100644 node_modules/@octokit/plugin-retry/dist-types/version.d.ts create mode 100644 node_modules/@octokit/plugin-retry/dist-types/wrap-request.d.ts create mode 100644 node_modules/@octokit/plugin-retry/dist-web/index.js create mode 100644 node_modules/@octokit/plugin-retry/dist-web/index.js.map create mode 100644 node_modules/@octokit/plugin-retry/package.json create mode 100644 node_modules/bottleneck/.babelrc.es5 create mode 100644 node_modules/bottleneck/.babelrc.lib create mode 100644 node_modules/bottleneck/.env create mode 100644 node_modules/bottleneck/.travis.yml create mode 100644 
node_modules/bottleneck/LICENSE create mode 100644 node_modules/bottleneck/README.md create mode 100644 node_modules/bottleneck/bottleneck.d.ts create mode 100644 node_modules/bottleneck/bottleneck.d.ts.ejs create mode 100644 node_modules/bottleneck/bower.json create mode 100644 node_modules/bottleneck/es5.js create mode 100644 node_modules/bottleneck/lib/Batcher.js create mode 100644 node_modules/bottleneck/lib/Bottleneck.js create mode 100644 node_modules/bottleneck/lib/BottleneckError.js create mode 100644 node_modules/bottleneck/lib/DLList.js create mode 100644 node_modules/bottleneck/lib/Events.js create mode 100644 node_modules/bottleneck/lib/Group.js create mode 100644 node_modules/bottleneck/lib/IORedisConnection.js create mode 100644 node_modules/bottleneck/lib/Job.js create mode 100644 node_modules/bottleneck/lib/LocalDatastore.js create mode 100644 node_modules/bottleneck/lib/Queues.js create mode 100644 node_modules/bottleneck/lib/RedisConnection.js create mode 100644 node_modules/bottleneck/lib/RedisDatastore.js create mode 100644 node_modules/bottleneck/lib/Scripts.js create mode 100644 node_modules/bottleneck/lib/States.js create mode 100644 node_modules/bottleneck/lib/Sync.js create mode 100644 node_modules/bottleneck/lib/es5.js create mode 100644 node_modules/bottleneck/lib/index.js create mode 100644 node_modules/bottleneck/lib/lua.json create mode 100644 node_modules/bottleneck/lib/parser.js create mode 100644 node_modules/bottleneck/lib/version.json create mode 100644 node_modules/bottleneck/light.js create mode 100644 node_modules/bottleneck/package.json create mode 100644 node_modules/bottleneck/rollup.config.es5.js create mode 100644 node_modules/bottleneck/rollup.config.light.js create mode 100644 node_modules/bottleneck/scripts/assemble_lua.js create mode 100755 node_modules/bottleneck/scripts/build.sh create mode 100755 node_modules/bottleneck/scripts/test_all.sh create mode 100644 node_modules/bottleneck/scripts/version.js create mode 100644 node_modules/bottleneck/src/Batcher.coffee create mode 100644 node_modules/bottleneck/src/Bottleneck.coffee create mode 100644 node_modules/bottleneck/src/BottleneckError.coffee create mode 100644 node_modules/bottleneck/src/DLList.coffee create mode 100644 node_modules/bottleneck/src/Events.coffee create mode 100644 node_modules/bottleneck/src/Group.coffee create mode 100644 node_modules/bottleneck/src/IORedisConnection.coffee create mode 100644 node_modules/bottleneck/src/Job.coffee create mode 100644 node_modules/bottleneck/src/LocalDatastore.coffee create mode 100644 node_modules/bottleneck/src/Queues.coffee create mode 100644 node_modules/bottleneck/src/RedisConnection.coffee create mode 100644 node_modules/bottleneck/src/RedisDatastore.coffee create mode 100644 node_modules/bottleneck/src/Scripts.coffee create mode 100644 node_modules/bottleneck/src/States.coffee create mode 100644 node_modules/bottleneck/src/Sync.coffee create mode 100644 node_modules/bottleneck/src/es5.coffee create mode 100644 node_modules/bottleneck/src/index.coffee create mode 100644 node_modules/bottleneck/src/parser.coffee create mode 100644 node_modules/bottleneck/src/redis/blacklist_client.lua create mode 100644 node_modules/bottleneck/src/redis/check.lua create mode 100644 node_modules/bottleneck/src/redis/conditions_check.lua create mode 100644 node_modules/bottleneck/src/redis/current_reservoir.lua create mode 100644 node_modules/bottleneck/src/redis/done.lua create mode 100644 node_modules/bottleneck/src/redis/free.lua create mode 100644 
node_modules/bottleneck/src/redis/get_time.lua create mode 100644 node_modules/bottleneck/src/redis/group_check.lua create mode 100644 node_modules/bottleneck/src/redis/heartbeat.lua create mode 100644 node_modules/bottleneck/src/redis/increment_reservoir.lua create mode 100644 node_modules/bottleneck/src/redis/init.lua create mode 100644 node_modules/bottleneck/src/redis/process_tick.lua create mode 100644 node_modules/bottleneck/src/redis/queued.lua create mode 100644 node_modules/bottleneck/src/redis/refresh_expiration.lua create mode 100644 node_modules/bottleneck/src/redis/refs.lua create mode 100644 node_modules/bottleneck/src/redis/register.lua create mode 100644 node_modules/bottleneck/src/redis/register_client.lua create mode 100644 node_modules/bottleneck/src/redis/running.lua create mode 100644 node_modules/bottleneck/src/redis/submit.lua create mode 100644 node_modules/bottleneck/src/redis/update_settings.lua create mode 100644 node_modules/bottleneck/src/redis/validate_client.lua create mode 100644 node_modules/bottleneck/src/redis/validate_keys.lua create mode 100644 node_modules/bottleneck/test.ts create mode 100644 node_modules/bottleneck/test/DLList.js create mode 100644 node_modules/bottleneck/test/batcher.js create mode 100644 node_modules/bottleneck/test/bottleneck.js create mode 100644 node_modules/bottleneck/test/cluster.js create mode 100644 node_modules/bottleneck/test/context.js create mode 100644 node_modules/bottleneck/test/general.js create mode 100644 node_modules/bottleneck/test/group.js create mode 100644 node_modules/bottleneck/test/ioredis.js create mode 100644 node_modules/bottleneck/test/node_redis.js create mode 100644 node_modules/bottleneck/test/priority.js create mode 100644 node_modules/bottleneck/test/promises.js create mode 100644 node_modules/bottleneck/test/retries.js create mode 100644 node_modules/bottleneck/test/spawn/increaseKeepAlive.js create mode 100644 node_modules/bottleneck/test/spawn/refreshKeepAlive.js create mode 100644 node_modules/bottleneck/test/states.js create mode 100644 node_modules/bottleneck/test/stop.js diff --git a/lib/api-client.js b/lib/api-client.js index 765b0c1da..cef48f423 100644 --- a/lib/api-client.js +++ b/lib/api-client.js @@ -10,7 +10,8 @@ var __importDefault = (this && this.__importDefault) || function (mod) { return (mod && mod.__esModule) ? 
mod : { "default": mod }; }; Object.defineProperty(exports, "__esModule", { value: true }); -const github = __importStar(require("@actions/github")); +const githubUtils = __importStar(require("@actions/github/lib/utils")); +const retry = __importStar(require("@octokit/plugin-retry")); const console_log_level_1 = __importDefault(require("console-log-level")); const path = __importStar(require("path")); const actions_util_1 = require("./actions-util"); @@ -19,11 +20,12 @@ exports.getApiClient = function (githubAuth, githubUrl, allowLocalRun = false) { if (util_1.isLocalRun() && !allowLocalRun) { throw new Error("Invalid API call in local run"); } - return github.getOctokit(githubAuth, { + const retryingOctokit = githubUtils.GitHub.plugin(retry.retry); + return new retryingOctokit(githubUtils.getOctokitOptions(githubAuth, { baseUrl: getApiUrl(githubUrl), userAgent: "CodeQL Action", log: console_log_level_1.default({ level: "debug" }), - }); + })); }; function getApiUrl(githubUrl) { const url = new URL(githubUrl); diff --git a/lib/api-client.js.map b/lib/api-client.js.map index a41b8f192..a034ddbe6 100644 --- a/lib/api-client.js.map +++ b/lib/api-client.js.map @@ -1 +1 @@ -{"version":3,"file":"api-client.js","sourceRoot":"","sources":["../src/api-client.ts"],"names":[],"mappings":";;;;;;;;;;;;AAAA,wDAA0C;AAC1C,0EAAgD;AAChD,2CAA6B;AAE7B,iDAAuE;AACvE,iCAAoC;AAEvB,QAAA,YAAY,GAAG,UAC1B,UAAkB,EAClB,SAAiB,EACjB,aAAa,GAAG,KAAK;IAErB,IAAI,iBAAU,EAAE,IAAI,CAAC,aAAa,EAAE;QAClC,MAAM,IAAI,KAAK,CAAC,+BAA+B,CAAC,CAAC;KAClD;IACD,OAAO,MAAM,CAAC,UAAU,CAAC,UAAU,EAAE;QACnC,OAAO,EAAE,SAAS,CAAC,SAAS,CAAC;QAC7B,SAAS,EAAE,eAAe;QAC1B,GAAG,EAAE,2BAAe,CAAC,EAAE,KAAK,EAAE,OAAO,EAAE,CAAC;KACzC,CAAC,CAAC;AACL,CAAC,CAAC;AAEF,SAAS,SAAS,CAAC,SAAiB;IAClC,MAAM,GAAG,GAAG,IAAI,GAAG,CAAC,SAAS,CAAC,CAAC;IAE/B,kDAAkD;IAClD,0CAA0C;IAC1C,IAAI,GAAG,CAAC,QAAQ,KAAK,YAAY,IAAI,GAAG,CAAC,QAAQ,KAAK,gBAAgB,EAAE;QACtE,OAAO,wBAAwB,CAAC;KACjC;IAED,6BAA6B;IAC7B,GAAG,CAAC,QAAQ,GAAG,IAAI,CAAC,IAAI,CAAC,GAAG,CAAC,QAAQ,EAAE,KAAK,EAAE,IAAI,CAAC,CAAC;IACpD,OAAO,GAAG,CAAC,QAAQ,EAAE,CAAC;AACxB,CAAC;AAED,uFAAuF;AACvF,oFAAoF;AACpF,+CAA+C;AAC/C,SAAgB,mBAAmB,CAAC,aAAa,GAAG,KAAK;IACvD,OAAO,oBAAY,CACjB,+BAAgB,CAAC,OAAO,CAAC,EACzB,kCAAmB,CAAC,mBAAmB,CAAC,EACxC,aAAa,CACd,CAAC;AACJ,CAAC;AAND,kDAMC"} \ No newline at end of file +{"version":3,"file":"api-client.js","sourceRoot":"","sources":["../src/api-client.ts"],"names":[],"mappings":";;;;;;;;;;;;AAAA,uEAAyD;AACzD,6DAA+C;AAC/C,0EAAgD;AAChD,2CAA6B;AAE7B,iDAAuE;AACvE,iCAAoC;AAEvB,QAAA,YAAY,GAAG,UAC1B,UAAkB,EAClB,SAAiB,EACjB,aAAa,GAAG,KAAK;IAErB,IAAI,iBAAU,EAAE,IAAI,CAAC,aAAa,EAAE;QAClC,MAAM,IAAI,KAAK,CAAC,+BAA+B,CAAC,CAAC;KAClD;IACD,MAAM,eAAe,GAAG,WAAW,CAAC,MAAM,CAAC,MAAM,CAAC,KAAK,CAAC,KAAK,CAAC,CAAC;IAC/D,OAAO,IAAI,eAAe,CACxB,WAAW,CAAC,iBAAiB,CAAC,UAAU,EAAE;QACxC,OAAO,EAAE,SAAS,CAAC,SAAS,CAAC;QAC7B,SAAS,EAAE,eAAe;QAC1B,GAAG,EAAE,2BAAe,CAAC,EAAE,KAAK,EAAE,OAAO,EAAE,CAAC;KACzC,CAAC,CACH,CAAC;AACJ,CAAC,CAAC;AAEF,SAAS,SAAS,CAAC,SAAiB;IAClC,MAAM,GAAG,GAAG,IAAI,GAAG,CAAC,SAAS,CAAC,CAAC;IAE/B,kDAAkD;IAClD,0CAA0C;IAC1C,IAAI,GAAG,CAAC,QAAQ,KAAK,YAAY,IAAI,GAAG,CAAC,QAAQ,KAAK,gBAAgB,EAAE;QACtE,OAAO,wBAAwB,CAAC;KACjC;IAED,6BAA6B;IAC7B,GAAG,CAAC,QAAQ,GAAG,IAAI,CAAC,IAAI,CAAC,GAAG,CAAC,QAAQ,EAAE,KAAK,EAAE,IAAI,CAAC,CAAC;IACpD,OAAO,GAAG,CAAC,QAAQ,EAAE,CAAC;AACxB,CAAC;AAED,uFAAuF;AACvF,oFAAoF;AACpF,+CAA+C;AAC/C,SAAgB,mBAAmB,CAAC,aAAa,GAAG,KAAK;IACvD,OAAO,oBAAY,CACjB,+BAAgB,CAAC,OAAO,CAAC,EACzB,kCAAmB,CAAC,mBAAmB,CAAC,EACxC,aAAa,CACd,CAAC;AACJ,CAAC;AAND,kDAMC"} \ No newline at end of file diff --git a/lib/upload-lib.js 
b/lib/upload-lib.js index 302c21f4f..1792eb039 100644 --- a/lib/upload-lib.js +++ b/lib/upload-lib.js @@ -50,50 +50,17 @@ async function uploadPayload(payload, repositoryNwo, githubAuth, githubUrl, mode if (testMode) { return; } - // Make up to 4 attempts to upload, and sleep for these - // number of seconds between each attempt. - // We don't want to backoff too much to avoid wasting action - // minutes, but just waiting a little bit could maybe help. - const backoffPeriods = [1, 5, 15]; const client = api.getApiClient(githubAuth, githubUrl); - for (let attempt = 0; attempt <= backoffPeriods.length; attempt++) { - const reqURL = mode === "actions" - ? "PUT /repos/:owner/:repo/code-scanning/analysis" - : "POST /repos/:owner/:repo/code-scanning/sarifs"; - const response = await client.request(reqURL, { - owner: repositoryNwo.owner, - repo: repositoryNwo.repo, - data: payload, - }); - logger.debug(`response status: ${response.status}`); - const statusCode = response.status; - if (statusCode === 202) { - logger.info("Successfully uploaded results"); - return; - } - const requestID = response.headers["x-github-request-id"]; - // On any other status code that's not 5xx mark the upload as failed - if (!statusCode || statusCode < 500 || statusCode >= 600) { - throw new Error(`Upload failed (${requestID}): (${statusCode}) ${JSON.stringify(response.data)}`); - } - // On a 5xx status code we may retry the request - if (attempt < backoffPeriods.length) { - // Log the failure as a warning but don't mark the action as failed yet - logger.warning(`Upload attempt (${attempt + 1} of ${backoffPeriods.length + 1}) failed (${requestID}). Retrying in ${backoffPeriods[attempt]} seconds: (${statusCode}) ${JSON.stringify(response.data)}`); - // Sleep for the backoff period - await new Promise((r) => setTimeout(r, backoffPeriods[attempt] * 1000)); - continue; - } - else { - // If the upload fails with 5xx then we assume it is a temporary problem - // and not an error that the user has caused or can fix. - // We avoid marking the job as failed to avoid breaking CI workflows. - throw new Error(`Upload failed (${requestID}): (${statusCode}) ${JSON.stringify(response.data)}`); - } - } - // This case shouldn't ever happen as the final iteration of the loop - // will always throw an error instead of exiting to here. - throw new Error("Upload failed"); + const reqURL = mode === "actions" + ? "PUT /repos/:owner/:repo/code-scanning/analysis" + : "POST /repos/:owner/:repo/code-scanning/sarifs"; + const response = await client.request(reqURL, { + owner: repositoryNwo.owner, + repo: repositoryNwo.repo, + data: payload, + }); + logger.debug(`response status: ${response.status}`); + logger.info("Successfully uploaded results"); } // Uploads a single sarif file or a directory of sarif files // depending on what the path happens to refer to. 
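
The hunk above deletes `uploadPayload`'s hand-rolled retry loop; retrying now happens inside the Octokit client built in `lib/api-client.js` (first hunk of this patch). A minimal sketch of that pattern, assuming plain CommonJS requires rather than the compiled `__importStar`/`__importDefault` helpers shown in the diff:

```js
const githubUtils = require("@actions/github/lib/utils");
const { retry } = require("@octokit/plugin-retry");
const consoleLogLevel = require("console-log-level");

// Compose the retry plugin into the Octokit class once...
const RetryingOctokit = githubUtils.GitHub.plugin(retry);

// ...then construct clients as usual; failed requests are retried
// transparently inside Octokit instead of in uploadPayload.
function getApiClient(githubAuth, apiUrl) {
  return new RetryingOctokit(
    githubUtils.getOctokitOptions(githubAuth, {
      baseUrl: apiUrl,
      userAgent: "CodeQL Action",
      log: consoleLogLevel({ level: "debug" }),
    })
  );
}
```
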
diff --git a/lib/upload-lib.js.map b/lib/upload-lib.js.map index 5f61cae81..ff438a816 100644 --- a/lib/upload-lib.js.map +++ b/lib/upload-lib.js.map @@ -1 +1 @@ -{"version":3,"file":"upload-lib.js","sourceRoot":"","sources":["../src/upload-lib.ts"],"names":[],"mappings":";;;;;;;;;;;;AAAA,oDAAsC;AACtC,wDAA+B;AAC/B,uCAAyB;AACzB,uDAAyC;AACzC,2CAA6B;AAC7B,gDAAwB;AAExB,kDAAoC;AACpC,6DAA+C;AAG/C,gEAAkD;AAClD,6CAA+B;AAE/B,mEAAmE;AACnE,qDAAqD;AACrD,SAAgB,iBAAiB,CAAC,UAAoB;IACpD,MAAM,aAAa,GAAG;QACpB,OAAO,EAAE,IAAI;QACb,IAAI,EAAE,EAAW;KAClB,CAAC;IAEF,KAAK,MAAM,SAAS,IAAI,UAAU,EAAE;QAClC,MAAM,WAAW,GAAG,IAAI,CAAC,KAAK,CAAC,EAAE,CAAC,YAAY,CAAC,SAAS,EAAE,MAAM,CAAC,CAAC,CAAC;QACnE,sBAAsB;QACtB,IAAI,aAAa,CAAC,OAAO,KAAK,IAAI,EAAE;YAClC,aAAa,CAAC,OAAO,GAAG,WAAW,CAAC,OAAO,CAAC;SAC7C;aAAM,IAAI,aAAa,CAAC,OAAO,KAAK,WAAW,CAAC,OAAO,EAAE;YACxD,MAAM,yCAAyC,aAAa,CAAC,OAAO,QAAQ,WAAW,CAAC,OAAO,EAAE,CAAC;SACnG;QAED,aAAa,CAAC,IAAI,CAAC,IAAI,CAAC,GAAG,WAAW,CAAC,IAAI,CAAC,CAAC;KAC9C;IAED,OAAO,IAAI,CAAC,SAAS,CAAC,aAAa,CAAC,CAAC;AACvC,CAAC;AAnBD,8CAmBC;AAED,4BAA4B;AAC5B,qEAAqE;AACrE,KAAK,UAAU,aAAa,CAC1B,OAAY,EACZ,aAA4B,EAC5B,UAAkB,EAClB,SAAiB,EACjB,IAAe,EACf,MAAc;IAEd,MAAM,CAAC,IAAI,CAAC,mBAAmB,CAAC,CAAC;IAEjC,sDAAsD;IACtD,MAAM,QAAQ,GAAG,OAAO,CAAC,GAAG,CAAC,WAAW,CAAC,KAAK,MAAM,IAAI,KAAK,CAAC;IAC9D,IAAI,QAAQ,EAAE;QACZ,OAAO;KACR;IAED,uDAAuD;IACvD,0CAA0C;IAC1C,4DAA4D;IAC5D,2DAA2D;IAC3D,MAAM,cAAc,GAAG,CAAC,CAAC,EAAE,CAAC,EAAE,EAAE,CAAC,CAAC;IAElC,MAAM,MAAM,GAAG,GAAG,CAAC,YAAY,CAAC,UAAU,EAAE,SAAS,CAAC,CAAC;IAEvD,KAAK,IAAI,OAAO,GAAG,CAAC,EAAE,OAAO,IAAI,cAAc,CAAC,MAAM,EAAE,OAAO,EAAE,EAAE;QACjE,MAAM,MAAM,GACV,IAAI,KAAK,SAAS;YAChB,CAAC,CAAC,gDAAgD;YAClD,CAAC,CAAC,+CAA+C,CAAC;QACtD,MAAM,QAAQ,GAAG,MAAM,MAAM,CAAC,OAAO,CAAC,MAAM,EAAE;YAC5C,KAAK,EAAE,aAAa,CAAC,KAAK;YAC1B,IAAI,EAAE,aAAa,CAAC,IAAI;YACxB,IAAI,EAAE,OAAO;SACd,CAAC,CAAC;QAEH,MAAM,CAAC,KAAK,CAAC,oBAAoB,QAAQ,CAAC,MAAM,EAAE,CAAC,CAAC;QAEpD,MAAM,UAAU,GAAG,QAAQ,CAAC,MAAM,CAAC;QACnC,IAAI,UAAU,KAAK,GAAG,EAAE;YACtB,MAAM,CAAC,IAAI,CAAC,+BAA+B,CAAC,CAAC;YAC7C,OAAO;SACR;QAED,MAAM,SAAS,GAAG,QAAQ,CAAC,OAAO,CAAC,qBAAqB,CAAC,CAAC;QAE1D,oEAAoE;QACpE,IAAI,CAAC,UAAU,IAAI,UAAU,GAAG,GAAG,IAAI,UAAU,IAAI,GAAG,EAAE;YACxD,MAAM,IAAI,KAAK,CACb,kBAAkB,SAAS,OAAO,UAAU,KAAK,IAAI,CAAC,SAAS,CAC7D,QAAQ,CAAC,IAAI,CACd,EAAE,CACJ,CAAC;SACH;QAED,gDAAgD;QAChD,IAAI,OAAO,GAAG,cAAc,CAAC,MAAM,EAAE;YACnC,uEAAuE;YACvE,MAAM,CAAC,OAAO,CACZ,mBAAmB,OAAO,GAAG,CAAC,OAC5B,cAAc,CAAC,MAAM,GAAG,CAC1B,aAAa,SAAS,kBACpB,cAAc,CAAC,OAAO,CACxB,cAAc,UAAU,KAAK,IAAI,CAAC,SAAS,CAAC,QAAQ,CAAC,IAAI,CAAC,EAAE,CAC7D,CAAC;YACF,+BAA+B;YAC/B,MAAM,IAAI,OAAO,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,UAAU,CAAC,CAAC,EAAE,cAAc,CAAC,OAAO,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC;YACxE,SAAS;SACV;aAAM;YACL,wEAAwE;YACxE,wDAAwD;YACxD,qEAAqE;YACrE,MAAM,IAAI,KAAK,CACb,kBAAkB,SAAS,OAAO,UAAU,KAAK,IAAI,CAAC,SAAS,CAC7D,QAAQ,CAAC,IAAI,CACd,EAAE,CACJ,CAAC;SACH;KACF;IAED,qEAAqE;IACrE,yDAAyD;IACzD,MAAM,IAAI,KAAK,CAAC,eAAe,CAAC,CAAC;AACnC,CAAC;AAWD,4DAA4D;AAC5D,kDAAkD;AAClD,qDAAqD;AAC9C,KAAK,UAAU,MAAM,CAC1B,SAAiB,EACjB,aAA4B,EAC5B,SAAiB,EACjB,GAAW,EACX,WAA+B,EAC/B,YAAgC,EAChC,aAAiC,EACjC,YAAoB,EACpB,WAA+B,EAC/B,UAAkB,EAClB,SAAiB,EACjB,IAAe,EACf,MAAc;IAEd,MAAM,UAAU,GAAa,EAAE,CAAC;IAChC,IAAI,CAAC,EAAE,CAAC,UAAU,CAAC,SAAS,CAAC,EAAE;QAC7B,MAAM,IAAI,KAAK,CAAC,wBAAwB,SAAS,EAAE,CAAC,CAAC;KACtD;IACD,IAAI,EAAE,CAAC,SAAS,CAAC,SAAS,CAAC,CAAC,WAAW,EAAE,EAAE;QACzC,MAAM,KAAK,GAAG,EAAE;aACb,WAAW,CAAC,SAAS,CAAC;aACtB,MAAM,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,QAAQ,CAAC,QAAQ,CAAC,CAAC;aACnC,GAAG,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,IAAI,CAAC,OAAO,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,CAAC;QAC1C,KAAK,MAAM,IAAI,IAAI,KAAK,EA
AE;YACxB,UAAU,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC;SACvB;QACD,IAAI,UAAU,CAAC,MAAM,KAAK,CAAC,EAAE;YAC3B,MAAM,IAAI,KAAK,CAAC,sCAAsC,SAAS,IAAI,CAAC,CAAC;SACtE;KACF;SAAM;QACL,UAAU,CAAC,IAAI,CAAC,SAAS,CAAC,CAAC;KAC5B;IAED,OAAO,MAAM,WAAW,CACtB,UAAU,EACV,aAAa,EACb,SAAS,EACT,GAAG,EACH,WAAW,EACX,YAAY,EACZ,aAAa,EACb,YAAY,EACZ,WAAW,EACX,UAAU,EACV,SAAS,EACT,IAAI,EACJ,MAAM,CACP,CAAC;AACJ,CAAC;AAjDD,wBAiDC;AAED,uDAAuD;AACvD,SAAgB,mBAAmB,CAAC,KAAa;IAC/C,IAAI,UAAU,GAAG,CAAC,CAAC;IACnB,KAAK,MAAM,GAAG,IAAI,IAAI,CAAC,KAAK,CAAC,KAAK,CAAC,CAAC,IAAI,EAAE;QACxC,UAAU,IAAI,GAAG,CAAC,OAAO,CAAC,MAAM,CAAC;KAClC;IACD,OAAO,UAAU,CAAC;AACpB,CAAC;AAND,kDAMC;AAED,mEAAmE;AACnE,0CAA0C;AAC1C,SAAgB,uBAAuB,CAAC,aAAqB,EAAE,MAAc;IAC3E,MAAM,KAAK,GAAG,IAAI,CAAC,KAAK,CAAC,EAAE,CAAC,YAAY,CAAC,aAAa,EAAE,MAAM,CAAC,CAAC,CAAC;IACjE,MAAM,MAAM,GAAG,OAAO,CAAC,iCAAiC,CAAC,CAAC;IAE1D,MAAM,MAAM,GAAG,IAAI,UAAU,CAAC,SAAS,EAAE,CAAC,QAAQ,CAAC,KAAK,EAAE,MAAM,CAAC,CAAC;IAClE,IAAI,CAAC,MAAM,CAAC,KAAK,EAAE;QACjB,+EAA+E;QAC/E,KAAK,MAAM,KAAK,IAAI,MAAM,CAAC,MAAM,EAAE;YACjC,MAAM,CAAC,UAAU,CAAC,kBAAkB,KAAK,CAAC,KAAK,EAAE,CAAC,CAAC;YACnD,MAAM,CAAC,IAAI,CAAC,IAAI,CAAC,SAAS,CAAC,KAAK,EAAE,IAAI,EAAE,CAAC,CAAC,CAAC,CAAC;YAC5C,MAAM,CAAC,QAAQ,EAAE,CAAC;SACnB;QAED,8DAA8D;QAC9D,iFAAiF;QACjF,MAAM,WAAW,GAAG,MAAM,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,KAAK,CAAC,CAAC,KAAK,EAAE,CAAC,CAAC;QAC7D,MAAM,IAAI,KAAK,CACb,qBAAqB,aAAa,gCAAgC,WAAW,CAAC,IAAI,CAChF,IAAI,CACL,EAAE,CACJ,CAAC;KACH;AACH,CAAC;AAtBD,0DAsBC;AAED,wCAAwC;AACxC,qDAAqD;AACrD,KAAK,UAAU,WAAW,CACxB,UAAoB,EACpB,aAA4B,EAC5B,SAAiB,EACjB,GAAW,EACX,WAA+B,EAC/B,YAAgC,EAChC,aAAiC,EACjC,YAAoB,EACpB,WAA+B,EAC/B,UAAkB,EAClB,SAAiB,EACjB,IAAe,EACf,MAAc;IAEd,MAAM,CAAC,IAAI,CAAC,0BAA0B,IAAI,CAAC,SAAS,CAAC,UAAU,CAAC,EAAE,CAAC,CAAC;IAEpE,IAAI,IAAI,KAAK,SAAS,EAAE;QACtB,yFAAyF;QACzF,MAAM,cAAc,GAAG,qBAAqB,CAAC;QAC7C,IAAI,OAAO,CAAC,GAAG,CAAC,cAAc,CAAC,EAAE;YAC/B,MAAM,IAAI,KAAK,CACb,uGAAuG,CACxG,CAAC;SACH;QACD,IAAI,CAAC,cAAc,CAAC,cAAc,EAAE,cAAc,CAAC,CAAC;KACrD;IAED,4EAA4E;IAC5E,KAAK,MAAM,IAAI,IAAI,UAAU,EAAE;QAC7B,uBAAuB,CAAC,IAAI,EAAE,MAAM,CAAC,CAAC;KACvC;IAED,IAAI,YAAY,GAAG,iBAAiB,CAAC,UAAU,CAAC,CAAC;IACjD,YAAY,GAAG,YAAY,CAAC,eAAe,CACzC,YAAY,EACZ,YAAY,EACZ,MAAM,CACP,CAAC;IAEF,MAAM,YAAY,GAAG,cAAI,CAAC,QAAQ,CAAC,YAAY,CAAC,CAAC,QAAQ,CAAC,QAAQ,CAAC,CAAC;IACpE,MAAM,WAAW,GAAG,kBAAO,CAAC,YAAY,CAAC,CAAC;IAE1C,MAAM,SAAS,GAAG,IAAI,CAAC,YAAY,CAAC,YAAY,CAAC,CAAC;IAElD,IAAI,OAAe,CAAC;IACpB,IAAI,IAAI,KAAK,SAAS,EAAE;QACtB,OAAO,GAAG,IAAI,CAAC,SAAS,CAAC;YACvB,UAAU,EAAE,SAAS;YACrB,GAAG;YACH,YAAY,EAAE,WAAW;YACzB,aAAa,EAAE,YAAY;YAC3B,KAAK,EAAE,YAAY;YACnB,eAAe,EAAE,aAAa;YAC9B,YAAY,EAAE,WAAW;YACzB,WAAW;YACX,UAAU,EAAE,OAAO,CAAC,GAAG,CAAC,SAAS,CAAC,0BAA0B,CAAC;YAC7D,UAAU,EAAE,SAAS;SACtB,CAAC,CAAC;KACJ;SAAM;QACL,OAAO,GAAG,IAAI,CAAC,SAAS,CAAC;YACvB,UAAU,EAAE,SAAS;YACrB,GAAG;YACH,KAAK,EAAE,YAAY;YACnB,YAAY,EAAE,WAAW;YACzB,SAAS,EAAE,SAAS,CAAC,CAAC,CAAC;SACxB,CAAC,CAAC;KACJ;IAED,4CAA4C;IAC5C,MAAM,kBAAkB,GAAG,YAAY,CAAC,MAAM,CAAC;IAC/C,MAAM,CAAC,KAAK,CAAC,oBAAoB,kBAAkB,QAAQ,CAAC,CAAC;IAC7D,MAAM,qBAAqB,GAAG,YAAY,CAAC,MAAM,CAAC;IAClD,MAAM,CAAC,KAAK,CAAC,8BAA8B,qBAAqB,QAAQ,CAAC,CAAC;IAC1E,MAAM,gBAAgB,GAAG,mBAAmB,CAAC,YAAY,CAAC,CAAC;IAC3D,MAAM,CAAC,KAAK,CAAC,gCAAgC,gBAAgB,EAAE,CAAC,CAAC;IAEjE,kBAAkB;IAClB,MAAM,aAAa,CACjB,OAAO,EACP,aAAa,EACb,UAAU,EACV,SAAS,EACT,IAAI,EACJ,MAAM,CACP,CAAC;IAEF,OAAO;QACL,qBAAqB,EAAE,kBAAkB;QACzC,wBAAwB,EAAE,qBAAqB;QAC/C,oBAAoB,EAAE,gBAAgB;KACvC,CAAC;AACJ,CAAC"} \ No newline at end of file 
+{"version":3,"file":"upload-lib.js","sourceRoot":"","sources":["../src/upload-lib.ts"],"names":[],"mappings":";;;;;;;;;;;;AAAA,oDAAsC;AACtC,wDAA+B;AAC/B,uCAAyB;AACzB,uDAAyC;AACzC,2CAA6B;AAC7B,gDAAwB;AAExB,kDAAoC;AACpC,6DAA+C;AAG/C,gEAAkD;AAClD,6CAA+B;AAE/B,mEAAmE;AACnE,qDAAqD;AACrD,SAAgB,iBAAiB,CAAC,UAAoB;IACpD,MAAM,aAAa,GAAG;QACpB,OAAO,EAAE,IAAI;QACb,IAAI,EAAE,EAAW;KAClB,CAAC;IAEF,KAAK,MAAM,SAAS,IAAI,UAAU,EAAE;QAClC,MAAM,WAAW,GAAG,IAAI,CAAC,KAAK,CAAC,EAAE,CAAC,YAAY,CAAC,SAAS,EAAE,MAAM,CAAC,CAAC,CAAC;QACnE,sBAAsB;QACtB,IAAI,aAAa,CAAC,OAAO,KAAK,IAAI,EAAE;YAClC,aAAa,CAAC,OAAO,GAAG,WAAW,CAAC,OAAO,CAAC;SAC7C;aAAM,IAAI,aAAa,CAAC,OAAO,KAAK,WAAW,CAAC,OAAO,EAAE;YACxD,MAAM,yCAAyC,aAAa,CAAC,OAAO,QAAQ,WAAW,CAAC,OAAO,EAAE,CAAC;SACnG;QAED,aAAa,CAAC,IAAI,CAAC,IAAI,CAAC,GAAG,WAAW,CAAC,IAAI,CAAC,CAAC;KAC9C;IAED,OAAO,IAAI,CAAC,SAAS,CAAC,aAAa,CAAC,CAAC;AACvC,CAAC;AAnBD,8CAmBC;AAED,4BAA4B;AAC5B,qEAAqE;AACrE,KAAK,UAAU,aAAa,CAC1B,OAAY,EACZ,aAA4B,EAC5B,UAAkB,EAClB,SAAiB,EACjB,IAAe,EACf,MAAc;IAEd,MAAM,CAAC,IAAI,CAAC,mBAAmB,CAAC,CAAC;IAEjC,sDAAsD;IACtD,MAAM,QAAQ,GAAG,OAAO,CAAC,GAAG,CAAC,WAAW,CAAC,KAAK,MAAM,IAAI,KAAK,CAAC;IAC9D,IAAI,QAAQ,EAAE;QACZ,OAAO;KACR;IAED,MAAM,MAAM,GAAG,GAAG,CAAC,YAAY,CAAC,UAAU,EAAE,SAAS,CAAC,CAAC;IAEvD,MAAM,MAAM,GACV,IAAI,KAAK,SAAS;QAChB,CAAC,CAAC,gDAAgD;QAClD,CAAC,CAAC,+CAA+C,CAAC;IACtD,MAAM,QAAQ,GAAG,MAAM,MAAM,CAAC,OAAO,CAAC,MAAM,EAAE;QAC5C,KAAK,EAAE,aAAa,CAAC,KAAK;QAC1B,IAAI,EAAE,aAAa,CAAC,IAAI;QACxB,IAAI,EAAE,OAAO;KACd,CAAC,CAAC;IAEH,MAAM,CAAC,KAAK,CAAC,oBAAoB,QAAQ,CAAC,MAAM,EAAE,CAAC,CAAC;IACpD,MAAM,CAAC,IAAI,CAAC,+BAA+B,CAAC,CAAC;AAC/C,CAAC;AAWD,4DAA4D;AAC5D,kDAAkD;AAClD,qDAAqD;AAC9C,KAAK,UAAU,MAAM,CAC1B,SAAiB,EACjB,aAA4B,EAC5B,SAAiB,EACjB,GAAW,EACX,WAA+B,EAC/B,YAAgC,EAChC,aAAiC,EACjC,YAAoB,EACpB,WAA+B,EAC/B,UAAkB,EAClB,SAAiB,EACjB,IAAe,EACf,MAAc;IAEd,MAAM,UAAU,GAAa,EAAE,CAAC;IAChC,IAAI,CAAC,EAAE,CAAC,UAAU,CAAC,SAAS,CAAC,EAAE;QAC7B,MAAM,IAAI,KAAK,CAAC,wBAAwB,SAAS,EAAE,CAAC,CAAC;KACtD;IACD,IAAI,EAAE,CAAC,SAAS,CAAC,SAAS,CAAC,CAAC,WAAW,EAAE,EAAE;QACzC,MAAM,KAAK,GAAG,EAAE;aACb,WAAW,CAAC,SAAS,CAAC;aACtB,MAAM,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,QAAQ,CAAC,QAAQ,CAAC,CAAC;aACnC,GAAG,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,IAAI,CAAC,OAAO,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,CAAC;QAC1C,KAAK,MAAM,IAAI,IAAI,KAAK,EAAE;YACxB,UAAU,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC;SACvB;QACD,IAAI,UAAU,CAAC,MAAM,KAAK,CAAC,EAAE;YAC3B,MAAM,IAAI,KAAK,CAAC,sCAAsC,SAAS,IAAI,CAAC,CAAC;SACtE;KACF;SAAM;QACL,UAAU,CAAC,IAAI,CAAC,SAAS,CAAC,CAAC;KAC5B;IAED,OAAO,MAAM,WAAW,CACtB,UAAU,EACV,aAAa,EACb,SAAS,EACT,GAAG,EACH,WAAW,EACX,YAAY,EACZ,aAAa,EACb,YAAY,EACZ,WAAW,EACX,UAAU,EACV,SAAS,EACT,IAAI,EACJ,MAAM,CACP,CAAC;AACJ,CAAC;AAjDD,wBAiDC;AAED,uDAAuD;AACvD,SAAgB,mBAAmB,CAAC,KAAa;IAC/C,IAAI,UAAU,GAAG,CAAC,CAAC;IACnB,KAAK,MAAM,GAAG,IAAI,IAAI,CAAC,KAAK,CAAC,KAAK,CAAC,CAAC,IAAI,EAAE;QACxC,UAAU,IAAI,GAAG,CAAC,OAAO,CAAC,MAAM,CAAC;KAClC;IACD,OAAO,UAAU,CAAC;AACpB,CAAC;AAND,kDAMC;AAED,mEAAmE;AACnE,0CAA0C;AAC1C,SAAgB,uBAAuB,CAAC,aAAqB,EAAE,MAAc;IAC3E,MAAM,KAAK,GAAG,IAAI,CAAC,KAAK,CAAC,EAAE,CAAC,YAAY,CAAC,aAAa,EAAE,MAAM,CAAC,CAAC,CAAC;IACjE,MAAM,MAAM,GAAG,OAAO,CAAC,iCAAiC,CAAC,CAAC;IAE1D,MAAM,MAAM,GAAG,IAAI,UAAU,CAAC,SAAS,EAAE,CAAC,QAAQ,CAAC,KAAK,EAAE,MAAM,CAAC,CAAC;IAClE,IAAI,CAAC,MAAM,CAAC,KAAK,EAAE;QACjB,+EAA+E;QAC/E,KAAK,MAAM,KAAK,IAAI,MAAM,CAAC,MAAM,EAAE;YACjC,MAAM,CAAC,UAAU,CAAC,kBAAkB,KAAK,CAAC,KAAK,EAAE,CAAC,CAAC;YACnD,MAAM,CAAC,IAAI,CAAC,IAAI,CAAC,SAAS,CAAC,KAAK,EAAE,IAAI,EAAE,CAAC,CAAC,CAAC,CAAC;YAC5C,MAAM,CAAC,QAAQ,EAAE,CAAC;SACnB;QAED,8DAA8D;QAC9D,iFAAiF;QACjF,MAAM,WAAW,GAAG,MAAM,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,KAAK,CAAC,CAAC,KAAK,EA
AE,CAAC,CAAC;QAC7D,MAAM,IAAI,KAAK,CACb,qBAAqB,aAAa,gCAAgC,WAAW,CAAC,IAAI,CAChF,IAAI,CACL,EAAE,CACJ,CAAC;KACH;AACH,CAAC;AAtBD,0DAsBC;AAED,wCAAwC;AACxC,qDAAqD;AACrD,KAAK,UAAU,WAAW,CACxB,UAAoB,EACpB,aAA4B,EAC5B,SAAiB,EACjB,GAAW,EACX,WAA+B,EAC/B,YAAgC,EAChC,aAAiC,EACjC,YAAoB,EACpB,WAA+B,EAC/B,UAAkB,EAClB,SAAiB,EACjB,IAAe,EACf,MAAc;IAEd,MAAM,CAAC,IAAI,CAAC,0BAA0B,IAAI,CAAC,SAAS,CAAC,UAAU,CAAC,EAAE,CAAC,CAAC;IAEpE,IAAI,IAAI,KAAK,SAAS,EAAE;QACtB,yFAAyF;QACzF,MAAM,cAAc,GAAG,qBAAqB,CAAC;QAC7C,IAAI,OAAO,CAAC,GAAG,CAAC,cAAc,CAAC,EAAE;YAC/B,MAAM,IAAI,KAAK,CACb,uGAAuG,CACxG,CAAC;SACH;QACD,IAAI,CAAC,cAAc,CAAC,cAAc,EAAE,cAAc,CAAC,CAAC;KACrD;IAED,4EAA4E;IAC5E,KAAK,MAAM,IAAI,IAAI,UAAU,EAAE;QAC7B,uBAAuB,CAAC,IAAI,EAAE,MAAM,CAAC,CAAC;KACvC;IAED,IAAI,YAAY,GAAG,iBAAiB,CAAC,UAAU,CAAC,CAAC;IACjD,YAAY,GAAG,YAAY,CAAC,eAAe,CACzC,YAAY,EACZ,YAAY,EACZ,MAAM,CACP,CAAC;IAEF,MAAM,YAAY,GAAG,cAAI,CAAC,QAAQ,CAAC,YAAY,CAAC,CAAC,QAAQ,CAAC,QAAQ,CAAC,CAAC;IACpE,MAAM,WAAW,GAAG,kBAAO,CAAC,YAAY,CAAC,CAAC;IAE1C,MAAM,SAAS,GAAG,IAAI,CAAC,YAAY,CAAC,YAAY,CAAC,CAAC;IAElD,IAAI,OAAe,CAAC;IACpB,IAAI,IAAI,KAAK,SAAS,EAAE;QACtB,OAAO,GAAG,IAAI,CAAC,SAAS,CAAC;YACvB,UAAU,EAAE,SAAS;YACrB,GAAG;YACH,YAAY,EAAE,WAAW;YACzB,aAAa,EAAE,YAAY;YAC3B,KAAK,EAAE,YAAY;YACnB,eAAe,EAAE,aAAa;YAC9B,YAAY,EAAE,WAAW;YACzB,WAAW;YACX,UAAU,EAAE,OAAO,CAAC,GAAG,CAAC,SAAS,CAAC,0BAA0B,CAAC;YAC7D,UAAU,EAAE,SAAS;SACtB,CAAC,CAAC;KACJ;SAAM;QACL,OAAO,GAAG,IAAI,CAAC,SAAS,CAAC;YACvB,UAAU,EAAE,SAAS;YACrB,GAAG;YACH,KAAK,EAAE,YAAY;YACnB,YAAY,EAAE,WAAW;YACzB,SAAS,EAAE,SAAS,CAAC,CAAC,CAAC;SACxB,CAAC,CAAC;KACJ;IAED,4CAA4C;IAC5C,MAAM,kBAAkB,GAAG,YAAY,CAAC,MAAM,CAAC;IAC/C,MAAM,CAAC,KAAK,CAAC,oBAAoB,kBAAkB,QAAQ,CAAC,CAAC;IAC7D,MAAM,qBAAqB,GAAG,YAAY,CAAC,MAAM,CAAC;IAClD,MAAM,CAAC,KAAK,CAAC,8BAA8B,qBAAqB,QAAQ,CAAC,CAAC;IAC1E,MAAM,gBAAgB,GAAG,mBAAmB,CAAC,YAAY,CAAC,CAAC;IAC3D,MAAM,CAAC,KAAK,CAAC,gCAAgC,gBAAgB,EAAE,CAAC,CAAC;IAEjE,kBAAkB;IAClB,MAAM,aAAa,CACjB,OAAO,EACP,aAAa,EACb,UAAU,EACV,SAAS,EACT,IAAI,EACJ,MAAM,CACP,CAAC;IAEF,OAAO;QACL,qBAAqB,EAAE,kBAAkB;QACzC,wBAAwB,EAAE,qBAAqB;QAC/C,oBAAoB,EAAE,gBAAgB;KACvC,CAAC;AACJ,CAAC"} \ No newline at end of file diff --git a/node_modules/@octokit/plugin-retry/LICENSE b/node_modules/@octokit/plugin-retry/LICENSE new file mode 100644 index 000000000..12d45d98c --- /dev/null +++ b/node_modules/@octokit/plugin-retry/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2018 Octokit contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
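
The vendored plugin below retries any request that fails with a status of 400 or above, unless the status is in the `doNotRetry` list (`400`, `401`, `403`, `404`, `422` by default), making up to 3 retries with a quadratically growing delay. A small sketch of that delay schedule, derived from the `errorRequest`/`wrapRequest` sources included further down:

```js
// Delay before a retry: (retryCount + 1)^2, scaled by
// retryAfterBaseValue (1000 ms by default in the plugin).
function retryDelayMs(retryCount, retryAfterBaseValue = 1000) {
  return Math.pow(retryCount + 1, 2) * retryAfterBaseValue;
}

console.log([0, 1, 2].map((n) => retryDelayMs(n))); // [ 1000, 4000, 9000 ]
```
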
diff --git a/node_modules/@octokit/plugin-retry/README.md b/node_modules/@octokit/plugin-retry/README.md new file mode 100644 index 000000000..46ec98982 --- /dev/null +++ b/node_modules/@octokit/plugin-retry/README.md @@ -0,0 +1,105 @@ +# plugin-retry.js + +> Retries requests for server 4xx/5xx responses except `400`, `401`, `403`, `404` and `422`. + +[![@latest](https://img.shields.io/npm/v/@octokit/plugin-retry.svg)](https://www.npmjs.com/package/@octokit/plugin-retry) +[![Build Status](https://github.com/octokit/plugin-retry.js/workflows/Test/badge.svg)](https://github.com/octokit/plugin-retry.js/actions?workflow=Test) + +## Usage + +
+Browsers + + +Load `@octokit/plugin-retry` and [`@octokit/core`](https://github.com/octokit/core.js) (or core-compatible module) directly from [cdn.pika.dev](https://cdn.pika.dev) + +```html +<script type="module"> +import { Octokit } from "https://cdn.pika.dev/@octokit/core"; +import { retry } from "https://cdn.pika.dev/@octokit/plugin-retry"; +</script> +``` + +
+Node + + +Install with `npm install @octokit/core @octokit/plugin-retry`. Optionally replace `@octokit/core` with a core-compatible module + +```js +const { Octokit } = require("@octokit/core"); +const { retry } = require("@octokit/plugin-retry"); +``` + +
+ +**Note**: If you use it with `@octokit/rest` v16, install `@octokit/core` as a devDependency. This is only temporary and will no longer be necessary with `@octokit/rest` v17. + +```js +const MyOctokit = Octokit.plugin(retry); +const octokit = new MyOctokit({ auth: "secret123" }); + +// retries request up to 3 times in case of a 500 response +octokit.request("/").catch((error) => { + if (error.request.request.retryCount) { + console.log( + `request failed after ${error.request.request.retryCount} retries` + ); + } + + console.error(error); +}); +``` + +To override the default `doNotRetry` list: + +```js +const octokit = new MyOctokit({ + auth: "secret123", + retry: { + doNotRetry: [ + /* List of HTTP 4xx/5xx status codes */ + ], + }, +}); +``` + +To override the number of retries: + +```js +const octokit = new MyOctokit({ + auth: "secret123", + request: { retries: 1 }, +}); +``` + +You can manually ask for retries for any request by passing `{ request: { retries: numRetries, retryAfter: delayInSeconds }}` + +```js +octokit + .request("/", { request: { retries: 1, retryAfter: 1 } }) + .catch((error) => { + if (error.request.request.retryCount) { + console.log( + `request failed after ${error.request.request.retryCount} retries` + ); + } + + console.error(error); + }); +``` + +Pass `{ retry: { enabled: false } }` to disable this plugin. + +## Contributing + +See [CONTRIBUTING.md](CONTRIBUTING.md) + +## License + +[MIT](LICENSE) diff --git a/node_modules/@octokit/plugin-retry/dist-node/index.js b/node_modules/@octokit/plugin-retry/dist-node/index.js new file mode 100644 index 000000000..674e2c328 --- /dev/null +++ b/node_modules/@octokit/plugin-retry/dist-node/index.js @@ -0,0 +1,75 @@ +'use strict'; + +Object.defineProperty(exports, '__esModule', { value: true }); + +function _interopDefault (ex) { return (ex && (typeof ex === 'object') && 'default' in ex) ? ex['default'] : ex; } + +var Bottleneck = _interopDefault(require('bottleneck/light')); + +// @ts-ignore +async function errorRequest(octokit, state, error, options) { + if (!error.request || !error.request.request) { + // address https://github.com/octokit/plugin-retry.js/issues/8 + throw error; + } // retry all >= 400 && not doNotRetry + + + if (error.status >= 400 && !state.doNotRetry.includes(error.status)) { + const retries = options.request.retries != null ? 
options.request.retries : state.retries; + const retryAfter = Math.pow((options.request.retryCount || 0) + 1, 2); + throw octokit.retry.retryRequest(error, retries, retryAfter); + } // Maybe eventually there will be more cases here + + + throw error; +} + +// @ts-ignore + +async function wrapRequest(state, request, options) { + const limiter = new Bottleneck(); // @ts-ignore + + limiter.on("failed", function (error, info) { + const maxRetries = ~~error.request.request.retries; + const after = ~~error.request.request.retryAfter; + options.request.retryCount = info.retryCount + 1; + + if (maxRetries > info.retryCount) { + // Returning a number instructs the limiter to retry + // the request after that number of milliseconds have passed + return after * state.retryAfterBaseValue; + } + }); + return limiter.schedule(request, options); +} + +const VERSION = "3.0.3"; +function retry(octokit, octokitOptions = {}) { + const state = Object.assign({ + enabled: true, + retryAfterBaseValue: 1000, + doNotRetry: [400, 401, 403, 404, 422], + retries: 3 + }, octokitOptions.retry); + octokit.retry = { + retryRequest: (error, retries, retryAfter) => { + error.request.request = Object.assign({}, error.request.request, { + retries: retries, + retryAfter: retryAfter + }); + return error; + } + }; + + if (!state.enabled) { + return; + } + + octokit.hook.error("request", errorRequest.bind(null, octokit, state)); + octokit.hook.wrap("request", wrapRequest.bind(null, state)); +} +retry.VERSION = VERSION; + +exports.VERSION = VERSION; +exports.retry = retry; +//# sourceMappingURL=index.js.map diff --git a/node_modules/@octokit/plugin-retry/dist-node/index.js.map b/node_modules/@octokit/plugin-retry/dist-node/index.js.map new file mode 100644 index 000000000..e0c9041f2 --- /dev/null +++ b/node_modules/@octokit/plugin-retry/dist-node/index.js.map @@ -0,0 +1 @@ +{"version":3,"file":"index.js","sources":["../dist-src/error-request.js","../dist-src/wrap-request.js","../dist-src/index.js"],"sourcesContent":["// @ts-ignore\nexport async function errorRequest(octokit, state, error, options) {\n if (!error.request || !error.request.request) {\n // address https://github.com/octokit/plugin-retry.js/issues/8\n throw error;\n }\n // retry all >= 400 && not doNotRetry\n if (error.status >= 400 && !state.doNotRetry.includes(error.status)) {\n const retries = options.request.retries != null ? 
options.request.retries : state.retries;\n const retryAfter = Math.pow((options.request.retryCount || 0) + 1, 2);\n throw octokit.retry.retryRequest(error, retries, retryAfter);\n }\n // Maybe eventually there will be more cases here\n throw error;\n}\n","// @ts-ignore\nimport Bottleneck from \"bottleneck/light\";\n// @ts-ignore\nexport async function wrapRequest(state, request, options) {\n const limiter = new Bottleneck();\n // @ts-ignore\n limiter.on(\"failed\", function (error, info) {\n const maxRetries = ~~error.request.request.retries;\n const after = ~~error.request.request.retryAfter;\n options.request.retryCount = info.retryCount + 1;\n if (maxRetries > info.retryCount) {\n // Returning a number instructs the limiter to retry\n // the request after that number of milliseconds have passed\n return after * state.retryAfterBaseValue;\n }\n });\n return limiter.schedule(request, options);\n}\n","import { errorRequest } from \"./error-request\";\nimport { wrapRequest } from \"./wrap-request\";\nexport const VERSION = \"3.0.3\";\nexport function retry(octokit, octokitOptions = {}) {\n const state = Object.assign({\n enabled: true,\n retryAfterBaseValue: 1000,\n doNotRetry: [400, 401, 403, 404, 422],\n retries: 3,\n }, octokitOptions.retry);\n octokit.retry = {\n retryRequest: (error, retries, retryAfter) => {\n error.request.request = Object.assign({}, error.request.request, {\n retries: retries,\n retryAfter: retryAfter,\n });\n return error;\n },\n };\n if (!state.enabled) {\n return;\n }\n octokit.hook.error(\"request\", errorRequest.bind(null, octokit, state));\n octokit.hook.wrap(\"request\", wrapRequest.bind(null, state));\n}\nretry.VERSION = VERSION;\n"],"names":["errorRequest","octokit","state","error","options","request","status","doNotRetry","includes","retries","retryAfter","Math","pow","retryCount","retry","retryRequest","wrapRequest","limiter","Bottleneck","on","info","maxRetries","after","retryAfterBaseValue","schedule","VERSION","octokitOptions","Object","assign","enabled","hook","bind","wrap"],"mappings":";;;;;;;;AAAA;AACO,eAAeA,YAAf,CAA4BC,OAA5B,EAAqCC,KAArC,EAA4CC,KAA5C,EAAmDC,OAAnD,EAA4D;AAC/D,MAAI,CAACD,KAAK,CAACE,OAAP,IAAkB,CAACF,KAAK,CAACE,OAAN,CAAcA,OAArC,EAA8C;AAC1C;AACA,UAAMF,KAAN;AACH,GAJ8D;;;AAM/D,MAAIA,KAAK,CAACG,MAAN,IAAgB,GAAhB,IAAuB,CAACJ,KAAK,CAACK,UAAN,CAAiBC,QAAjB,CAA0BL,KAAK,CAACG,MAAhC,CAA5B,EAAqE;AACjE,UAAMG,OAAO,GAAGL,OAAO,CAACC,OAAR,CAAgBI,OAAhB,IAA2B,IAA3B,GAAkCL,OAAO,CAACC,OAAR,CAAgBI,OAAlD,GAA4DP,KAAK,CAACO,OAAlF;AACA,UAAMC,UAAU,GAAGC,IAAI,CAACC,GAAL,CAAS,CAACR,OAAO,CAACC,OAAR,CAAgBQ,UAAhB,IAA8B,CAA/B,IAAoC,CAA7C,EAAgD,CAAhD,CAAnB;AACA,UAAMZ,OAAO,CAACa,KAAR,CAAcC,YAAd,CAA2BZ,KAA3B,EAAkCM,OAAlC,EAA2CC,UAA3C,CAAN;AACH,GAV8D;;;AAY/D,QAAMP,KAAN;AACH;;ACdD;AACA;AAEA,AAAO,eAAea,WAAf,CAA2Bd,KAA3B,EAAkCG,OAAlC,EAA2CD,OAA3C,EAAoD;AACvD,QAAMa,OAAO,GAAG,IAAIC,UAAJ,EAAhB,CADuD;;AAGvDD,EAAAA,OAAO,CAACE,EAAR,CAAW,QAAX,EAAqB,UAAUhB,KAAV,EAAiBiB,IAAjB,EAAuB;AACxC,UAAMC,UAAU,GAAG,CAAC,CAAClB,KAAK,CAACE,OAAN,CAAcA,OAAd,CAAsBI,OAA3C;AACA,UAAMa,KAAK,GAAG,CAAC,CAACnB,KAAK,CAACE,OAAN,CAAcA,OAAd,CAAsBK,UAAtC;AACAN,IAAAA,OAAO,CAACC,OAAR,CAAgBQ,UAAhB,GAA6BO,IAAI,CAACP,UAAL,GAAkB,CAA/C;;AACA,QAAIQ,UAAU,GAAGD,IAAI,CAACP,UAAtB,EAAkC;AAC9B;AACA;AACA,aAAOS,KAAK,GAAGpB,KAAK,CAACqB,mBAArB;AACH;AACJ,GATD;AAUA,SAAON,OAAO,CAACO,QAAR,CAAiBnB,OAAjB,EAA0BD,OAA1B,CAAP;AACH;;MCfYqB,OAAO,GAAG,mBAAhB;AACP,AAAO,SAASX,KAAT,CAAeb,OAAf,EAAwByB,cAAc,GAAG,EAAzC,EAA6C;AAChD,QAAMxB,KAAK,GAAGyB,MAAM,CAACC,MAAP,CAAc;AACxBC,IAAAA,OAAO,EAAE,IADe;AAExBN,IAAAA,mBAAmB,EAAE,IAFG;AAGxBhB,IAAAA,UAAU,EAAE
,CAAC,GAAD,EAAM,GAAN,EAAW,GAAX,EAAgB,GAAhB,EAAqB,GAArB,CAHY;AAIxBE,IAAAA,OAAO,EAAE;AAJe,GAAd,EAKXiB,cAAc,CAACZ,KALJ,CAAd;AAMAb,EAAAA,OAAO,CAACa,KAAR,GAAgB;AACZC,IAAAA,YAAY,EAAE,CAACZ,KAAD,EAAQM,OAAR,EAAiBC,UAAjB,KAAgC;AAC1CP,MAAAA,KAAK,CAACE,OAAN,CAAcA,OAAd,GAAwBsB,MAAM,CAACC,MAAP,CAAc,EAAd,EAAkBzB,KAAK,CAACE,OAAN,CAAcA,OAAhC,EAAyC;AAC7DI,QAAAA,OAAO,EAAEA,OADoD;AAE7DC,QAAAA,UAAU,EAAEA;AAFiD,OAAzC,CAAxB;AAIA,aAAOP,KAAP;AACH;AAPW,GAAhB;;AASA,MAAI,CAACD,KAAK,CAAC2B,OAAX,EAAoB;AAChB;AACH;;AACD5B,EAAAA,OAAO,CAAC6B,IAAR,CAAa3B,KAAb,CAAmB,SAAnB,EAA8BH,YAAY,CAAC+B,IAAb,CAAkB,IAAlB,EAAwB9B,OAAxB,EAAiCC,KAAjC,CAA9B;AACAD,EAAAA,OAAO,CAAC6B,IAAR,CAAaE,IAAb,CAAkB,SAAlB,EAA6BhB,WAAW,CAACe,IAAZ,CAAiB,IAAjB,EAAuB7B,KAAvB,CAA7B;AACH;AACDY,KAAK,CAACW,OAAN,GAAgBA,OAAhB;;;;;"} \ No newline at end of file diff --git a/node_modules/@octokit/plugin-retry/dist-src/error-request.js b/node_modules/@octokit/plugin-retry/dist-src/error-request.js new file mode 100644 index 000000000..b79b01cb0 --- /dev/null +++ b/node_modules/@octokit/plugin-retry/dist-src/error-request.js @@ -0,0 +1,15 @@ +// @ts-ignore +export async function errorRequest(octokit, state, error, options) { + if (!error.request || !error.request.request) { + // address https://github.com/octokit/plugin-retry.js/issues/8 + throw error; + } + // retry all >= 400 && not doNotRetry + if (error.status >= 400 && !state.doNotRetry.includes(error.status)) { + const retries = options.request.retries != null ? options.request.retries : state.retries; + const retryAfter = Math.pow((options.request.retryCount || 0) + 1, 2); + throw octokit.retry.retryRequest(error, retries, retryAfter); + } + // Maybe eventually there will be more cases here + throw error; +} diff --git a/node_modules/@octokit/plugin-retry/dist-src/index.js b/node_modules/@octokit/plugin-retry/dist-src/index.js new file mode 100644 index 000000000..7849095ef --- /dev/null +++ b/node_modules/@octokit/plugin-retry/dist-src/index.js @@ -0,0 +1,26 @@ +import { errorRequest } from "./error-request"; +import { wrapRequest } from "./wrap-request"; +export const VERSION = "0.0.0-development"; +export function retry(octokit, octokitOptions = {}) { + const state = Object.assign({ + enabled: true, + retryAfterBaseValue: 1000, + doNotRetry: [400, 401, 403, 404, 422], + retries: 3, + }, octokitOptions.retry); + octokit.retry = { + retryRequest: (error, retries, retryAfter) => { + error.request.request = Object.assign({}, error.request.request, { + retries: retries, + retryAfter: retryAfter, + }); + return error; + }, + }; + if (!state.enabled) { + return; + } + octokit.hook.error("request", errorRequest.bind(null, octokit, state)); + octokit.hook.wrap("request", wrapRequest.bind(null, state)); +} +retry.VERSION = VERSION; diff --git a/node_modules/@octokit/plugin-retry/dist-src/version.js b/node_modules/@octokit/plugin-retry/dist-src/version.js new file mode 100644 index 000000000..9350c15e0 --- /dev/null +++ b/node_modules/@octokit/plugin-retry/dist-src/version.js @@ -0,0 +1 @@ +export const VERSION = "3.0.3"; diff --git a/node_modules/@octokit/plugin-retry/dist-src/wrap-request.js b/node_modules/@octokit/plugin-retry/dist-src/wrap-request.js new file mode 100644 index 000000000..1454e2fb5 --- /dev/null +++ b/node_modules/@octokit/plugin-retry/dist-src/wrap-request.js @@ -0,0 +1,18 @@ +// @ts-ignore +import Bottleneck from "bottleneck/light"; +// @ts-ignore +export async function wrapRequest(state, request, options) { + const limiter = new Bottleneck(); + // @ts-ignore + limiter.on("failed", function (error, 
info) { + const maxRetries = ~~error.request.request.retries; + const after = ~~error.request.request.retryAfter; + options.request.retryCount = info.retryCount + 1; + if (maxRetries > info.retryCount) { + // Returning a number instructs the limiter to retry + // the request after that number of milliseconds have passed + return after * state.retryAfterBaseValue; + } + }); + return limiter.schedule(request, options); +} diff --git a/node_modules/@octokit/plugin-retry/dist-types/error-request.d.ts b/node_modules/@octokit/plugin-retry/dist-types/error-request.d.ts new file mode 100644 index 000000000..220f3ca6e --- /dev/null +++ b/node_modules/@octokit/plugin-retry/dist-types/error-request.d.ts @@ -0,0 +1 @@ +export declare function errorRequest(octokit: any, state: any, error: any, options: any): Promise; diff --git a/node_modules/@octokit/plugin-retry/dist-types/index.d.ts b/node_modules/@octokit/plugin-retry/dist-types/index.d.ts new file mode 100644 index 000000000..92d5e4f31 --- /dev/null +++ b/node_modules/@octokit/plugin-retry/dist-types/index.d.ts @@ -0,0 +1,6 @@ +import { Octokit } from "@octokit/core"; +export declare const VERSION = "0.0.0-development"; +export declare function retry(octokit: Octokit, octokitOptions?: ConstructorParameters[0]): void; +export declare namespace retry { + var VERSION: string; +} diff --git a/node_modules/@octokit/plugin-retry/dist-types/version.d.ts b/node_modules/@octokit/plugin-retry/dist-types/version.d.ts new file mode 100644 index 000000000..09c2448d3 --- /dev/null +++ b/node_modules/@octokit/plugin-retry/dist-types/version.d.ts @@ -0,0 +1 @@ +export declare const VERSION = "3.0.3"; diff --git a/node_modules/@octokit/plugin-retry/dist-types/wrap-request.d.ts b/node_modules/@octokit/plugin-retry/dist-types/wrap-request.d.ts new file mode 100644 index 000000000..9333a888b --- /dev/null +++ b/node_modules/@octokit/plugin-retry/dist-types/wrap-request.d.ts @@ -0,0 +1 @@ +export declare function wrapRequest(state: any, request: any, options: any): Promise; diff --git a/node_modules/@octokit/plugin-retry/dist-web/index.js b/node_modules/@octokit/plugin-retry/dist-web/index.js new file mode 100644 index 000000000..bbeea9bf8 --- /dev/null +++ b/node_modules/@octokit/plugin-retry/dist-web/index.js @@ -0,0 +1,63 @@ +import Bottleneck from 'bottleneck/light'; + +// @ts-ignore +async function errorRequest(octokit, state, error, options) { + if (!error.request || !error.request.request) { + // address https://github.com/octokit/plugin-retry.js/issues/8 + throw error; + } + // retry all >= 400 && not doNotRetry + if (error.status >= 400 && !state.doNotRetry.includes(error.status)) { + const retries = options.request.retries != null ? 
options.request.retries : state.retries; + const retryAfter = Math.pow((options.request.retryCount || 0) + 1, 2); + throw octokit.retry.retryRequest(error, retries, retryAfter); + } + // Maybe eventually there will be more cases here + throw error; +} + +// @ts-ignore +// @ts-ignore +async function wrapRequest(state, request, options) { + const limiter = new Bottleneck(); + // @ts-ignore + limiter.on("failed", function (error, info) { + const maxRetries = ~~error.request.request.retries; + const after = ~~error.request.request.retryAfter; + options.request.retryCount = info.retryCount + 1; + if (maxRetries > info.retryCount) { + // Returning a number instructs the limiter to retry + // the request after that number of milliseconds have passed + return after * state.retryAfterBaseValue; + } + }); + return limiter.schedule(request, options); +} + +const VERSION = "3.0.3"; +function retry(octokit, octokitOptions = {}) { + const state = Object.assign({ + enabled: true, + retryAfterBaseValue: 1000, + doNotRetry: [400, 401, 403, 404, 422], + retries: 3, + }, octokitOptions.retry); + octokit.retry = { + retryRequest: (error, retries, retryAfter) => { + error.request.request = Object.assign({}, error.request.request, { + retries: retries, + retryAfter: retryAfter, + }); + return error; + }, + }; + if (!state.enabled) { + return; + } + octokit.hook.error("request", errorRequest.bind(null, octokit, state)); + octokit.hook.wrap("request", wrapRequest.bind(null, state)); +} +retry.VERSION = VERSION; + +export { VERSION, retry }; +//# sourceMappingURL=index.js.map diff --git a/node_modules/@octokit/plugin-retry/dist-web/index.js.map b/node_modules/@octokit/plugin-retry/dist-web/index.js.map new file mode 100644 index 000000000..94a012fa6 --- /dev/null +++ b/node_modules/@octokit/plugin-retry/dist-web/index.js.map @@ -0,0 +1 @@ +{"version":3,"file":"index.js","sources":["../dist-src/error-request.js","../dist-src/wrap-request.js","../dist-src/index.js"],"sourcesContent":["// @ts-ignore\nexport async function errorRequest(octokit, state, error, options) {\n if (!error.request || !error.request.request) {\n // address https://github.com/octokit/plugin-retry.js/issues/8\n throw error;\n }\n // retry all >= 400 && not doNotRetry\n if (error.status >= 400 && !state.doNotRetry.includes(error.status)) {\n const retries = options.request.retries != null ? 
options.request.retries : state.retries;\n const retryAfter = Math.pow((options.request.retryCount || 0) + 1, 2);\n throw octokit.retry.retryRequest(error, retries, retryAfter);\n }\n // Maybe eventually there will be more cases here\n throw error;\n}\n","// @ts-ignore\nimport Bottleneck from \"bottleneck/light\";\n// @ts-ignore\nexport async function wrapRequest(state, request, options) {\n const limiter = new Bottleneck();\n // @ts-ignore\n limiter.on(\"failed\", function (error, info) {\n const maxRetries = ~~error.request.request.retries;\n const after = ~~error.request.request.retryAfter;\n options.request.retryCount = info.retryCount + 1;\n if (maxRetries > info.retryCount) {\n // Returning a number instructs the limiter to retry\n // the request after that number of milliseconds have passed\n return after * state.retryAfterBaseValue;\n }\n });\n return limiter.schedule(request, options);\n}\n","import { errorRequest } from \"./error-request\";\nimport { wrapRequest } from \"./wrap-request\";\nexport const VERSION = \"3.0.3\";\nexport function retry(octokit, octokitOptions = {}) {\n const state = Object.assign({\n enabled: true,\n retryAfterBaseValue: 1000,\n doNotRetry: [400, 401, 403, 404, 422],\n retries: 3,\n }, octokitOptions.retry);\n octokit.retry = {\n retryRequest: (error, retries, retryAfter) => {\n error.request.request = Object.assign({}, error.request.request, {\n retries: retries,\n retryAfter: retryAfter,\n });\n return error;\n },\n };\n if (!state.enabled) {\n return;\n }\n octokit.hook.error(\"request\", errorRequest.bind(null, octokit, state));\n octokit.hook.wrap(\"request\", wrapRequest.bind(null, state));\n}\nretry.VERSION = VERSION;\n"],"names":[],"mappings":";;AAAA;AACO,eAAe,YAAY,CAAC,OAAO,EAAE,KAAK,EAAE,KAAK,EAAE,OAAO,EAAE;AACnE,IAAI,IAAI,CAAC,KAAK,CAAC,OAAO,IAAI,CAAC,KAAK,CAAC,OAAO,CAAC,OAAO,EAAE;AAClD;AACA,QAAQ,MAAM,KAAK,CAAC;AACpB,KAAK;AACL;AACA,IAAI,IAAI,KAAK,CAAC,MAAM,IAAI,GAAG,IAAI,CAAC,KAAK,CAAC,UAAU,CAAC,QAAQ,CAAC,KAAK,CAAC,MAAM,CAAC,EAAE;AACzE,QAAQ,MAAM,OAAO,GAAG,OAAO,CAAC,OAAO,CAAC,OAAO,IAAI,IAAI,GAAG,OAAO,CAAC,OAAO,CAAC,OAAO,GAAG,KAAK,CAAC,OAAO,CAAC;AAClG,QAAQ,MAAM,UAAU,GAAG,IAAI,CAAC,GAAG,CAAC,CAAC,OAAO,CAAC,OAAO,CAAC,UAAU,IAAI,CAAC,IAAI,CAAC,EAAE,CAAC,CAAC,CAAC;AAC9E,QAAQ,MAAM,OAAO,CAAC,KAAK,CAAC,YAAY,CAAC,KAAK,EAAE,OAAO,EAAE,UAAU,CAAC,CAAC;AACrE,KAAK;AACL;AACA,IAAI,MAAM,KAAK,CAAC;AAChB;;ACdA;AACA,AACA;AACA,AAAO,eAAe,WAAW,CAAC,KAAK,EAAE,OAAO,EAAE,OAAO,EAAE;AAC3D,IAAI,MAAM,OAAO,GAAG,IAAI,UAAU,EAAE,CAAC;AACrC;AACA,IAAI,OAAO,CAAC,EAAE,CAAC,QAAQ,EAAE,UAAU,KAAK,EAAE,IAAI,EAAE;AAChD,QAAQ,MAAM,UAAU,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,CAAC,OAAO,CAAC,OAAO,CAAC;AAC3D,QAAQ,MAAM,KAAK,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,CAAC,OAAO,CAAC,UAAU,CAAC;AACzD,QAAQ,OAAO,CAAC,OAAO,CAAC,UAAU,GAAG,IAAI,CAAC,UAAU,GAAG,CAAC,CAAC;AACzD,QAAQ,IAAI,UAAU,GAAG,IAAI,CAAC,UAAU,EAAE;AAC1C;AACA;AACA,YAAY,OAAO,KAAK,GAAG,KAAK,CAAC,mBAAmB,CAAC;AACrD,SAAS;AACT,KAAK,CAAC,CAAC;AACP,IAAI,OAAO,OAAO,CAAC,QAAQ,CAAC,OAAO,EAAE,OAAO,CAAC,CAAC;AAC9C,CAAC;;ACfW,MAAC,OAAO,GAAG,mBAAmB,CAAC;AAC3C,AAAO,SAAS,KAAK,CAAC,OAAO,EAAE,cAAc,GAAG,EAAE,EAAE;AACpD,IAAI,MAAM,KAAK,GAAG,MAAM,CAAC,MAAM,CAAC;AAChC,QAAQ,OAAO,EAAE,IAAI;AACrB,QAAQ,mBAAmB,EAAE,IAAI;AACjC,QAAQ,UAAU,EAAE,CAAC,GAAG,EAAE,GAAG,EAAE,GAAG,EAAE,GAAG,EAAE,GAAG,CAAC;AAC7C,QAAQ,OAAO,EAAE,CAAC;AAClB,KAAK,EAAE,cAAc,CAAC,KAAK,CAAC,CAAC;AAC7B,IAAI,OAAO,CAAC,KAAK,GAAG;AACpB,QAAQ,YAAY,EAAE,CAAC,KAAK,EAAE,OAAO,EAAE,UAAU,KAAK;AACtD,YAAY,KAAK,CAAC,OAAO,CAAC,OAAO,GAAG,MAAM,CAAC,MAAM,CAAC,EAAE,EAAE,KAAK,CAAC,OAAO,CAAC,OAAO,EAAE;AAC7E,gBAAgB,OAAO,EAAE,OAAO;AAChC,gBAAgB,UAAU,
EAAE,UAAU;AACtC,aAAa,CAAC,CAAC;AACf,YAAY,OAAO,KAAK,CAAC;AACzB,SAAS;AACT,KAAK,CAAC;AACN,IAAI,IAAI,CAAC,KAAK,CAAC,OAAO,EAAE;AACxB,QAAQ,OAAO;AACf,KAAK;AACL,IAAI,OAAO,CAAC,IAAI,CAAC,KAAK,CAAC,SAAS,EAAE,YAAY,CAAC,IAAI,CAAC,IAAI,EAAE,OAAO,EAAE,KAAK,CAAC,CAAC,CAAC;AAC3E,IAAI,OAAO,CAAC,IAAI,CAAC,IAAI,CAAC,SAAS,EAAE,WAAW,CAAC,IAAI,CAAC,IAAI,EAAE,KAAK,CAAC,CAAC,CAAC;AAChE,CAAC;AACD,KAAK,CAAC,OAAO,GAAG,OAAO,CAAC;;;;"} \ No newline at end of file diff --git a/node_modules/@octokit/plugin-retry/package.json b/node_modules/@octokit/plugin-retry/package.json new file mode 100644 index 000000000..8915a50df --- /dev/null +++ b/node_modules/@octokit/plugin-retry/package.json @@ -0,0 +1,49 @@ +{ + "name": "@octokit/plugin-retry", + "description": "Automatic retry plugin for octokit", + "version": "3.0.3", + "license": "MIT", + "files": [ + "dist-*/", + "bin/" + ], + "pika": true, + "sideEffects": false, + "homepage": "https://github.com/octokit/plugin-retry.js#readme", + "bugs": { + "url": "https://github.com/octokit/plugin-retry.js/issues" + }, + "repository": { + "type": "git", + "url": "https://github.com/octokit/plugin-retry.js.git" + }, + "dependencies": { + "@octokit/types": "^5.0.0", + "bottleneck": "^2.15.3" + }, + "devDependencies": { + "@octokit/core": "^2.0.0", + "@octokit/request-error": "^2.0.0", + "@pika/pack": "^0.5.0", + "@pika/plugin-build-node": "^0.9.0", + "@pika/plugin-build-web": "^0.9.0", + "@pika/plugin-ts-standard-pkg": "^0.9.0", + "@types/fetch-mock": "^7.3.1", + "@types/jest": "^26.0.0", + "@types/node": "^14.0.0", + "fetch-mock": "^9.0.0", + "jest": "^26.0.1", + "prettier": "^2.0.1", + "semantic-release": "^17.0.0", + "semantic-release-plugin-update-version-in-files": "^1.0.0", + "ts-jest": "^26.0.0", + "typescript": "^3.7.2" + }, + "publishConfig": { + "access": "public" + }, + "source": "dist-src/index.js", + "types": "dist-types/index.d.ts", + "main": "dist-node/index.js", + "module": "dist-web/index.js" +} \ No newline at end of file diff --git a/node_modules/bottleneck/.babelrc.es5 b/node_modules/bottleneck/.babelrc.es5 new file mode 100644 index 000000000..e7120e3d0 --- /dev/null +++ b/node_modules/bottleneck/.babelrc.es5 @@ -0,0 +1,5 @@ +{ + "presets": [ + ["@babel/preset-env", {}] + ] +} \ No newline at end of file diff --git a/node_modules/bottleneck/.babelrc.lib b/node_modules/bottleneck/.babelrc.lib new file mode 100644 index 000000000..de9dbbad9 --- /dev/null +++ b/node_modules/bottleneck/.babelrc.lib @@ -0,0 +1,9 @@ +{ + "presets": [ + ["@babel/preset-env", { + "targets": { + "node": "6.0" + } + }] + ] +} \ No newline at end of file diff --git a/node_modules/bottleneck/.env b/node_modules/bottleneck/.env new file mode 100644 index 000000000..7afc96eec --- /dev/null +++ b/node_modules/bottleneck/.env @@ -0,0 +1,2 @@ +REDIS_HOST=127.0.0.1 +REDIS_PORT=6379 diff --git a/node_modules/bottleneck/.travis.yml b/node_modules/bottleneck/.travis.yml new file mode 100644 index 000000000..8204ece5c --- /dev/null +++ b/node_modules/bottleneck/.travis.yml @@ -0,0 +1,25 @@ +language: node_js +node_js: + - 8 +branches: + only: + - master + - next +services: + - redis-server +env: + global: + - "REDIS_HOST=127.0.0.1" + - "REDIS_PORT=6379" +cache: + directories: + - $HOME/.npm +install: +- npm i +sudo: required +after_success: npx codecov --file=./coverage/lcov.info +script: npm run test-all + +before_install: + - npm i -g npm@5.10 + - npm --version \ No newline at end of file diff --git a/node_modules/bottleneck/LICENSE b/node_modules/bottleneck/LICENSE new file mode 100644 index 
000000000..835fc3145 --- /dev/null +++ b/node_modules/bottleneck/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2014 Simon Grondin + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/node_modules/bottleneck/README.md b/node_modules/bottleneck/README.md new file mode 100644 index 000000000..bb8d52f84 --- /dev/null +++ b/node_modules/bottleneck/README.md @@ -0,0 +1,1027 @@ +# bottleneck + +[![Downloads][npm-downloads]][npm-url] +[![version][npm-version]][npm-url] +[![License][npm-license]][license-url] + + +Bottleneck is a lightweight and zero-dependency Task Scheduler and Rate Limiter for Node.js and the browser. + +Bottleneck is an easy solution as it adds very little complexity to your code. It is battle-hardened, reliable and production-ready and used on a large scale in private companies and open source software. + +It supports **Clustering**: it can rate limit jobs across multiple Node.js instances. It uses Redis and strictly atomic operations to stay reliable in the presence of unreliable clients and networks. It also supports *Redis Cluster* and *Redis Sentinel*. + +**[Upgrading from version 1?](#upgrading-to-v2)** + + + +- [Install](#install) +- [Quick Start](#quick-start) + * [Gotchas & Common Mistakes](#gotchas--common-mistakes) +- [Constructor](#constructor) +- [Reservoir Intervals](#reservoir-intervals) +- [`submit()`](#submit) +- [`schedule()`](#schedule) +- [`wrap()`](#wrap) +- [Job Options](#job-options) +- [Jobs Lifecycle](#jobs-lifecycle) +- [Events](#events) +- [Retries](#retries) +- [`updateSettings()`](#updatesettings) +- [`incrementReservoir()`](#incrementreservoir) +- [`currentReservoir()`](#currentreservoir) +- [`stop()`](#stop) +- [`chain()`](#chain) +- [Group](#group) +- [Batching](#batching) +- [Clustering](#clustering) +- [Debugging Your Application](#debugging-your-application) +- [Upgrading To v2](#upgrading-to-v2) +- [Contributing](#contributing) + + + +## Install + +``` +npm install --save bottleneck +``` + +```js +import Bottleneck from "bottleneck"; + +// Note: To support older browsers and Node <6.0, you must import the ES5 bundle instead. +var Bottleneck = require("bottleneck/es5"); +``` + +## Quick Start + +### Step 1 of 3 + +Most APIs have a rate limit. 
For example, to execute 3 requests per second: +```js +const limiter = new Bottleneck({ + minTime: 333 +}); +``` + +If there's a chance some requests might take longer than 333ms and you want to prevent more than 1 request from running at a time, add `maxConcurrent: 1`: +```js +const limiter = new Bottleneck({ + maxConcurrent: 1, + minTime: 333 +}); +``` + +`minTime` and `maxConcurrent` are enough for the majority of use cases. They work well together to ensure a smooth rate of requests. If your use case requires executing requests in **bursts** or every time a quota resets, look into [Reservoir Intervals](#reservoir-intervals). + +### Step 2 of 3 + +#### ➤ Using promises? + +Instead of this: +```js +myFunction(arg1, arg2) +.then((result) => { + /* handle result */ +}); +``` +Do this: +```js +limiter.schedule(() => myFunction(arg1, arg2)) +.then((result) => { + /* handle result */ +}); +``` +Or this: +```js +const wrapped = limiter.wrap(myFunction); + +wrapped(arg1, arg2) +.then((result) => { + /* handle result */ +}); +``` + +#### ➤ Using async/await? + +Instead of this: +```js +const result = await myFunction(arg1, arg2); +``` +Do this: +```js +const result = await limiter.schedule(() => myFunction(arg1, arg2)); +``` +Or this: +```js +const wrapped = limiter.wrap(myFunction); + +const result = await wrapped(arg1, arg2); +``` + +#### ➤ Using callbacks? + +Instead of this: +```js +someAsyncCall(arg1, arg2, callback); +``` +Do this: +```js +limiter.submit(someAsyncCall, arg1, arg2, callback); +``` + +### Step 3 of 3 + +Remember... + +Bottleneck builds a queue of jobs and executes them as soon as possible. By default, the jobs will be executed in the order they were received. + +**Read the 'Gotchas' and you're good to go**. Or keep reading to learn about all the fine tuning and advanced options available. If your rate limits need to be enforced across a cluster of computers, read the [Clustering](#clustering) docs. + +[Need help debugging your application?](#debugging-your-application) + +Instead of throttling, maybe [you want to batch up requests](#batching) into fewer calls? + +### Gotchas & Common Mistakes + +* Make sure the function you pass to `schedule()` or `wrap()` only returns once **all the work it does** has completed. + +Instead of this: +```js +limiter.schedule(() => { + tasksArray.forEach(x => processTask(x)); + // BAD, we return before our processTask() functions are finished processing! +}); +``` +Do this: +```js +limiter.schedule(() => { + const allTasks = tasksArray.map(x => processTask(x)); + // GOOD, we wait until all tasks are done. + return Promise.all(allTasks); +}); +``` + +* If you're passing an object's method as a job, you'll probably need to `bind()` the object: +```js +// instead of this: +limiter.schedule(object.doSomething); +// do this: +limiter.schedule(object.doSomething.bind(object)); +// or, wrap it in an arrow function instead: +limiter.schedule(() => object.doSomething()); +``` + +* Bottleneck requires Node 6+ to function. However, an ES5 build is included: `var Bottleneck = require("bottleneck/es5");`. + +* Make sure you're catching `"error"` events emitted by your limiters! + +* Consider setting a `maxConcurrent` value instead of leaving it `null`. This can help your application's performance, especially if you think the limiter's queue might become very long. + +* If you plan on using `priorities`, make sure to set a `maxConcurrent` value. + +* **When using `submit()`**, if a callback isn't necessary, you must pass `null` or an empty function instead. 
It will not work otherwise. + +* **When using `submit()`**, make sure all the jobs will eventually complete by calling their callback, or set an [`expiration`](#job-options). Even if you submitted your job with a `null` callback, it still needs to call its callback. This is particularly important if you are using a `maxConcurrent` value that isn't `null` (unlimited); otherwise those uncompleted jobs will clog up the limiter and no new jobs will be allowed to run. It's safe to call the callback more than once; subsequent calls are ignored. + +## Docs + +### Constructor + +```js +const limiter = new Bottleneck({/* options */}); +``` + +Basic options: + +| Option | Default | Description | +|--------|---------|-------------| +| `maxConcurrent` | `null` (unlimited) | How many jobs can be executing at the same time. Consider setting a value instead of leaving it `null`; it can help your application's performance, especially if you think the limiter's queue might get very long. | +| `minTime` | `0` ms | How long to wait after launching a job before launching another one. | +| `highWater` | `null` (unlimited) | How long can the queue be? When the queue length exceeds that value, the selected `strategy` is executed to shed the load. | +| `strategy` | `Bottleneck.strategy.LEAK` | Which strategy to use when the queue gets longer than the high water mark. [Read about strategies](#strategies). Strategies are never executed if `highWater` is `null`. | +| `penalty` | `15 * minTime`, or `5000` when `minTime` is `0` | The `penalty` value used by the `BLOCK` strategy. | +| `reservoir` | `null` (unlimited) | How many jobs can be executed before the limiter stops executing jobs. If `reservoir` reaches `0`, no jobs will be executed until it is no longer `0`. New jobs will still be queued up. | +| `reservoirRefreshInterval` | `null` (disabled) | Every `reservoirRefreshInterval` milliseconds, the `reservoir` value will be automatically updated to the value of `reservoirRefreshAmount`. The `reservoirRefreshInterval` value should be a [multiple of 250 (5000 for Clustering)](https://github.com/SGrondin/bottleneck/issues/88). | +| `reservoirRefreshAmount` | `null` (disabled) | The value to set `reservoir` to when `reservoirRefreshInterval` is in use. | +| `reservoirIncreaseInterval` | `null` (disabled) | Every `reservoirIncreaseInterval` milliseconds, the `reservoir` value will be automatically incremented by `reservoirIncreaseAmount`. The `reservoirIncreaseInterval` value should be a [multiple of 250 (5000 for Clustering)](https://github.com/SGrondin/bottleneck/issues/88). | +| `reservoirIncreaseAmount` | `null` (disabled) | The increment applied to `reservoir` when `reservoirIncreaseInterval` is in use. | +| `reservoirIncreaseMaximum` | `null` (disabled) | The maximum value that `reservoir` can reach when `reservoirIncreaseInterval` is in use. | +| `Promise` | `Promise` (built-in) | This lets you override the Promise library used by Bottleneck. | + + +### Reservoir Intervals + +Reservoir Intervals let you execute requests in bursts, by automatically controlling the limiter's `reservoir` value. The `reservoir` is simply the number of jobs the limiter is allowed to execute. Once the value reaches 0, it stops starting new jobs. + +There are 2 types of Reservoir Intervals: Refresh Intervals and Increase Intervals. 
+ +#### Refresh Interval + +In this example, we throttle to 100 requests every 60 seconds: + +```js +const limiter = new Bottleneck({ + reservoir: 100, // initial value + reservoirRefreshAmount: 100, + reservoirRefreshInterval: 60 * 1000, // must be divisible by 250 + + // also use maxConcurrent and/or minTime for safety + maxConcurrent: 1, + minTime: 333 // pick a value that makes sense for your use case +}); +``` +`reservoir` is a counter decremented every time a job is launched; we set its initial value to 100. Then, every `reservoirRefreshInterval` (60000 ms), `reservoir` is automatically updated to be equal to the `reservoirRefreshAmount` (100). + +#### Increase Interval + +In this example, we throttle jobs to meet the Shopify API Rate Limits. Users are allowed to send 40 requests initially, then every second grants 2 more requests up to a maximum of 40. + +```js +const limiter = new Bottleneck({ + reservoir: 40, // initial value + reservoirIncreaseAmount: 2, + reservoirIncreaseInterval: 1000, // must be divisible by 250 + reservoirIncreaseMaximum: 40, + + // also use maxConcurrent and/or minTime for safety + maxConcurrent: 5, + minTime: 250 // pick a value that makes sense for your use case +}); +``` + +#### Warnings + +Reservoir Intervals are an advanced feature; please take the time to read and understand the following warnings. + +- **Reservoir Intervals are not a replacement for `minTime` and `maxConcurrent`.** It's strongly recommended to also use `minTime` and/or `maxConcurrent` to spread out the load. For example, suppose a lot of jobs are queued up because the `reservoir` is 0. Every time the Refresh Interval is triggered, a number of jobs equal to `reservoirRefreshAmount` will automatically be launched, all at the same time! To prevent this flooding effect and keep your application running smoothly, use `minTime` and `maxConcurrent` to **stagger** the jobs. + +- **The Reservoir Interval starts from the moment the limiter is created**. Let's suppose we're using `reservoirRefreshAmount: 5`. If you happen to add 10 jobs just 1ms before the refresh is triggered, the first 5 will run immediately, then 1ms later it will refresh the reservoir value and that will make the last 5 also run right away. It will have run 10 jobs in just over 1ms no matter what your reservoir interval was! + +- **Reservoir Intervals prevent a limiter from being garbage collected.** Call `limiter.disconnect()` to clear the interval and allow the memory to be freed. However, it's not necessary to call `.disconnect()` to allow the Node.js process to exit. + +### submit() + +Adds a job to the queue. This is the callback version of `schedule()`. +```js +limiter.submit(someAsyncCall, arg1, arg2, callback); +``` +You can pass `null` instead of an empty function if there is no callback, but `someAsyncCall` still needs to call **its** callback to let the limiter know it has completed its work. + +`submit()` can also accept [advanced options](#job-options). + +### schedule() + +Adds a job to the queue. This is the Promise and async/await version of `submit()`. +```js +const fn = function(arg1, arg2) { + return httpGet(arg1, arg2); // Here httpGet() returns a promise +}; + +limiter.schedule(fn, arg1, arg2) +.then((result) => { + /* ... */ +}); +``` +In other words, `schedule()` takes a function **fn** and a list of arguments. `schedule()` returns a promise for the result of **fn**, which will be executed according to the rate limits. + +`schedule()` can also accept [advanced options](#job-options). 
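+ +For instance, a minimal sketch of `schedule()` with [Job Options](#job-options) (the `id` and `weight` values here are purely illustrative): +```js +// The options object, when provided, comes before the function and its arguments. +const result = await limiter.schedule({ id: "job-1", weight: 2 }, fn, arg1, arg2); +``` 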
+ +Here's another example: +```js +// suppose that `client.get(url)` returns a promise + +const url = "https://wikipedia.org"; + +limiter.schedule(() => client.get(url)) +.then(response => console.log(response.body)); +``` + +### wrap() + +Takes a function that returns a promise. Returns a function identical to the original, but rate limited. +```js +const wrapped = limiter.wrap(fn); + +wrapped() +.then(function (result) { + /* ... */ +}) +.catch(function (error) { + // Bottleneck might need to fail the job even if the original function can never fail. + // For example, your job is taking longer than the `expiration` time you've set. +}); +``` + +### Job Options + +`submit()`, `schedule()`, and `wrap()` all accept advanced options. +```js +// Submit +limiter.submit({/* options */}, someAsyncCall, arg1, arg2, callback); + +// Schedule +limiter.schedule({/* options */}, fn, arg1, arg2); + +// Wrap +const wrapped = limiter.wrap(fn); +wrapped.withOptions({/* options */}, arg1, arg2); +``` + +| Option | Default | Description | +|--------|---------|-------------| +| `priority` | `5` | A priority between `0` and `9`. A job with a priority of `4` will be queued ahead of a job with a priority of `5`. **Important:** You must set a low `maxConcurrent` value for priorities to work; otherwise there is nothing to queue because jobs will be scheduled immediately! | +| `weight` | `1` | Must be an integer equal to or higher than `0`. The `weight` is what increases the number of running jobs (up to `maxConcurrent`) and decreases the `reservoir` value. | +| `expiration` | `null` (unlimited) | The number of milliseconds a job is given to complete. Jobs that execute for longer than `expiration` ms will be failed with a `BottleneckError`. | +| `id` | `<no-id>` | You should give an ID to your jobs; it helps with [debugging](#debugging-your-application). | + +### Strategies + +A strategy is a simple algorithm that is executed every time adding a job would cause the number of queued jobs to exceed `highWater`. Strategies are never executed if `highWater` is `null`. + +#### Bottleneck.strategy.LEAK +When adding a new job to a limiter, if the queue length reaches `highWater`, drop the oldest job with the lowest priority. This is useful when jobs that have been waiting for too long are not important anymore. If all the queued jobs are more important (based on their `priority` value) than the one being added, it will not be added. + +#### Bottleneck.strategy.OVERFLOW_PRIORITY +Same as `LEAK`, except it will only drop jobs that are *less important* than the one being added. If all the queued jobs are as or more important than the new one, it will not be added. + +#### Bottleneck.strategy.OVERFLOW +When adding a new job to a limiter, if the queue length reaches `highWater`, do not add the new job. This strategy totally ignores priority levels. + +#### Bottleneck.strategy.BLOCK +When adding a new job to a limiter, if the queue length reaches `highWater`, the limiter falls into "blocked mode". All queued jobs are dropped and no new jobs will be accepted until the limiter unblocks. It will unblock after `penalty` milliseconds have passed without receiving a new job. `penalty` is equal to `15 * minTime` (or `5000` if `minTime` is `0`) by default. This strategy is ideal when bruteforce attacks are to be expected. This strategy totally ignores priority levels. + + +### Jobs lifecycle + +1. **Received**. Your new job has been added to the limiter. Bottleneck needs to check whether it can be accepted into the queue. +2. **Queued**. 
Bottleneck has accepted your job, but it cannot tell at what exact timestamp it will run yet, because it is dependent on previous jobs. +3. **Running**. Your job is not in the queue anymore; it will be executed after a delay that was computed according to your `minTime` setting. +4. **Executing**. Your job is executing its code. +5. **Done**. Your job has completed. + +**Note:** By default, Bottleneck does not keep track of DONE jobs, to save memory. You can enable this feature by passing `trackDoneStatus: true` as an option when creating a limiter. + +#### counts() + +```js +const counts = limiter.counts(); + +console.log(counts); +/* +{ + RECEIVED: 0, + QUEUED: 0, + RUNNING: 0, + EXECUTING: 0, + DONE: 0 +} +*/ +``` + +Returns an object with the current number of jobs per status in the limiter. + +#### jobStatus() + +```js +console.log(limiter.jobStatus("some-job-id")); +// Example: QUEUED +``` + +Returns the status of the job with the provided job id **in the limiter**. Returns `null` if no job with that id exists. + +#### jobs() + +```js +console.log(limiter.jobs("RUNNING")); +// Example: ['id1', 'id2'] +``` + +Returns an array of all the job ids with the specified status **in the limiter**. Not passing a status string returns all the known ids. + +#### queued() + +```js +const count = limiter.queued(priority); + +console.log(count); +``` + +`priority` is optional. Returns the number of `QUEUED` jobs with the given `priority` level. Omitting the `priority` argument returns the total number of queued jobs **in the limiter**. + +#### clusterQueued() + +```js +const count = await limiter.clusterQueued(); + +console.log(count); +``` + +Returns the number of `QUEUED` jobs **in the Cluster**. + +#### empty() + +```js +if (limiter.empty()) { + // do something... +} +``` + +Returns a boolean which indicates whether there are any `RECEIVED` or `QUEUED` jobs **in the limiter**. + +#### running() + +```js +limiter.running() +.then((count) => console.log(count)); +``` + +Returns a promise that returns the **total weight** of the `RUNNING` and `EXECUTING` jobs **in the Cluster**. + +#### done() + +```js +limiter.done() +.then((count) => console.log(count)); +``` + +Returns a promise that returns the **total weight** of `DONE` jobs **in the Cluster**. Does not require passing the `trackDoneStatus: true` option. + +#### check() + +```js +limiter.check() +.then((wouldRunNow) => console.log(wouldRunNow)); +``` +Checks if a new job would be executed immediately if it was submitted now. Returns a promise that returns a boolean. + + +### Events + +__'error'__ +```js +limiter.on("error", function (error) { + /* handle errors here */ +}); +``` + +The two main causes of error events are: uncaught exceptions in your event handlers, and network errors when Clustering is enabled. + +__'failed'__ +```js +limiter.on("failed", function (error, jobInfo) { + // This will be called every time a job fails. +}); +``` + +__'retry'__ + +See [Retries](#retries) to learn how to automatically retry jobs. +```js +limiter.on("retry", function (message, jobInfo) { + // This will be called every time a job is retried. +}); +``` + +__'empty'__ +```js +limiter.on("empty", function () { + // This will be called when `limiter.empty()` becomes true. +}); +``` + +__'idle'__ +```js +limiter.on("idle", function () { + // This will be called when `limiter.empty()` is `true` and `limiter.running()` is `0`. +}); +``` + +__'dropped'__ +```js +limiter.on("dropped", function (dropped) { + // This will be called when a strategy was triggered. 
+ // The dropped request is passed to this event listener. +}); +``` + +__'depleted'__ +```js +limiter.on("depleted", function (empty) { + // This will be called every time the reservoir drops to 0. + // The `empty` (boolean) argument indicates whether `limiter.empty()` is currently true. +}); +``` + +__'debug'__ +```js +limiter.on("debug", function (message, data) { + // Useful to figure out what the limiter is doing in real time + // and to help debug your application +}); +``` + +__'received'__ +__'queued'__ +__'scheduled'__ +__'executing'__ +__'done'__ +```js +limiter.on("queued", function (info) { + // This event is triggered when a job transitions from one Lifecycle stage to another +}); +``` + +See [Jobs Lifecycle](#jobs-lifecycle) for more information. + +These Lifecycle events are not triggered for jobs located on another limiter in a Cluster, for performance reasons. + +#### Other event methods + +Use `removeAllListeners()` with an optional event name as first argument to remove listeners. + +Use `.once()` instead of `.on()` to only receive a single event. + + +### Retries + +The following example: +```js +const limiter = new Bottleneck(); + +// Listen to the "failed" event +limiter.on("failed", async (error, jobInfo) => { + const id = jobInfo.options.id; + console.warn(`Job ${id} failed: ${error}`); + + if (jobInfo.retryCount === 0) { // Here we only retry once + console.log(`Retrying job ${id} in 25ms!`); + return 25; + } +}); + +// Listen to the "retry" event +limiter.on("retry", (error, jobInfo) => console.log(`Now retrying ${jobInfo.options.id}`)); + +const main = async function () { + let executions = 0; + + // Schedule one job + const result = await limiter.schedule({ id: 'ABC123' }, async () => { + executions++; + if (executions === 1) { + throw new Error("Boom!"); + } else { + return "Success!"; + } + }); + + console.log(`Result: ${result}`); +} + +main(); +``` +will output +``` +Job ABC123 failed: Error: Boom! +Retrying job ABC123 in 25ms! +Now retrying ABC123 +Result: Success! +``` +To re-run your job, simply return an integer from the `'failed'` event handler. The number returned is how many milliseconds to wait before retrying it. Return `0` to retry it immediately. + +**IMPORTANT:** When you ask the limiter to retry a job it will not send it back into the queue. It will stay in the `EXECUTING` [state](#jobs-lifecycle) until it succeeds or until you stop retrying it. **This means that it counts as a concurrent job for `maxConcurrent` even while it's just waiting to be retried.** The number of milliseconds to wait ignores your `minTime` settings. + + +### updateSettings() + +```js +limiter.updateSettings(options); +``` +The options are the same as the [limiter constructor](#constructor). + +**Note:** Changes don't affect `SCHEDULED` jobs. + +### incrementReservoir() + +```js +limiter.incrementReservoir(incrementBy); +``` +Returns a promise that returns the new reservoir value. + +### currentReservoir() + +```js +limiter.currentReservoir() +.then((reservoir) => console.log(reservoir)); +``` +Returns a promise that returns the current reservoir value. + +### stop() + +The `stop()` method is used to safely shutdown a limiter. It prevents any new jobs from being added to the limiter and waits for all `EXECUTING` jobs to complete. 
+ +```js +limiter.stop(options) +.then(() => { + console.log("Shutdown completed!") +}); +``` + +`stop()` returns a promise that resolves once all the `EXECUTING` jobs have completed and, if desired, once all non-`EXECUTING` jobs have been dropped. + +| Option | Default | Description | +|--------|---------|-------------| +| `dropWaitingJobs` | `true` | When `true`, drop all the `RECEIVED`, `QUEUED` and `RUNNING` jobs. When `false`, allow those jobs to complete before resolving the Promise returned by this method. | +| `dropErrorMessage` | `This limiter has been stopped.` | The error message used to drop jobs when `dropWaitingJobs` is `true`. | +| `enqueueErrorMessage` | `This limiter has been stopped and cannot accept new jobs.` | The error message used to reject a job added to the limiter after `stop()` has been called. | + +### chain() + +Chaining a limiter to another limiter means that tasks that are ready to be executed will be added to that other limiter. Suppose you have 2 types of tasks, A and B. They both have their own limiter with their own settings, but both must also follow a global limiter G: +```js +const limiterA = new Bottleneck( /* some settings */ ); +const limiterB = new Bottleneck( /* some different settings */ ); +const limiterG = new Bottleneck( /* some global settings */ ); + +limiterA.chain(limiterG); +limiterB.chain(limiterG); + +// Requests added to limiterA must follow the A and G rate limits. +// Requests added to limiterB must follow the B and G rate limits. +// Requests added to limiterG must follow the G rate limits. +``` + +To unchain, call `limiter.chain(null);`. + +## Group + +The `Group` feature of Bottleneck manages many limiters automatically for you. It creates limiters dynamically and transparently. + +Let's take a DNS server as an example of how Bottleneck can be used. It's a service that sees a lot of abuse and where incoming DNS requests need to be rate limited. Bottleneck is so tiny that it's acceptable to create one limiter for each origin IP, even if it means creating thousands of limiters. The `Group` feature is perfect for this use case. Create one Group and use the origin IP to rate limit each IP independently. Each call with the same key (IP) will be routed to the same underlying limiter. A Group is created like a limiter: + + +```js +const group = new Bottleneck.Group(options); +``` + +The `options` object will be used for every limiter created by the Group. + +The Group is then used with the `.key(str)` method: + +```js +// In this example, the key is an IP +group.key("77.66.54.32").schedule(() => { + /* process the request */ +}); +``` + +#### key() + +* `str` : The key to use. All jobs added with the same key will use the same underlying limiter. *Default: `""`* + +The return value of `.key(str)` is a limiter. If it doesn't already exist, it is generated for you. Calling `key()` is how limiters are created inside a Group. + +Limiters that have been idle for longer than 5 minutes are deleted to avoid memory leaks; this value can be changed by passing a different `timeout` option, in milliseconds. + +#### on("created") + +```js +group.on("created", (limiter, key) => { + console.log("A new limiter was created for key: " + key) + + // Prepare the limiter, for example we'll want to listen to its "error" events! + limiter.on("error", (err) => { + // Handle errors here + }) +}); +``` + +Listening for the `"created"` event is the recommended way to set up a new limiter. Your event handler is executed before `key()` returns the newly created limiter. 
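+ +As a quick illustration of the idle `timeout` mentioned above, here is a minimal sketch (the one-minute value and the `minTime` setting are arbitrary): +```js +// Limiters created by this Group are deleted after 60 seconds of +// inactivity instead of the 5-minute default. +const group = new Bottleneck.Group({ minTime: 250, timeout: 60 * 1000 }); + +group.on("created", (limiter, key) => { + // Set up each new limiter as recommended above. + limiter.on("error", (err) => { /* handle errors here */ }); +}); +``` 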
+ +#### updateSettings() + +```js +const group = new Bottleneck.Group({ maxConcurrent: 2, minTime: 250 }); +group.updateSettings({ minTime: 500 }); +``` +After executing the above commands, **new limiters** will be created with `{ maxConcurrent: 2, minTime: 500 }`. + + +#### deleteKey() + +* `str`: The key for the limiter to delete. + +Manually deletes the limiter at the specified key. When using Clustering, the Redis data is immediately deleted and the other Groups in the Cluster will eventually delete their local key automatically, unless it is still being used. + +#### keys() + +Returns an array containing all the keys in the Group. + +#### clusterKeys() + +Same as `group.keys()`, but returns all keys for this Group `id` across the Cluster. + +#### limiters() + +```js +const limiters = group.limiters(); + +console.log(limiters); +// [ { key: "some key", limiter: <limiter> }, { key: "some other key", limiter: <limiter> } ] +``` + +## Batching + +Some APIs can accept multiple operations in a single call. Bottleneck's Batching feature helps you take advantage of those APIs: +```js +const batcher = new Bottleneck.Batcher({ + maxTime: 1000, + maxSize: 10 +}); + +batcher.on("batch", (batch) => { + console.log(batch); // ["some-data", "some-other-data"] + + // Handle batch here +}); + +batcher.add("some-data"); +batcher.add("some-other-data"); +``` + +`batcher.add()` returns a Promise that resolves once the request has been flushed to a `"batch"` event. + +| Option | Default | Description | +|--------|---------|-------------| +| `maxTime` | `null` (unlimited) | Maximum acceptable time (in milliseconds) a request can have to wait before being flushed to the `"batch"` event. | +| `maxSize` | `null` (unlimited) | Maximum number of requests in a batch. | + +Batching doesn't throttle requests; it only groups them up optimally according to your `maxTime` and `maxSize` settings. + +## Clustering + +Clustering lets many limiters access the same shared state, stored in Redis. Changes to the state are Atomic, Consistent and Isolated (and fully [ACID](https://en.wikipedia.org/wiki/ACID) with the right [Durability](https://redis.io/topics/persistence) configuration), to eliminate any chances of race conditions or state corruption. Your settings, such as `maxConcurrent`, `minTime`, etc., are shared across the whole cluster, which means, for example, that `{ maxConcurrent: 5 }` guarantees no more than 5 jobs can ever run at a time in the entire cluster of limiters. 100% of Bottleneck's features are supported in Clustering mode. Enabling Clustering is as simple as changing a few settings. It's also a convenient way to store or export state for later use. + +Bottleneck will attempt to spread load evenly across limiters. + +### Enabling Clustering + +First, add `redis` or `ioredis` to your application's dependencies: +```bash +# NodeRedis (https://github.com/NodeRedis/node_redis) +npm install --save redis + +# or ioredis (https://github.com/luin/ioredis) +npm install --save ioredis +``` +Then create a limiter or a Group: +```js +const limiter = new Bottleneck({ + /* Some basic options */ + maxConcurrent: 5, + minTime: 500, + id: "my-super-app", // All limiters with the same id will be clustered together + + /* Clustering options */ + datastore: "redis", // or "ioredis" + clearDatastore: false, + clientOptions: { + host: "127.0.0.1", + port: 6379 + + // Redis client options + // Using NodeRedis? See https://github.com/NodeRedis/node_redis#options-object-properties + // Using ioredis? 
See https://github.com/luin/ioredis/blob/master/API.md#new-redisport-host-options + } +}); +``` + +| Option | Default | Description | +|--------|---------|-------------| +| `datastore` | `"local"` | Where the limiter stores its internal state. The default (`"local"`) keeps the state in the limiter itself. Set it to `"redis"` or `"ioredis"` to enable Clustering. | +| `clearDatastore` | `false` | When set to `true`, on initial startup, the limiter will wipe any existing Bottleneck state data on the Redis db. | +| `clientOptions` | `{}` | This object is passed directly to the redis client library you've selected. | +| `clusterNodes` | `null` | **ioredis only.** When `clusterNodes` is not null, the client will be instantiated by calling `new Redis.Cluster(clusterNodes, clientOptions)` instead of `new Redis(clientOptions)`. | +| `timeout` | `null` (no TTL) | The Redis TTL in milliseconds ([TTL](https://redis.io/commands/ttl)) for the keys created by the limiter. When `timeout` is set, the limiter's state will be automatically removed from Redis after `timeout` milliseconds of inactivity. | +| `Redis` | `null` | Overrides the import/require of the redis/ioredis library. You shouldn't need to set this option unless your application is failing to start due to a failure to require/import the client library. | + +**Note: When using Groups**, the `timeout` option has a default of `300000` milliseconds and the generated limiters automatically receive an `id` with the pattern `${group.id}-${KEY}`. + +**Note:** If you are seeing a runtime error due to the `require()` function not being able to load `redis`/`ioredis`, then directly pass the module as the `Redis` option. Example: +```js +import Redis from "ioredis" + +const limiter = new Bottleneck({ + id: "my-super-app", + datastore: "ioredis", + clientOptions: { host: '12.34.56.78', port: 6379 }, + Redis +}); +``` +Unfortunately, this is a side effect of having to disable inlining, which is necessary to make Bottleneck easy to use in the browser. + +### Important considerations when Clustering + +The first limiter connecting to Redis will store its [constructor options](#constructor) on Redis and all subsequent limiters will be using those settings. You can alter the constructor options used by all the connected limiters by calling `updateSettings()`. The `clearDatastore` option instructs a new limiter to wipe any previous Bottleneck data (for that `id`), including previously stored settings. + +Queued jobs are **NOT** stored on Redis. They are local to each limiter. Exiting the Node.js process will lose those jobs. This is because Bottleneck has no way to propagate the JS code to run a job across a different Node.js process than the one it originated on. Bottleneck doesn't keep track of the queue contents of the limiters on a cluster for performance and reliability reasons. You can use something like [`BeeQueue`](https://github.com/bee-queue/bee-queue) in addition to Bottleneck to get around this limitation. + +Due to the above, functionality relying on the queue length happens purely locally: +- Priorities are local. A higher priority job will run before a lower priority job **on the same limiter**. Another limiter on the cluster might run a lower priority job before our higher priority one. +- Assuming constant priority levels, Bottleneck guarantees that jobs will be run in the order they were received **on the same limiter**. Another limiter on the cluster might run a job received later before ours runs. 
+- `highWater` and load shedding ([strategies](#strategies)) are per limiter. However, one limiter entering Blocked mode will put the entire cluster in Blocked mode until `penalty` milliseconds have passed. See [Strategies](#strategies). +- The `"empty"` event is triggered when the (local) queue is empty. +- The `"idle"` event is triggered when the (local) queue is empty *and* no jobs are currently running anywhere in the cluster. + +You must work around these limitations in your application code if they are an issue to you. The `publish()` method could be useful here. + +The current design guarantees reliability, is highly performant and lets limiters come and go. Your application can scale up or down, and clients can be disconnected at any time without issues. + +It is **strongly recommended** that you give an `id` to every limiter and Group since it is used to build the name of your limiter's Redis keys! Limiters with the same `id` inside the same Redis db will be sharing the same datastore. + +It is **strongly recommended** that you set an `expiration` (See [Job Options](#job-options)) *on every job*, since that lets the cluster recover from crashed or disconnected clients. Otherwise, a client crashing while executing a job would not be able to tell the cluster to decrease its number of "running" jobs. By using expirations, those lost jobs are automatically cleared after the specified time has passed. Using expirations is essential to keeping a cluster reliable in the face of unpredictable application bugs, network hiccups, and so on. + +Network latency between Node.js and Redis is not taken into account when calculating timings (such as `minTime`). To minimize the impact of latency, Bottleneck only performs a single Redis call per [lifecycle transition](#jobs-lifecycle). Keeping the Redis server close to your limiters will help you get a more consistent experience. Keeping the system time consistent across all clients will also help. + +It is **strongly recommended** to [set up an `"error"` listener](#events) on all your limiters and on your Groups. + +### Clustering Methods + +The `ready()`, `publish()` and `clients()` methods also exist when using the `local` datastore, for code compatibility reasons: code written for `redis`/`ioredis` won't break with `local`. + +#### ready() + +This method returns a promise that resolves once the limiter is connected to Redis. + +As of v2.9.0, it's no longer necessary to wait for `.ready()` to resolve before issuing commands to a limiter. The commands will be queued until the limiter successfully connects. Make sure to listen to the `"error"` event to handle connection errors. + +```js +const limiter = new Bottleneck({/* options */}); + +limiter.on("error", (err) => { + // handle network errors +}); + +limiter.ready() +.then(() => { + // The limiter is ready +}); +``` + +#### publish(message) + +This method broadcasts the `message` string to every limiter in the Cluster. It returns a promise. 
+```js +const limiter = new Bottleneck({/* options */}); + +limiter.on("message", (msg) => { + console.log(msg); // prints "this is a string" +}); + +limiter.publish("this is a string"); +``` + +To send objects, stringify them first: +```js +limiter.on("message", (msg) => { + console.log(JSON.parse(msg).hello) // prints "world" +}); + +limiter.publish(JSON.stringify({ hello: "world" })); +``` + +#### clients() + +If you need direct access to the redis clients, use `.clients()`: +```js +console.log(limiter.clients()); +// { client: <client>, subscriber: <subscriber> } +``` + +### Additional Clustering information + +- Bottleneck is compatible with [Redis Clusters](https://redis.io/topics/cluster-tutorial), but you must use the `ioredis` datastore and the `clusterNodes` option. +- Bottleneck is compatible with Redis Sentinel, but you must use the `ioredis` datastore. +- Bottleneck's data is stored in Redis keys starting with `b_`. It also uses pubsub channels starting with `b_`. It will not interfere with any other data stored on the server. +- Bottleneck loads a few Lua scripts on the Redis server using the `SCRIPT LOAD` command. These scripts only take up a few KB of memory. Running the `SCRIPT FLUSH` command will cause any connected limiters to experience critical errors until a new limiter connects to Redis and loads the scripts again. +- The Lua scripts are highly optimized and designed to use as few resources as possible. + +### Managing Redis Connections + +Bottleneck needs to create 2 Redis Clients to function, one for normal operations and one for pubsub subscriptions. These 2 clients are kept in a `Bottleneck.RedisConnection` (NodeRedis) or a `Bottleneck.IORedisConnection` (ioredis) object, referred to as the Connection object. + +By default, every Group and every standalone limiter (a limiter not created by a Group) will create their own Connection object, but it is possible to manually control this behavior. In this example, every Group and limiter is sharing the same Connection object and therefore the same 2 clients: +```js +const connection = new Bottleneck.RedisConnection({ + clientOptions: {/* NodeRedis/ioredis options */} + // ioredis also accepts `clusterNodes` here +}); + + +const limiter = new Bottleneck({ connection: connection }); +const group = new Bottleneck.Group({ connection: connection }); +``` +You can access and reuse the Connection object of any Group or limiter: +```js +const group = new Bottleneck.Group({ connection: limiter.connection }); +``` +When a Connection object is created manually, the connectivity `"error"` events are emitted on the Connection itself. +```js +connection.on("error", (err) => { /* handle connectivity errors here */ }); +``` +If you already have a NodeRedis/ioredis client, you can ask Bottleneck to reuse it, although currently the Connection object will still create a second client for pubsub operations: +```js +import Redis from "redis"; +const client = Redis.createClient({/* options */}); + +const connection = new Bottleneck.RedisConnection({ + // `clientOptions` and `clusterNodes` will be ignored since we're passing a raw client + client: client +}); + +const limiter = new Bottleneck({ connection: connection }); +const group = new Bottleneck.Group({ connection: connection }); +``` +Depending on your application, using more clients can improve performance. + +Use the `disconnect(flush)` method to close the Redis clients. 
+```js +limiter.disconnect(); +group.disconnect(); +``` +If you created the Connection object manually, you need to call `connection.disconnect()` instead, for safety reasons. + +## Debugging your application + +Debugging complex scheduling logic can be difficult, especially when priorities, weights, and network latency all interact with one another. + +If your application is not behaving as expected, start by making sure you're catching `"error"` [events emitted](#events) by your limiters and your Groups. Those errors are most likely uncaught exceptions from your application code. + +Make sure you've read the ['Gotchas'](#gotchas--common-mistakes) section. + +To see exactly what a limiter is doing in real time, listen to the `"debug"` event. It contains detailed information about how the limiter is executing your code. Adding [job IDs](#job-options) to all your jobs makes the debug output more readable. + +When Bottleneck has to fail one of your jobs, it does so by using `BottleneckError` objects. This lets you tell those errors apart from your own code's errors: +```js +limiter.schedule(fn) +.then((result) => { /* ... */ } ) +.catch((error) => { + if (error instanceof Bottleneck.BottleneckError) { + /* ... */ + } +}); +``` + +## Upgrading to v2 + +The internal algorithms essentially haven't changed from v1, but many small changes to the interface were made to introduce new features. + +All the breaking changes: +- Bottleneck v2 requires Node 6+ or a modern browser. Use `require("bottleneck/es5")` if you need ES5 support in v2. Bottleneck v1 will continue to use ES5 only. +- The Bottleneck constructor now takes an options object. See [Constructor](#constructor). +- The `Cluster` feature is now called `Group`. This is to distinguish it from the new v2 [Clustering](#clustering) feature. +- The `Group` constructor takes an options object to match the limiter constructor. +- Jobs take an optional options object. See [Job options](#job-options). +- Removed `submitPriority()`; use `submit()` with an options object instead. +- Removed `schedulePriority()`; use `schedule()` with an options object instead. +- The `rejectOnDrop` option is now `true` by default. It can be set to `false` if you wish to retain v1 behavior. However, this option is left undocumented, as turning it off is considered poor practice. +- Use `null` instead of `0` to indicate an unlimited `maxConcurrent` value. +- Use `null` instead of `-1` to indicate an unlimited `highWater` value. +- Renamed `changeSettings()` to `updateSettings()`; it now returns a promise to indicate completion. It takes the same options object as the constructor. +- Renamed `nbQueued()` to `queued()`. +- Renamed `nbRunning` to `running()`; it now returns its result using a promise. +- Removed `isBlocked()`. +- Changing the Promise library is now done through the options object like any other limiter setting. +- Removed `changePenalty()`; it is now done through the options object like any other limiter setting. +- Removed `changeReservoir()`; it is now done through the options object like any other limiter setting. +- Removed `stopAll()`. Use the new `stop()` method. +- `check()` now accepts an optional `weight` argument, and returns its result using a promise. +- Removed the `Group` `changeTimeout()` method. Instead, pass a `timeout` option when creating a Group. + +Version 2 is more user-friendly and powerful. + +After upgrading your code, please take a minute to read the [Debugging your application](#debugging-your-application) chapter. 
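+ +To make the method renames above concrete, here is a hedged sketch of the v2 equivalents (`myTask`, `arg1` and `callback` are hypothetical): +```js +// v1's submitPriority() is gone; the priority now travels in the job options object. +limiter.submit({ priority: 4 }, myTask, arg1, callback); + +// v1's changeSettings() is now updateSettings(), taking the constructor's options object. +limiter.updateSettings({ maxConcurrent: 2 }); +``` 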
+ + +## Contributing + +This README is always in need of improvements. If the wording can be made clearer or simpler, please consider forking this repo and submitting a Pull Request, or simply opening an issue. + +Suggestions and bug reports are also welcome. + +To work on the Bottleneck code, simply clone the repo, make your changes to the files located in `src/` only, then run `./scripts/build.sh && npm test` to ensure that everything is set up correctly. + +To speed up compilation time during development, run `./scripts/build.sh dev` instead. Make sure to build and test without `dev` before submitting a PR. + +The tests must also pass in Clustering mode and using the ES5 bundle. You'll need a Redis server running locally (latency needs to be minimal to run the tests). If the server isn't using the default hostname and port, you can set those in the `.env` file. Then run `./scripts/build.sh && npm run test-all`. + +All contributions are appreciated and will be considered. + +[license-url]: https://github.com/SGrondin/bottleneck/blob/master/LICENSE + +[npm-url]: https://www.npmjs.com/package/bottleneck +[npm-license]: https://img.shields.io/npm/l/bottleneck.svg?style=flat +[npm-version]: https://img.shields.io/npm/v/bottleneck.svg?style=flat +[npm-downloads]: https://img.shields.io/npm/dm/bottleneck.svg?style=flat diff --git a/node_modules/bottleneck/bottleneck.d.ts b/node_modules/bottleneck/bottleneck.d.ts new file mode 100644 index 000000000..3ad20c128 --- /dev/null +++ b/node_modules/bottleneck/bottleneck.d.ts @@ -0,0 +1,629 @@ +declare module "bottleneck" { + namespace Bottleneck { + type ConstructorOptions = { + /** + * How many jobs can be running at the same time. + */ + readonly maxConcurrent?: number | null; + /** + * How long to wait after launching a job before launching another one. + */ + readonly minTime?: number | null; + /** + * How long can the queue get? When the queue length exceeds that value, the selected `strategy` is executed to shed the load. + */ + readonly highWater?: number | null; + /** + * Which strategy to use if the queue gets longer than the high water mark. + */ + readonly strategy?: Bottleneck.Strategy | null; + /** + * The `penalty` value used by the `Bottleneck.strategy.BLOCK` strategy. + */ + readonly penalty?: number | null; + /** + * How many jobs can be executed before the limiter stops executing jobs. If `reservoir` reaches `0`, no jobs will be executed until it is no longer `0`. + */ + readonly reservoir?: number | null; + /** + * Every `reservoirRefreshInterval` milliseconds, the `reservoir` value will be automatically reset to `reservoirRefreshAmount`. + */ + readonly reservoirRefreshInterval?: number | null; + /** + * The value to reset `reservoir` to when `reservoirRefreshInterval` is in use. + */ + readonly reservoirRefreshAmount?: number | null; + /** + * The increment applied to `reservoir` when `reservoirIncreaseInterval` is in use. + */ + readonly reservoirIncreaseAmount?: number | null; + /** + * Every `reservoirIncreaseInterval` milliseconds, the `reservoir` value will be automatically incremented by `reservoirIncreaseAmount`. + */ + readonly reservoirIncreaseInterval?: number | null; + /** + * The maximum value that `reservoir` can reach when `reservoirIncreaseInterval` is in use. + */ + readonly reservoirIncreaseMaximum?: number | null; + /** + * Optional identifier + */ + readonly id?: string | null; + /** + * Set to true to leave your failed jobs hanging instead of failing them. 
+ */ + readonly rejectOnDrop?: boolean | null; + /** + * Set to true to keep track of done jobs with counts() and jobStatus(). Uses more memory. + */ + readonly trackDoneStatus?: boolean | null; + /** + * Where the limiter stores its internal state. The default (`local`) keeps the state in the limiter itself. Set it to `redis` to enable Clustering. + */ + readonly datastore?: string | null; + /** + * Override the Promise library used by Bottleneck. + */ + readonly Promise?: any; + /** + * This object is passed directly to the redis client library you've selected. + */ + readonly clientOptions?: any; + /** + * **ioredis only.** When `clusterNodes` is not null, the client will be instantiated by calling `new Redis.Cluster(clusterNodes, clientOptions)`. + */ + readonly clusterNodes?: any; + /** + * An existing Bottleneck.RedisConnection or Bottleneck.IORedisConnection object to use. + * If using, `datastore`, `clientOptions` and `clusterNodes` will be ignored. + */ + /** + * Optional Redis/IORedis library from `require('ioredis')` or equivalent. If not, Bottleneck will attempt to require Redis/IORedis at runtime. + */ + readonly Redis?: any; + /** + * Bottleneck connection object created from `new Bottleneck.RedisConnection` or `new Bottleneck.IORedisConnection`. + */ + readonly connection?: Bottleneck.RedisConnection | Bottleneck.IORedisConnection | null; + /** + * When set to `true`, on initial startup, the limiter will wipe any existing Bottleneck state data on the Redis db. + */ + readonly clearDatastore?: boolean | null; + /** + * The Redis TTL in milliseconds for the keys created by the limiter. When `timeout` is set, the limiter's state will be automatically removed from Redis after timeout milliseconds of inactivity. Note: timeout is 300000 (5 minutes) by default when using a Group. + */ + readonly timeout?: number | null; + + [propName: string]: any; + }; + type JobOptions = { + /** + * A priority between `0` and `9`. A job with a priority of `4` will _always_ be executed before a job with a priority of `5`. + */ + readonly priority?: number | null; + /** + * Must be an integer equal to or higher than `0`. The `weight` is what increases the number of running jobs (up to `maxConcurrent`, if using) and decreases the `reservoir` value (if using). + */ + readonly weight?: number | null; + /** + * The number milliseconds a job has to finish. Jobs that take longer than their `expiration` will be failed with a `BottleneckError`. + */ + readonly expiration?: number | null; + /** + * Optional identifier, helps with debug output. + */ + readonly id?: string | null; + }; + type StopOptions = { + /** + * When `true`, drop all the RECEIVED, QUEUED and RUNNING jobs. When `false`, allow those jobs to complete before resolving the Promise returned by this method. + */ + readonly dropWaitingJobs?: boolean | null; + /** + * The error message used to drop jobs when `dropWaitingJobs` is `true`. + */ + readonly dropErrorMessage?: string | null; + /** + * The error message used to reject a job added to the limiter after `stop()` has been called. 
+ */ + readonly enqueueErrorMessage?: string | null; + }; + type Callback = (err: any, result: T) => void; + type ClientsList = { client?: any; subscriber?: any }; + type GroupLimiterPair = { key: string; limiter: Bottleneck }; + interface Strategy {} + + type EventInfo = { + readonly args: any[]; + readonly options: { + readonly id: string; + readonly priority: number; + readonly weight: number; + readonly expiration?: number; + }; + }; + type EventInfoDropped = EventInfo & { + readonly task: Function; + readonly promise: Promise; + }; + type EventInfoQueued = EventInfo & { + readonly reachedHWM: boolean; + readonly blocked: boolean; + }; + type EventInfoRetryable = EventInfo & { readonly retryCount: number; }; + + enum Status { + RECEIVED = "RECEIVED", + QUEUED = "QUEUED", + RUNNING = "RUNNING", + EXECUTING = "EXECUTING", + DONE = "DONE" + } + type Counts = { + RECEIVED: number, + QUEUED: number, + RUNNING: number, + EXECUTING: number, + DONE?: number + }; + + type RedisConnectionOptions = { + /** + * This object is passed directly to NodeRedis' createClient() method. + */ + readonly clientOptions?: any; + /** + * An existing NodeRedis client to use. If using, `clientOptions` will be ignored. + */ + readonly client?: any; + /** + * Optional Redis library from `require('redis')` or equivalent. If not, Bottleneck will attempt to require Redis at runtime. + */ + readonly Redis?: any; + }; + + type IORedisConnectionOptions = { + /** + * This object is passed directly to ioredis' constructor method. + */ + readonly clientOptions?: any; + /** + * When `clusterNodes` is not null, the client will be instantiated by calling `new Redis.Cluster(clusterNodes, clientOptions)`. + */ + readonly clusterNodes?: any; + /** + * An existing ioredis client to use. If using, `clientOptions` and `clusterNodes` will be ignored. + */ + readonly client?: any; + /** + * Optional IORedis library from `require('ioredis')` or equivalent. If not, Bottleneck will attempt to require IORedis at runtime. + */ + readonly Redis?: any; + }; + + type BatcherOptions = { + /** + * Maximum acceptable time (in milliseconds) a request can have to wait before being flushed to the `"batch"` event. + */ + readonly maxTime?: number | null; + /** + * Maximum number of requests in a batch. + */ + readonly maxSize?: number | null; + }; + + class BottleneckError extends Error { + } + + class RedisConnection { + constructor(options?: Bottleneck.RedisConnectionOptions); + + /** + * Register an event listener. + * @param name - The event name. + * @param fn - The callback function. + */ + on(name: "error", fn: (error: any) => void): void; + + /** + * Register an event listener for one event only. + * @param name - The event name. + * @param fn - The callback function. + */ + once(name: "error", fn: (error: any) => void): void; + + /** + * Waits until the connection is ready and returns the raw Node_Redis clients. + */ + ready(): Promise; + + /** + * Close the redis clients. + * @param flush - Write transient data before closing. + */ + disconnect(flush?: boolean): Promise; + } + + class IORedisConnection { + constructor(options?: Bottleneck.IORedisConnectionOptions); + + /** + * Register an event listener. + * @param name - The event name. + * @param fn - The callback function. + */ + on(name: "error", fn: (error: any) => void): void; + + /** + * Register an event listener for one event only. + * @param name - The event name. + * @param fn - The callback function. 
+ */ + once(name: "error", fn: (error: any) => void): void; + + /** + * Waits until the connection is ready and returns the raw ioredis clients. + */ + ready(): Promise; + + /** + * Close the redis clients. + * @param flush - Write transient data before closing. + */ + disconnect(flush?: boolean): Promise; + } + + class Batcher { + constructor(options?: Bottleneck.BatcherOptions); + + /** + * Register an event listener. + * @param name - The event name. + * @param fn - The callback function. + */ + on(name: string, fn: Function): void; + on(name: "error", fn: (error: any) => void): void; + on(name: "batch", fn: (batch: any[]) => void): void; + + /** + * Register an event listener for one event only. + * @param name - The event name. + * @param fn - The callback function. + */ + once(name: string, fn: Function): void; + once(name: "error", fn: (error: any) => void): void; + once(name: "batch", fn: (batch: any[]) => void): void; + + /** + * Add a request to the Batcher. Batches are flushed to the "batch" event. + */ + add(data: any): Promise; + } + + class Group { + constructor(options?: Bottleneck.ConstructorOptions); + + id: string; + datastore: string; + connection?: Bottleneck.RedisConnection | Bottleneck.IORedisConnection; + + /** + * Returns the limiter for the specified key. + * @param str - The limiter key. + */ + key(str: string): Bottleneck; + + /** + * Register an event listener. + * @param name - The event name. + * @param fn - The callback function. + */ + on(name: string, fn: Function): void; + on(name: "error", fn: (error: any) => void): void; + on(name: "created", fn: (limiter: Bottleneck, key: string) => void): void; + + /** + * Register an event listener for one event only. + * @param name - The event name. + * @param fn - The callback function. + */ + once(name: string, fn: Function): void; + once(name: "error", fn: (error: any) => void): void; + once(name: "created", fn: (limiter: Bottleneck, key: string) => void): void; + + /** + * Removes all registered event listeners. + * @param name - The optional event name to remove listeners from. + */ + removeAllListeners(name?: string): void; + + /** + * Updates the group settings. + * @param options - The new settings. + */ + updateSettings(options: Bottleneck.ConstructorOptions): void; + + /** + * Deletes the limiter for the given key. + * Returns true if a key was deleted. + * @param str - The key + */ + deleteKey(str: string): Promise; + + /** + * Disconnects the underlying redis clients, unless the Group was created with the `connection` option. + * @param flush - Write transient data before closing. + */ + disconnect(flush?: boolean): Promise; + + /** + * Returns all the key-limiter pairs. + */ + limiters(): Bottleneck.GroupLimiterPair[]; + + /** + * Returns all Group keys in the local instance + */ + keys(): string[]; + + /** + * Returns all Group keys in the Cluster + */ + clusterKeys(): Promise; + } + + class Events { + constructor(object: Object); + + /** + * Returns the number of limiters for the event name + * @param name - The event name. + */ + listenerCount(name: string): number; + + /** + * Returns a promise with the first non-null/non-undefined result from a listener + * @param name - The event name. + * @param args - The arguments to pass to the event listeners. 
+ */ + trigger(name: string, ...args: any[]): Promise<any>; + } + } + + class Bottleneck { + public static readonly strategy: { + /** + * When adding a new job to a limiter, if the queue length reaches `highWater`, drop the oldest job with the lowest priority. This is useful when jobs that have been waiting for too long are not important anymore. If all the queued jobs are more important (based on their `priority` value) than the one being added, it will not be added. + */ + readonly LEAK: Bottleneck.Strategy; + /** + * Same as `LEAK`, except it will only drop jobs that are less important than the one being added. If all the queued jobs are as or more important than the new one, it will not be added. + */ + readonly OVERFLOW_PRIORITY: Bottleneck.Strategy; + /** + * When adding a new job to a limiter, if the queue length reaches `highWater`, do not add the new job. This strategy totally ignores priority levels. + */ + readonly OVERFLOW: Bottleneck.Strategy; + /** + * When adding a new job to a limiter, if the queue length reaches `highWater`, the limiter falls into "blocked mode". All queued jobs are dropped and no new jobs will be accepted until the limiter unblocks. It will unblock after `penalty` milliseconds have passed without receiving a new job. `penalty` is equal to `15 * minTime` (or `5000` if `minTime` is `0`) by default and can be changed by calling `changePenalty()`. This strategy is ideal when bruteforce attacks are to be expected. This strategy totally ignores priority levels. + */ + readonly BLOCK: Bottleneck.Strategy; + }; + + constructor(options?: Bottleneck.ConstructorOptions); + + id: string; + datastore: string; + connection?: Bottleneck.RedisConnection | Bottleneck.IORedisConnection; + + /** + * Returns a promise which will be resolved once the limiter is ready to accept jobs + * or rejected if it fails to start up. + */ + ready(): Promise<any>; + + /** + * Returns a datastore-specific object of redis clients. + */ + clients(): Bottleneck.ClientsList; + + /** + * Returns the name of the Redis pubsub channel used for this limiter + */ + channel(): string; + + /** + * Disconnects the underlying redis clients, unless the limiter was created with the `connection` option. + * @param flush - Write transient data before closing. + */ + disconnect(flush?: boolean): Promise<void>; + + /** + * Broadcast a string to every limiter in the Cluster. + */ + publish(message: string): Promise<void>; + + /** + * Returns an object with the current number of jobs per status. + */ + counts(): Bottleneck.Counts; + + /** + * Returns the status of the job with the provided job id. + */ + jobStatus(id: string): Bottleneck.Status; + + /** + * Returns the ids of all the jobs with the specified status. + */ + jobs(status?: Bottleneck.Status): string[]; + + /** + * Returns the number of requests queued. + * @param priority - Returns the number of requests queued with the specified priority. + */ + queued(priority?: number): number; + + /** + * Returns the number of requests queued across the Cluster. + */ + clusterQueued(): Promise<number>; + + /** + * Returns whether there are any jobs currently in the queue or in the process of being added to the queue. + */ + empty(): boolean; + + /** + * Returns the total weight of jobs in a RUNNING or EXECUTING state in the Cluster. + */ + running(): Promise<number>; + + /** + * Returns the total weight of jobs in a DONE state in the Cluster. + */ + done(): Promise<number>; + + /** + * If a request was added right now, would it be run immediately?
+ * @param weight - The weight of the request + */ + check(weight?: number): Promise<boolean>; + + /** + * Register an event listener. + * @param name - The event name. + * @param fn - The callback function. + */ + on(name: "error", fn: (error: any) => void): void; + on(name: "empty", fn: () => void): void; + on(name: "idle", fn: () => void): void; + on(name: "depleted", fn: (empty: boolean) => void): void; + on(name: "message", fn: (message: string) => void): void; + on(name: "debug", fn: (message: string, info: any) => void): void; + on(name: "dropped", fn: (dropped: Bottleneck.EventInfoDropped) => void): void; + on(name: "received", fn: (info: Bottleneck.EventInfo) => void): void; + on(name: "queued", fn: (info: Bottleneck.EventInfoQueued) => void): void; + on(name: "scheduled", fn: (info: Bottleneck.EventInfo) => void): void; + on(name: "executing", fn: (info: Bottleneck.EventInfoRetryable) => void): void; + on(name: "failed", fn: (error: any, info: Bottleneck.EventInfoRetryable) => Promise<number | void | null> | number | void | null): void; + on(name: "retry", fn: (message: string, info: Bottleneck.EventInfoRetryable) => void): void; + on(name: "done", fn: (info: Bottleneck.EventInfoRetryable) => void): void; + + /** + * Register an event listener for one event only. + * @param name - The event name. + * @param fn - The callback function. + */ + once(name: "error", fn: (error: any) => void): void; + once(name: "empty", fn: () => void): void; + once(name: "idle", fn: () => void): void; + once(name: "depleted", fn: (empty: boolean) => void): void; + once(name: "message", fn: (message: string) => void): void; + once(name: "debug", fn: (message: string, info: any) => void): void; + once(name: "dropped", fn: (dropped: Bottleneck.EventInfoDropped) => void): void; + once(name: "received", fn: (info: Bottleneck.EventInfo) => void): void; + once(name: "queued", fn: (info: Bottleneck.EventInfoQueued) => void): void; + once(name: "scheduled", fn: (info: Bottleneck.EventInfo) => void): void; + once(name: "executing", fn: (info: Bottleneck.EventInfoRetryable) => void): void; + once(name: "failed", fn: (error: any, info: Bottleneck.EventInfoRetryable) => Promise<number | void | null> | number | void | null): void; + once(name: "retry", fn: (message: string, info: Bottleneck.EventInfoRetryable) => void): void; + once(name: "done", fn: (info: Bottleneck.EventInfoRetryable) => void): void; + + /** + * Removes all registered event listeners. + * @param name - The optional event name to remove listeners from. + */ + removeAllListeners(name?: string): void; + + /** + * Changes the settings for future requests. + * @param options - The new settings. + */ + updateSettings(options?: Bottleneck.ConstructorOptions): Bottleneck; + + /** + * Adds to the reservoir count and returns the new value. + */ + incrementReservoir(incrementBy: number): Promise<number>; + + /** + * The `stop()` method is used to safely shutdown a limiter. It prevents any new jobs from being added to the limiter and waits for all Executing jobs to complete. + */ + stop(options?: Bottleneck.StopOptions): Promise<void>; + + /** + * Returns the current reservoir count, if any. + */ + currentReservoir(): Promise<number | null>; + + /** + * Chain this limiter to another. + * @param limiter - The limiter that requests to this limiter must also follow.
+ */ + chain(limiter?: Bottleneck): Bottleneck; + + wrap<R>(fn: () => PromiseLike<R>): (() => Promise<R>) & { withOptions: (options: Bottleneck.JobOptions) => Promise<R>; }; + wrap<R, A1>(fn: (arg1: A1) => PromiseLike<R>): ((arg1: A1) => Promise<R>) & { withOptions: (options: Bottleneck.JobOptions, arg1: A1) => Promise<R>; }; + wrap<R, A1, A2>(fn: (arg1: A1, arg2: A2) => PromiseLike<R>): ((arg1: A1, arg2: A2) => Promise<R>) & { withOptions: (options: Bottleneck.JobOptions, arg1: A1, arg2: A2) => Promise<R>; }; + wrap<R, A1, A2, A3>(fn: (arg1: A1, arg2: A2, arg3: A3) => PromiseLike<R>): ((arg1: A1, arg2: A2, arg3: A3) => Promise<R>) & { withOptions: (options: Bottleneck.JobOptions, arg1: A1, arg2: A2, arg3: A3) => Promise<R>; }; + wrap<R, A1, A2, A3, A4>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4) => PromiseLike<R>): ((arg1: A1, arg2: A2, arg3: A3, arg4: A4) => Promise<R>) & { withOptions: (options: Bottleneck.JobOptions, arg1: A1, arg2: A2, arg3: A3, arg4: A4) => Promise<R>; }; + wrap<R, A1, A2, A3, A4, A5>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5) => PromiseLike<R>): ((arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5) => Promise<R>) & { withOptions: (options: Bottleneck.JobOptions, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5) => Promise<R>; }; + wrap<R, A1, A2, A3, A4, A5, A6>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6) => PromiseLike<R>): ((arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6) => Promise<R>) & { withOptions: (options: Bottleneck.JobOptions, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6) => Promise<R>; }; + wrap<R, A1, A2, A3, A4, A5, A6, A7>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7) => PromiseLike<R>): ((arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7) => Promise<R>) & { withOptions: (options: Bottleneck.JobOptions, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7) => Promise<R>; }; + wrap<R, A1, A2, A3, A4, A5, A6, A7, A8>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8) => PromiseLike<R>): ((arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8) => Promise<R>) & { withOptions: (options: Bottleneck.JobOptions, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8) => Promise<R>; }; + wrap<R, A1, A2, A3, A4, A5, A6, A7, A8, A9>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9) => PromiseLike<R>): ((arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9) => Promise<R>) & { withOptions: (options: Bottleneck.JobOptions, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9) => Promise<R>; }; + wrap<R, A1, A2, A3, A4, A5, A6, A7, A8, A9, A10>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, arg10: A10) => PromiseLike<R>): ((arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, arg10: A10) => Promise<R>) & { withOptions: (options: Bottleneck.JobOptions, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, arg10: A10) => Promise<R>; }; + + submit<R>(fn: (callback: Bottleneck.Callback<R>) => void, callback: Bottleneck.Callback<R>): void; + submit<R, A1>(fn: (arg1: A1, callback: Bottleneck.Callback<R>) => void, arg1: A1, callback: Bottleneck.Callback<R>): void; + submit<R, A1, A2>(fn: (arg1: A1, arg2: A2, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, callback: Bottleneck.Callback<R>): void; + submit<R, A1, A2, A3>(fn: (arg1: A1, arg2: A2, arg3: A3, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, arg3: A3, callback: Bottleneck.Callback<R>): void; + submit<R, A1, A2, A3, A4>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, callback: Bottleneck.Callback<R>): void; + submit<R, A1, A2, A3, A4, A5>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, callback: Bottleneck.Callback<R>): void; + submit<R, A1, A2, A3, A4, A5, A6>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, callback: Bottleneck.Callback<R>): void; + submit<R, A1, A2, A3, A4, A5, A6, A7>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, callback: Bottleneck.Callback<R>): void; + submit<R, A1, A2, A3, A4, A5, A6, A7, A8>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, callback: Bottleneck.Callback<R>): void; + submit<R, A1, A2, A3, A4, A5, A6, A7, A8, A9>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, callback: Bottleneck.Callback<R>): void; + submit<R, A1, A2, A3, A4, A5, A6, A7, A8, A9, A10>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, arg10: A10, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, arg10: A10, callback: Bottleneck.Callback<R>): void; + + submit<R>(options: Bottleneck.JobOptions, fn: (callback: Bottleneck.Callback<R>) => void, callback: Bottleneck.Callback<R>): void; + submit<R, A1>(options: Bottleneck.JobOptions, fn: (arg1: A1, callback: Bottleneck.Callback<R>) => void, arg1: A1, callback: Bottleneck.Callback<R>): void; + submit<R, A1, A2>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, callback: Bottleneck.Callback<R>): void; + submit<R, A1, A2, A3>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, arg3: A3, callback: Bottleneck.Callback<R>): void; + submit<R, A1, A2, A3, A4>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, callback: Bottleneck.Callback<R>): void; + submit<R, A1, A2, A3, A4, A5>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, callback: Bottleneck.Callback<R>): void; + submit<R, A1, A2, A3, A4, A5, A6>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, callback: Bottleneck.Callback<R>): void; + submit<R, A1, A2, A3, A4, A5, A6, A7>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, callback: Bottleneck.Callback<R>): void; + submit<R, A1, A2, A3, A4, A5, A6, A7, A8>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, callback: Bottleneck.Callback<R>): void; + submit<R, A1, A2, A3, A4, A5, A6, A7, A8, A9>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, callback: Bottleneck.Callback<R>): void; + submit<R, A1, A2, A3, A4, A5, A6, A7, A8, A9, A10>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, arg10: A10, callback: Bottleneck.Callback<R>) => void, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, arg10: A10, callback: Bottleneck.Callback<R>): void; + + schedule<R>(fn: () => PromiseLike<R>): Promise<R>; + schedule<R, A1>(fn: (arg1: A1) => PromiseLike<R>, arg1: A1): Promise<R>; + schedule<R, A1, A2>(fn: (arg1: A1, arg2: A2) => PromiseLike<R>, arg1: A1, arg2: A2): Promise<R>; + schedule<R, A1, A2, A3>(fn: (arg1: A1, arg2: A2, arg3: A3) => PromiseLike<R>, arg1: A1, arg2: A2, arg3: A3): Promise<R>; + schedule<R, A1, A2, A3, A4>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4) => PromiseLike<R>, arg1: A1, arg2: A2, arg3: A3, arg4: A4): Promise<R>; + schedule<R, A1, A2, A3, A4, A5>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5) => PromiseLike<R>, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5): Promise<R>; + schedule<R, A1, A2, A3, A4, A5, A6>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6) => PromiseLike<R>, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6): Promise<R>; + schedule<R, A1, A2, A3, A4, A5, A6, A7>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7) => PromiseLike<R>, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7): Promise<R>; + schedule<R, A1, A2, A3, A4, A5, A6, A7, A8>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8) => PromiseLike<R>, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8): Promise<R>; + schedule<R, A1, A2, A3, A4, A5, A6, A7, A8, A9>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9) => PromiseLike<R>, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9): Promise<R>; + schedule<R, A1, A2, A3, A4, A5, A6, A7, A8, A9, A10>(fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, arg10: A10) => PromiseLike<R>, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, arg10: A10): Promise<R>; + + schedule<R>(options: Bottleneck.JobOptions, fn: () => PromiseLike<R>): Promise<R>; + schedule<R, A1>(options: Bottleneck.JobOptions, fn: (arg1: A1) => PromiseLike<R>, arg1: A1): Promise<R>; + schedule<R, A1, A2>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2) => PromiseLike<R>, arg1: A1, arg2: A2): Promise<R>; + schedule<R, A1, A2, A3>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3) => PromiseLike<R>, arg1: A1, arg2: A2, arg3: A3): Promise<R>; + schedule<R, A1, A2, A3, A4>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4) => PromiseLike<R>, arg1: A1, arg2: A2, arg3: A3, arg4: A4): Promise<R>; + schedule<R, A1, A2, A3, A4, A5>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5) => PromiseLike<R>, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5): Promise<R>; + schedule<R, A1, A2, A3, A4, A5, A6>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6) => PromiseLike<R>, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6): Promise<R>; + schedule<R, A1, A2, A3, A4, A5, A6, A7>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7) => PromiseLike<R>, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7): Promise<R>; + schedule<R, A1, A2, A3, A4, A5, A6, A7, A8>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8) => PromiseLike<R>, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8): Promise<R>; + schedule<R, A1, A2, A3, A4, A5, A6, A7, A8, A9>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9) => PromiseLike<R>, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9): Promise<R>; + schedule<R, A1, A2, A3, A4, A5, A6, A7, A8, A9, A10>(options: Bottleneck.JobOptions, fn: (arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, arg10: A10) => PromiseLike<R>, arg1: A1, arg2: A2, arg3: A3, arg4: A4, arg5: A5, arg6: A6, arg7: A7, arg8: A8, arg9: A9, arg10: A10): Promise<R>; + } + + export default Bottleneck; +} + diff --git a/node_modules/bottleneck/bottleneck.d.ts.ejs b/node_modules/bottleneck/bottleneck.d.ts.ejs new file mode 100644 index 000000000..18f19ad8a --- /dev/null +++ b/node_modules/bottleneck/bottleneck.d.ts.ejs @@ -0,0 +1,588 @@ +declare module "bottleneck" { + namespace Bottleneck { + type ConstructorOptions = { + /** + * How many jobs can be running at the same time. + */ + readonly maxConcurrent?: number | null; + /** + * How long to wait after launching a job before launching another one. + */ + readonly minTime?: number | null; + /** + * How long can the queue get? When the queue length exceeds that value, the selected `strategy` is executed to shed the load. + */ + readonly highWater?: number | null; + /** + * Which strategy to use if the queue gets longer than the high water mark. + */ + readonly strategy?: Bottleneck.Strategy | null; + /** + * The `penalty` value used by the `Bottleneck.strategy.BLOCK` strategy. + */ + readonly penalty?: number | null; + /** + * How many jobs can be executed before the limiter stops executing jobs. If `reservoir` reaches `0`, no jobs will be executed until it is no longer `0`. + */ + readonly reservoir?: number | null; + /** + * Every `reservoirRefreshInterval` milliseconds, the `reservoir` value will be automatically reset to `reservoirRefreshAmount`. + */ + readonly reservoirRefreshInterval?: number | null; + /** + * The value to reset `reservoir` to when `reservoirRefreshInterval` is in use. + */ + readonly reservoirRefreshAmount?: number | null; + /** + * The increment applied to `reservoir` when `reservoirIncreaseInterval` is in use. + */ + readonly reservoirIncreaseAmount?: number | null; + /** + * Every `reservoirIncreaseInterval` milliseconds, the `reservoir` value will be automatically incremented by `reservoirIncreaseAmount`. + */ + readonly reservoirIncreaseInterval?: number | null; + /** + * The maximum value that `reservoir` can reach when `reservoirIncreaseInterval` is in use. + */ + readonly reservoirIncreaseMaximum?: number | null; + /** + * Optional identifier + */ + readonly id?: string | null; + /** + * Set to false to leave your failed jobs hanging instead of failing them. + */ + readonly rejectOnDrop?: boolean | null; + /** + * Set to true to keep track of done jobs with counts() and jobStatus(). Uses more memory. + */ + readonly trackDoneStatus?: boolean | null; + /** + * Where the limiter stores its internal state. The default (`local`) keeps the state in the limiter itself. Set it to `redis` to enable Clustering. + */ + readonly datastore?: string | null; + /** + * Override the Promise library used by Bottleneck. + */ + readonly Promise?: any; + /** + * This object is passed directly to the redis client library you've selected. + */ + readonly clientOptions?: any; + /** + * **ioredis only.** When `clusterNodes` is not null, the client will be instantiated by calling `new Redis.Cluster(clusterNodes, clientOptions)`. + */ + readonly clusterNodes?: any; + /** + * An existing Bottleneck.RedisConnection or Bottleneck.IORedisConnection object to use. + * If using, `datastore`, `clientOptions` and `clusterNodes` will be ignored.
+ */ + /** + * Optional Redis/IORedis library from `require('ioredis')` or equivalent. If not, Bottleneck will attempt to require Redis/IORedis at runtime. + */ + readonly Redis?: any; + /** + * Bottleneck connection object created from `new Bottleneck.RedisConnection` or `new Bottleneck.IORedisConnection`. + */ + readonly connection?: Bottleneck.RedisConnection | Bottleneck.IORedisConnection | null; + /** + * When set to `true`, on initial startup, the limiter will wipe any existing Bottleneck state data on the Redis db. + */ + readonly clearDatastore?: boolean | null; + /** + * The Redis TTL in milliseconds for the keys created by the limiter. When `timeout` is set, the limiter's state will be automatically removed from Redis after timeout milliseconds of inactivity. Note: timeout is 300000 (5 minutes) by default when using a Group. + */ + readonly timeout?: number | null; + + [propName: string]: any; + }; + type JobOptions = { + /** + * A priority between `0` and `9`. A job with a priority of `4` will _always_ be executed before a job with a priority of `5`. + */ + readonly priority?: number | null; + /** + * Must be an integer equal to or higher than `0`. The `weight` is what increases the number of running jobs (up to `maxConcurrent`, if using) and decreases the `reservoir` value (if using). + */ + readonly weight?: number | null; + /** + * The number of milliseconds a job has to finish. Jobs that take longer than their `expiration` will be failed with a `BottleneckError`. + */ + readonly expiration?: number | null; + /** + * Optional identifier, helps with debug output. + */ + readonly id?: string | null; + }; + type StopOptions = { + /** + * When `true`, drop all the RECEIVED, QUEUED and RUNNING jobs. When `false`, allow those jobs to complete before resolving the Promise returned by this method. + */ + readonly dropWaitingJobs?: boolean | null; + /** + * The error message used to drop jobs when `dropWaitingJobs` is `true`. + */ + readonly dropErrorMessage?: string | null; + /** + * The error message used to reject a job added to the limiter after `stop()` has been called. + */ + readonly enqueueErrorMessage?: string | null; + }; + type Callback<T> = (err: any, result: T) => void; + type ClientsList = { client?: any; subscriber?: any }; + type GroupLimiterPair = { key: string; limiter: Bottleneck }; + interface Strategy {} + + type EventInfo = { + readonly args: any[]; + readonly options: { + readonly id: string; + readonly priority: number; + readonly weight: number; + readonly expiration?: number; + }; + }; + type EventInfoDropped = EventInfo & { + readonly task: Function; + readonly promise: Promise<any>; + }; + type EventInfoQueued = EventInfo & { + readonly reachedHWM: boolean; + readonly blocked: boolean; + }; + type EventInfoRetryable = EventInfo & { readonly retryCount: number; }; + + enum Status { + RECEIVED = "RECEIVED", + QUEUED = "QUEUED", + RUNNING = "RUNNING", + EXECUTING = "EXECUTING", + DONE = "DONE" + } + type Counts = { + RECEIVED: number, + QUEUED: number, + RUNNING: number, + EXECUTING: number, + DONE?: number + }; + + type RedisConnectionOptions = { + /** + * This object is passed directly to NodeRedis' createClient() method. + */ + readonly clientOptions?: any; + /** + * An existing NodeRedis client to use. If using, `clientOptions` will be ignored. + */ + readonly client?: any; + /** + * Optional Redis library from `require('redis')` or equivalent. If not, Bottleneck will attempt to require Redis at runtime.
+ */ + readonly Redis?: any; + }; + + type IORedisConnectionOptions = { + /** + * This object is passed directly to ioredis' constructor method. + */ + readonly clientOptions?: any; + /** + * When `clusterNodes` is not null, the client will be instantiated by calling `new Redis.Cluster(clusterNodes, clientOptions)`. + */ + readonly clusterNodes?: any; + /** + * An existing ioredis client to use. If using, `clientOptions` and `clusterNodes` will be ignored. + */ + readonly client?: any; + /** + * Optional IORedis library from `require('ioredis')` or equivalent. If not, Bottleneck will attempt to require IORedis at runtime. + */ + readonly Redis?: any; + }; + + type BatcherOptions = { + /** + * Maximum acceptable time (in milliseconds) a request can have to wait before being flushed to the `"batch"` event. + */ + readonly maxTime?: number | null; + /** + * Maximum number of requests in a batch. + */ + readonly maxSize?: number | null; + }; + + class BottleneckError extends Error { + } + + class RedisConnection { + constructor(options?: Bottleneck.RedisConnectionOptions); + + /** + * Register an event listener. + * @param name - The event name. + * @param fn - The callback function. + */ + on(name: "error", fn: (error: any) => void): void; + + /** + * Register an event listener for one event only. + * @param name - The event name. + * @param fn - The callback function. + */ + once(name: "error", fn: (error: any) => void): void; + + /** + * Waits until the connection is ready and returns the raw Node_Redis clients. + */ + ready(): Promise<ClientsList>; + + /** + * Close the redis clients. + * @param flush - Write transient data before closing. + */ + disconnect(flush?: boolean): Promise<void>; + } + + class IORedisConnection { + constructor(options?: Bottleneck.IORedisConnectionOptions); + + /** + * Register an event listener. + * @param name - The event name. + * @param fn - The callback function. + */ + on(name: "error", fn: (error: any) => void): void; + + /** + * Register an event listener for one event only. + * @param name - The event name. + * @param fn - The callback function. + */ + once(name: "error", fn: (error: any) => void): void; + + /** + * Waits until the connection is ready and returns the raw ioredis clients. + */ + ready(): Promise<ClientsList>; + + /** + * Close the redis clients. + * @param flush - Write transient data before closing. + */ + disconnect(flush?: boolean): Promise<void>; + } + + class Batcher { + constructor(options?: Bottleneck.BatcherOptions); + + /** + * Register an event listener. + * @param name - The event name. + * @param fn - The callback function. + */ + on(name: string, fn: Function): void; + on(name: "error", fn: (error: any) => void): void; + on(name: "batch", fn: (batch: any[]) => void): void; + + /** + * Register an event listener for one event only. + * @param name - The event name. + * @param fn - The callback function. + */ + once(name: string, fn: Function): void; + once(name: "error", fn: (error: any) => void): void; + once(name: "batch", fn: (batch: any[]) => void): void; + + /** + * Add a request to the Batcher. Batches are flushed to the "batch" event. + */ + add(data: any): Promise<any>; + } + + class Group { + constructor(options?: Bottleneck.ConstructorOptions); + + id: string; + datastore: string; + connection?: Bottleneck.RedisConnection | Bottleneck.IORedisConnection; + + /** + * Returns the limiter for the specified key. + * @param str - The limiter key. + */ + key(str: string): Bottleneck; + + /** + * Register an event listener. + * @param name - The event name.
+ * @param fn - The callback function. + */ + on(name: string, fn: Function): void; + on(name: "error", fn: (error: any) => void): void; + on(name: "created", fn: (limiter: Bottleneck, key: string) => void): void; + + /** + * Register an event listener for one event only. + * @param name - The event name. + * @param fn - The callback function. + */ + once(name: string, fn: Function): void; + once(name: "error", fn: (error: any) => void): void; + once(name: "created", fn: (limiter: Bottleneck, key: string) => void): void; + + /** + * Removes all registered event listeners. + * @param name - The optional event name to remove listeners from. + */ + removeAllListeners(name?: string): void; + + /** + * Updates the group settings. + * @param options - The new settings. + */ + updateSettings(options: Bottleneck.ConstructorOptions): void; + + /** + * Deletes the limiter for the given key. + * Returns true if a key was deleted. + * @param str - The key + */ + deleteKey(str: string): Promise<boolean>; + + /** + * Disconnects the underlying redis clients, unless the Group was created with the `connection` option. + * @param flush - Write transient data before closing. + */ + disconnect(flush?: boolean): Promise<void>; + + /** + * Returns all the key-limiter pairs. + */ + limiters(): Bottleneck.GroupLimiterPair[]; + + /** + * Returns all Group keys in the local instance + */ + keys(): string[]; + + /** + * Returns all Group keys in the Cluster + */ + clusterKeys(): Promise<string[]>; + } + + class Events { + constructor(object: Object); + + /** + * Returns the number of listeners for the event name + * @param name - The event name. + */ + listenerCount(name: string): number; + + /** + * Returns a promise with the first non-null/non-undefined result from a listener + * @param name - The event name. + * @param args - The arguments to pass to the event listeners. + */ + trigger(name: string, ...args: any[]): Promise<any>; + } + } + + class Bottleneck { + public static readonly strategy: { + /** + * When adding a new job to a limiter, if the queue length reaches `highWater`, drop the oldest job with the lowest priority. This is useful when jobs that have been waiting for too long are not important anymore. If all the queued jobs are more important (based on their `priority` value) than the one being added, it will not be added. + */ + readonly LEAK: Bottleneck.Strategy; + /** + * Same as `LEAK`, except it will only drop jobs that are less important than the one being added. If all the queued jobs are as or more important than the new one, it will not be added. + */ + readonly OVERFLOW_PRIORITY: Bottleneck.Strategy; + /** + * When adding a new job to a limiter, if the queue length reaches `highWater`, do not add the new job. This strategy totally ignores priority levels. + */ + readonly OVERFLOW: Bottleneck.Strategy; + /** + * When adding a new job to a limiter, if the queue length reaches `highWater`, the limiter falls into "blocked mode". All queued jobs are dropped and no new jobs will be accepted until the limiter unblocks. It will unblock after `penalty` milliseconds have passed without receiving a new job. `penalty` is equal to `15 * minTime` (or `5000` if `minTime` is `0`) by default and can be changed by calling `changePenalty()`. This strategy is ideal when bruteforce attacks are to be expected. This strategy totally ignores priority levels.
+ */ + readonly BLOCK: Bottleneck.Strategy; + }; + + constructor(options?: Bottleneck.ConstructorOptions); + + id: string; + datastore: string; + connection?: Bottleneck.RedisConnection | Bottleneck.IORedisConnection; + + /** + * Returns a promise which will be resolved once the limiter is ready to accept jobs + * or rejected if it fails to start up. + */ + ready(): Promise<any>; + + /** + * Returns a datastore-specific object of redis clients. + */ + clients(): Bottleneck.ClientsList; + + /** + * Returns the name of the Redis pubsub channel used for this limiter + */ + channel(): string; + + /** + * Disconnects the underlying redis clients, unless the limiter was created with the `connection` option. + * @param flush - Write transient data before closing. + */ + disconnect(flush?: boolean): Promise<void>; + + /** + * Broadcast a string to every limiter in the Cluster. + */ + publish(message: string): Promise<void>; + + /** + * Returns an object with the current number of jobs per status. + */ + counts(): Bottleneck.Counts; + + /** + * Returns the status of the job with the provided job id. + */ + jobStatus(id: string): Bottleneck.Status; + + /** + * Returns the ids of all the jobs with the specified status. + */ + jobs(status?: Bottleneck.Status): string[]; + + /** + * Returns the number of requests queued. + * @param priority - Returns the number of requests queued with the specified priority. + */ + queued(priority?: number): number; + + /** + * Returns the number of requests queued across the Cluster. + */ + clusterQueued(): Promise<number>; + + /** + * Returns whether there are any jobs currently in the queue or in the process of being added to the queue. + */ + empty(): boolean; + + /** + * Returns the total weight of jobs in a RUNNING or EXECUTING state in the Cluster. + */ + running(): Promise<number>; + + /** + * Returns the total weight of jobs in a DONE state in the Cluster. + */ + done(): Promise<number>; + + /** + * If a request was added right now, would it be run immediately? + * @param weight - The weight of the request + */ + check(weight?: number): Promise<boolean>; + + /** + * Register an event listener. + * @param name - The event name. + * @param fn - The callback function. + */ + on(name: "error", fn: (error: any) => void): void; + on(name: "empty", fn: () => void): void; + on(name: "idle", fn: () => void): void; + on(name: "depleted", fn: (empty: boolean) => void): void; + on(name: "message", fn: (message: string) => void): void; + on(name: "debug", fn: (message: string, info: any) => void): void; + on(name: "dropped", fn: (dropped: Bottleneck.EventInfoDropped) => void): void; + on(name: "received", fn: (info: Bottleneck.EventInfo) => void): void; + on(name: "queued", fn: (info: Bottleneck.EventInfoQueued) => void): void; + on(name: "scheduled", fn: (info: Bottleneck.EventInfo) => void): void; + on(name: "executing", fn: (info: Bottleneck.EventInfoRetryable) => void): void; + on(name: "failed", fn: (error: any, info: Bottleneck.EventInfoRetryable) => Promise<number | void | null> | number | void | null): void; + on(name: "retry", fn: (message: string, info: Bottleneck.EventInfoRetryable) => void): void; + on(name: "done", fn: (info: Bottleneck.EventInfoRetryable) => void): void; + + /** + * Register an event listener for one event only. + * @param name - The event name. + * @param fn - The callback function.
+ */ + once(name: "error", fn: (error: any) => void): void; + once(name: "empty", fn: () => void): void; + once(name: "idle", fn: () => void): void; + once(name: "depleted", fn: (empty: boolean) => void): void; + once(name: "message", fn: (message: string) => void): void; + once(name: "debug", fn: (message: string, info: any) => void): void; + once(name: "dropped", fn: (dropped: Bottleneck.EventInfoDropped) => void): void; + once(name: "received", fn: (info: Bottleneck.EventInfo) => void): void; + once(name: "queued", fn: (info: Bottleneck.EventInfoQueued) => void): void; + once(name: "scheduled", fn: (info: Bottleneck.EventInfo) => void): void; + once(name: "executing", fn: (info: Bottleneck.EventInfoRetryable) => void): void; + once(name: "failed", fn: (error: any, info: Bottleneck.EventInfoRetryable) => Promise | number | void | null): void; + once(name: "retry", fn: (message: string, info: Bottleneck.EventInfoRetryable) => void): void; + once(name: "done", fn: (info: Bottleneck.EventInfoRetryable) => void): void; + + /** + * Removes all registered event listeners. + * @param name - The optional event name to remove listeners from. + */ + removeAllListeners(name?: string): void; + + /** + * Changes the settings for future requests. + * @param options - The new settings. + */ + updateSettings(options?: Bottleneck.ConstructorOptions): Bottleneck; + + /** + * Adds to the reservoir count and returns the new value. + */ + incrementReservoir(incrementBy: number): Promise; + + /** + * The `stop()` method is used to safely shutdown a limiter. It prevents any new jobs from being added to the limiter and waits for all Executing jobs to complete. + */ + stop(options?: Bottleneck.StopOptions): Promise; + + /** + * Returns the current reservoir count, if any. + */ + currentReservoir(): Promise; + + /** + * Chain this limiter to another. + * @param limiter - The limiter that requests to this limiter must also follow. 
+ */ + chain(limiter?: Bottleneck): Bottleneck; + + <%_ for (var count of [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) { _%> + wrap<R<%_ for (var idx = 1; idx <= count; idx++) { _%>, A<%= idx %><%_ } _%>>(fn: (<%= Array.apply(null, Array(count)).map((e, i) => i+1).map(i => `arg${i}: A${i}`).join(", ") %>) => PromiseLike<R>): ((<%_ for (var idx = 1; idx <= count; idx++) { _%><%_ if (idx > 1) { %>, <% } %>arg<%= idx %>: A<%= idx %><%_ } _%>) => Promise<R>) & { withOptions: (options: Bottleneck.JobOptions<%_ for (var idx = 1; idx <= count; idx++) { _%>, arg<%= idx %>: A<%= idx %><%_ } _%>) => Promise<R>; }; + <%_ } _%> + + <%_ for (var count of [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) { _%> + submit<R<%_ for (var idx = 1; idx <= count; idx++) { _%>, A<%= idx %><%_ } _%>>(fn: (<%_ for (var idx = 1; idx <= count; idx++) { _%>arg<%= idx %>: A<%= idx %>, <% } _%>callback: Bottleneck.Callback<R>) => void<%_ for (var idx = 1; idx <= count; idx++) { _%>, arg<%= idx %>: A<%= idx %><%_ } _%>, callback: Bottleneck.Callback<R>): void; + <%_ } _%> + + <%_ for (var count of [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) { _%> + submit<R<%_ for (var idx = 1; idx <= count; idx++) { _%>, A<%= idx %><%_ } _%>>(options: Bottleneck.JobOptions, fn: (<%_ for (var idx = 1; idx <= count; idx++) { _%>arg<%= idx %>: A<%= idx %>, <% } _%>callback: Bottleneck.Callback<R>) => void<%_ for (var idx = 1; idx <= count; idx++) { _%>, arg<%= idx %>: A<%= idx %><%_ } _%>, callback: Bottleneck.Callback<R>): void; + <%_ } _%> + + <%_ for (var count of [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) { _%> + schedule<R<%_ for (var idx = 1; idx <= count; idx++) { _%>, A<%= idx %><%_ } _%>>(fn: (<%= Array.apply(null, Array(count)).map((e, i) => i+1).map(i => `arg${i}: A${i}`).join(", ") %>) => PromiseLike<R><%_ for (var idx = 1; idx <= count; idx++) { _%>, arg<%= idx %>: A<%= idx %><%_ } _%>): Promise<R>; + <%_ } _%> + + <%_ for (var count of [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) { _%> + schedule<R<%_ for (var idx = 1; idx <= count; idx++) { _%>, A<%= idx %><%_ } _%>>(options: Bottleneck.JobOptions, fn: (<%= Array.apply(null, Array(count)).map((e, i) => i+1).map(i => `arg${i}: A${i}`).join(", ") %>) => PromiseLike<R><%_ for (var idx = 1; idx <= count; idx++) { _%>, arg<%= idx %>: A<%= idx %><%_ } _%>): Promise<R>; + <%_ } _%> + } + + export default Bottleneck; +} diff --git a/node_modules/bottleneck/bower.json b/node_modules/bottleneck/bower.json new file mode 100644 index 000000000..b72e87ee3 --- /dev/null +++ b/node_modules/bottleneck/bower.json @@ -0,0 +1,30 @@ +{ + "name": "bottleneck", + "main": "bottleneck.js", + "version": "2.19.5", + "homepage": "https://github.com/SGrondin/bottleneck", + "authors": [ + "SGrondin " + ], + "description": "Distributed task scheduler and rate limiter", + "moduleType": [ + "globals", + "node" + ], + "keywords": [ + "async", + "rate", + "limiter", + "limiting", + "throttle", + "throttling", + "load", + "ddos" + ], + "license": "MIT", + "ignore": [ + "**/.*", + "node_modules", + "bower_components" + ] +} diff --git a/node_modules/bottleneck/es5.js b/node_modules/bottleneck/es5.js new file mode 100644 index 000000000..a177b6540 --- /dev/null +++ b/node_modules/bottleneck/es5.js @@ -0,0 +1,5064 @@ +/** + * This file contains the full Bottleneck library (MIT) compiled to ES5. + * https://github.com/SGrondin/bottleneck + * It also contains the regenerator-runtime (MIT), necessary for Babel-generated ES5 code to execute promise and async/await code. + * See the following link for Copyright and License information: + * https://github.com/facebook/regenerator/blob/master/packages/regenerator-runtime/runtime.js + */ +(function (global, factory) { + typeof exports === 'object' && typeof module !== 'undefined' ? module.exports = factory() : + typeof define === 'function' && define.amd ?
define(factory) : + (global.Bottleneck = factory()); +}(this, (function () { 'use strict'; + + var commonjsGlobal = typeof globalThis !== 'undefined' ? globalThis : typeof window !== 'undefined' ? window : typeof global !== 'undefined' ? global : typeof self !== 'undefined' ? self : {}; + + function createCommonjsModule(fn, module) { + return module = { exports: {} }, fn(module, module.exports), module.exports; + } + + function getCjsExportFromNamespace (n) { + return n && n['default'] || n; + } + + var runtime = createCommonjsModule(function (module) { + /** + * Copyright (c) 2014-present, Facebook, Inc. + * + * This source code is licensed under the MIT license found in the + * LICENSE file in the root directory of this source tree. + */ + + !(function(global) { + + var Op = Object.prototype; + var hasOwn = Op.hasOwnProperty; + var undefined; // More compressible than void 0. + var $Symbol = typeof Symbol === "function" ? Symbol : {}; + var iteratorSymbol = $Symbol.iterator || "@@iterator"; + var asyncIteratorSymbol = $Symbol.asyncIterator || "@@asyncIterator"; + var toStringTagSymbol = $Symbol.toStringTag || "@@toStringTag"; + var runtime = global.regeneratorRuntime; + if (runtime) { + { + // If regeneratorRuntime is defined globally and we're in a module, + // make the exports object identical to regeneratorRuntime. + module.exports = runtime; + } + // Don't bother evaluating the rest of this file if the runtime was + // already defined globally. + return; + } + + // Define the runtime globally (as expected by generated code) as either + // module.exports (if we're in a module) or a new, empty object. + runtime = global.regeneratorRuntime = module.exports; + + function wrap(innerFn, outerFn, self, tryLocsList) { + // If outerFn provided and outerFn.prototype is a Generator, then outerFn.prototype instanceof Generator. + var protoGenerator = outerFn && outerFn.prototype instanceof Generator ? outerFn : Generator; + var generator = Object.create(protoGenerator.prototype); + var context = new Context(tryLocsList || []); + + // The ._invoke method unifies the implementations of the .next, + // .throw, and .return methods. + generator._invoke = makeInvokeMethod(innerFn, self, context); + + return generator; + } + runtime.wrap = wrap; + + // Try/catch helper to minimize deoptimizations. Returns a completion + // record like context.tryEntries[i].completion. This interface could + // have been (and was previously) designed to take a closure to be + // invoked without arguments, but in all the cases we care about we + // already have an existing method we want to call, so there's no need + // to create a new function object. We can even get away with assuming + // the method takes exactly one argument, since that happens to be true + // in every case, so we don't have to touch the arguments object. The + // only additional allocation required is the completion record, which + // has a stable shape and so hopefully should be cheap to allocate. + function tryCatch(fn, obj, arg) { + try { + return { type: "normal", arg: fn.call(obj, arg) }; + } catch (err) { + return { type: "throw", arg: err }; + } + } + + var GenStateSuspendedStart = "suspendedStart"; + var GenStateSuspendedYield = "suspendedYield"; + var GenStateExecuting = "executing"; + var GenStateCompleted = "completed"; + + // Returning this object from the innerFn has the same effect as + // breaking out of the dispatch switch statement. 
+ var ContinueSentinel = {}; + + // Dummy constructor functions that we use as the .constructor and + // .constructor.prototype properties for functions that return Generator + // objects. For full spec compliance, you may wish to configure your + // minifier not to mangle the names of these two functions. + function Generator() {} + function GeneratorFunction() {} + function GeneratorFunctionPrototype() {} + + // This is a polyfill for %IteratorPrototype% for environments that + // don't natively support it. + var IteratorPrototype = {}; + IteratorPrototype[iteratorSymbol] = function () { + return this; + }; + + var getProto = Object.getPrototypeOf; + var NativeIteratorPrototype = getProto && getProto(getProto(values([]))); + if (NativeIteratorPrototype && + NativeIteratorPrototype !== Op && + hasOwn.call(NativeIteratorPrototype, iteratorSymbol)) { + // This environment has a native %IteratorPrototype%; use it instead + // of the polyfill. + IteratorPrototype = NativeIteratorPrototype; + } + + var Gp = GeneratorFunctionPrototype.prototype = + Generator.prototype = Object.create(IteratorPrototype); + GeneratorFunction.prototype = Gp.constructor = GeneratorFunctionPrototype; + GeneratorFunctionPrototype.constructor = GeneratorFunction; + GeneratorFunctionPrototype[toStringTagSymbol] = + GeneratorFunction.displayName = "GeneratorFunction"; + + // Helper for defining the .next, .throw, and .return methods of the + // Iterator interface in terms of a single ._invoke method. + function defineIteratorMethods(prototype) { + ["next", "throw", "return"].forEach(function(method) { + prototype[method] = function(arg) { + return this._invoke(method, arg); + }; + }); + } + + runtime.isGeneratorFunction = function(genFun) { + var ctor = typeof genFun === "function" && genFun.constructor; + return ctor + ? ctor === GeneratorFunction || + // For the native GeneratorFunction constructor, the best we can + // do is to check its .name property. + (ctor.displayName || ctor.name) === "GeneratorFunction" + : false; + }; + + runtime.mark = function(genFun) { + if (Object.setPrototypeOf) { + Object.setPrototypeOf(genFun, GeneratorFunctionPrototype); + } else { + genFun.__proto__ = GeneratorFunctionPrototype; + if (!(toStringTagSymbol in genFun)) { + genFun[toStringTagSymbol] = "GeneratorFunction"; + } + } + genFun.prototype = Object.create(Gp); + return genFun; + }; + + // Within the body of any async function, `await x` is transformed to + // `yield regeneratorRuntime.awrap(x)`, so that the runtime can test + // `hasOwn.call(value, "__await")` to determine if the yielded value is + // meant to be awaited. + runtime.awrap = function(arg) { + return { __await: arg }; + }; + + function AsyncIterator(generator) { + function invoke(method, arg, resolve, reject) { + var record = tryCatch(generator[method], generator, arg); + if (record.type === "throw") { + reject(record.arg); + } else { + var result = record.arg; + var value = result.value; + if (value && + typeof value === "object" && + hasOwn.call(value, "__await")) { + return Promise.resolve(value.__await).then(function(value) { + invoke("next", value, resolve, reject); + }, function(err) { + invoke("throw", err, resolve, reject); + }); + } + + return Promise.resolve(value).then(function(unwrapped) { + // When a yielded Promise is resolved, its final value becomes + // the .value of the Promise<{value,done}> result for the + // current iteration. 
+ result.value = unwrapped; + resolve(result); + }, function(error) { + // If a rejected Promise was yielded, throw the rejection back + // into the async generator function so it can be handled there. + return invoke("throw", error, resolve, reject); + }); + } + } + + var previousPromise; + + function enqueue(method, arg) { + function callInvokeWithMethodAndArg() { + return new Promise(function(resolve, reject) { + invoke(method, arg, resolve, reject); + }); + } + + return previousPromise = + // If enqueue has been called before, then we want to wait until + // all previous Promises have been resolved before calling invoke, + // so that results are always delivered in the correct order. If + // enqueue has not been called before, then it is important to + // call invoke immediately, without waiting on a callback to fire, + // so that the async generator function has the opportunity to do + // any necessary setup in a predictable way. This predictability + // is why the Promise constructor synchronously invokes its + // executor callback, and why async functions synchronously + // execute code before the first await. Since we implement simple + // async functions in terms of async generators, it is especially + // important to get this right, even though it requires care. + previousPromise ? previousPromise.then( + callInvokeWithMethodAndArg, + // Avoid propagating failures to Promises returned by later + // invocations of the iterator. + callInvokeWithMethodAndArg + ) : callInvokeWithMethodAndArg(); + } + + // Define the unified helper method that is used to implement .next, + // .throw, and .return (see defineIteratorMethods). + this._invoke = enqueue; + } + + defineIteratorMethods(AsyncIterator.prototype); + AsyncIterator.prototype[asyncIteratorSymbol] = function () { + return this; + }; + runtime.AsyncIterator = AsyncIterator; + + // Note that simple async functions are implemented on top of + // AsyncIterator objects; they just return a Promise for the value of + // the final result produced by the iterator. + runtime.async = function(innerFn, outerFn, self, tryLocsList) { + var iter = new AsyncIterator( + wrap(innerFn, outerFn, self, tryLocsList) + ); + + return runtime.isGeneratorFunction(outerFn) + ? iter // If outerFn is a generator, return the full iterator. + : iter.next().then(function(result) { + return result.done ? result.value : iter.next(); + }); + }; + + function makeInvokeMethod(innerFn, self, context) { + var state = GenStateSuspendedStart; + + return function invoke(method, arg) { + if (state === GenStateExecuting) { + throw new Error("Generator is already running"); + } + + if (state === GenStateCompleted) { + if (method === "throw") { + throw arg; + } + + // Be forgiving, per 25.3.3.3.3 of the spec: + // https://people.mozilla.org/~jorendorff/es6-draft.html#sec-generatorresume + return doneResult(); + } + + context.method = method; + context.arg = arg; + + while (true) { + var delegate = context.delegate; + if (delegate) { + var delegateResult = maybeInvokeDelegate(delegate, context); + if (delegateResult) { + if (delegateResult === ContinueSentinel) continue; + return delegateResult; + } + } + + if (context.method === "next") { + // Setting context._sent for legacy support of Babel's + // function.sent implementation. 
+ context.sent = context._sent = context.arg; + + } else if (context.method === "throw") { + if (state === GenStateSuspendedStart) { + state = GenStateCompleted; + throw context.arg; + } + + context.dispatchException(context.arg); + + } else if (context.method === "return") { + context.abrupt("return", context.arg); + } + + state = GenStateExecuting; + + var record = tryCatch(innerFn, self, context); + if (record.type === "normal") { + // If an exception is thrown from innerFn, we leave state === + // GenStateExecuting and loop back for another invocation. + state = context.done + ? GenStateCompleted + : GenStateSuspendedYield; + + if (record.arg === ContinueSentinel) { + continue; + } + + return { + value: record.arg, + done: context.done + }; + + } else if (record.type === "throw") { + state = GenStateCompleted; + // Dispatch the exception by looping back around to the + // context.dispatchException(context.arg) call above. + context.method = "throw"; + context.arg = record.arg; + } + } + }; + } + + // Call delegate.iterator[context.method](context.arg) and handle the + // result, either by returning a { value, done } result from the + // delegate iterator, or by modifying context.method and context.arg, + // setting context.delegate to null, and returning the ContinueSentinel. + function maybeInvokeDelegate(delegate, context) { + var method = delegate.iterator[context.method]; + if (method === undefined) { + // A .throw or .return when the delegate iterator has no .throw + // method always terminates the yield* loop. + context.delegate = null; + + if (context.method === "throw") { + if (delegate.iterator.return) { + // If the delegate iterator has a return method, give it a + // chance to clean up. + context.method = "return"; + context.arg = undefined; + maybeInvokeDelegate(delegate, context); + + if (context.method === "throw") { + // If maybeInvokeDelegate(context) changed context.method from + // "return" to "throw", let that override the TypeError below. + return ContinueSentinel; + } + } + + context.method = "throw"; + context.arg = new TypeError( + "The iterator does not provide a 'throw' method"); + } + + return ContinueSentinel; + } + + var record = tryCatch(method, delegate.iterator, context.arg); + + if (record.type === "throw") { + context.method = "throw"; + context.arg = record.arg; + context.delegate = null; + return ContinueSentinel; + } + + var info = record.arg; + + if (! info) { + context.method = "throw"; + context.arg = new TypeError("iterator result is not an object"); + context.delegate = null; + return ContinueSentinel; + } + + if (info.done) { + // Assign the result of the finished delegate to the temporary + // variable specified by delegate.resultName (see delegateYield). + context[delegate.resultName] = info.value; + + // Resume execution at the desired location (see delegateYield). + context.next = delegate.nextLoc; + + // If context.method was "throw" but the delegate handled the + // exception, let the outer generator proceed normally. If + // context.method was "next", forget context.arg since it has been + // "consumed" by the delegate iterator. If context.method was + // "return", allow the original .return call to continue in the + // outer generator. + if (context.method !== "return") { + context.method = "next"; + context.arg = undefined; + } + + } else { + // Re-yield the result returned by the delegate method. + return info; + } + + // The delegate iterator is finished, so forget it and continue with + // the outer generator. 
+ context.delegate = null; + return ContinueSentinel; + } + + // Define Generator.prototype.{next,throw,return} in terms of the + // unified ._invoke helper method. + defineIteratorMethods(Gp); + + Gp[toStringTagSymbol] = "Generator"; + + // A Generator should always return itself as the iterator object when the + // @@iterator function is called on it. Some browsers' implementations of the + // iterator prototype chain incorrectly implement this, causing the Generator + // object to not be returned from this call. This ensures that doesn't happen. + // See https://github.com/facebook/regenerator/issues/274 for more details. + Gp[iteratorSymbol] = function() { + return this; + }; + + Gp.toString = function() { + return "[object Generator]"; + }; + + function pushTryEntry(locs) { + var entry = { tryLoc: locs[0] }; + + if (1 in locs) { + entry.catchLoc = locs[1]; + } + + if (2 in locs) { + entry.finallyLoc = locs[2]; + entry.afterLoc = locs[3]; + } + + this.tryEntries.push(entry); + } + + function resetTryEntry(entry) { + var record = entry.completion || {}; + record.type = "normal"; + delete record.arg; + entry.completion = record; + } + + function Context(tryLocsList) { + // The root entry object (effectively a try statement without a catch + // or a finally block) gives us a place to store values thrown from + // locations where there is no enclosing try statement. + this.tryEntries = [{ tryLoc: "root" }]; + tryLocsList.forEach(pushTryEntry, this); + this.reset(true); + } + + runtime.keys = function(object) { + var keys = []; + for (var key in object) { + keys.push(key); + } + keys.reverse(); + + // Rather than returning an object with a next method, we keep + // things simple and return the next function itself. + return function next() { + while (keys.length) { + var key = keys.pop(); + if (key in object) { + next.value = key; + next.done = false; + return next; + } + } + + // To avoid creating an additional object, we just hang the .value + // and .done properties off the next function object itself. This + // also ensures that the minifier will not anonymize the function. + next.done = true; + return next; + }; + }; + + function values(iterable) { + if (iterable) { + var iteratorMethod = iterable[iteratorSymbol]; + if (iteratorMethod) { + return iteratorMethod.call(iterable); + } + + if (typeof iterable.next === "function") { + return iterable; + } + + if (!isNaN(iterable.length)) { + var i = -1, next = function next() { + while (++i < iterable.length) { + if (hasOwn.call(iterable, i)) { + next.value = iterable[i]; + next.done = false; + return next; + } + } + + next.value = undefined; + next.done = true; + + return next; + }; + + return next.next = next; + } + } + + // Return an iterator with no values. + return { next: doneResult }; + } + runtime.values = values; + + function doneResult() { + return { value: undefined, done: true }; + } + + Context.prototype = { + constructor: Context, + + reset: function(skipTempReset) { + this.prev = 0; + this.next = 0; + // Resetting context._sent for legacy support of Babel's + // function.sent implementation. 
+ this.sent = this._sent = undefined; + this.done = false; + this.delegate = null; + + this.method = "next"; + this.arg = undefined; + + this.tryEntries.forEach(resetTryEntry); + + if (!skipTempReset) { + for (var name in this) { + // Not sure about the optimal order of these conditions: + if (name.charAt(0) === "t" && + hasOwn.call(this, name) && + !isNaN(+name.slice(1))) { + this[name] = undefined; + } + } + } + }, + + stop: function() { + this.done = true; + + var rootEntry = this.tryEntries[0]; + var rootRecord = rootEntry.completion; + if (rootRecord.type === "throw") { + throw rootRecord.arg; + } + + return this.rval; + }, + + dispatchException: function(exception) { + if (this.done) { + throw exception; + } + + var context = this; + function handle(loc, caught) { + record.type = "throw"; + record.arg = exception; + context.next = loc; + + if (caught) { + // If the dispatched exception was caught by a catch block, + // then let that catch block handle the exception normally. + context.method = "next"; + context.arg = undefined; + } + + return !! caught; + } + + for (var i = this.tryEntries.length - 1; i >= 0; --i) { + var entry = this.tryEntries[i]; + var record = entry.completion; + + if (entry.tryLoc === "root") { + // Exception thrown outside of any try block that could handle + // it, so set the completion value of the entire function to + // throw the exception. + return handle("end"); + } + + if (entry.tryLoc <= this.prev) { + var hasCatch = hasOwn.call(entry, "catchLoc"); + var hasFinally = hasOwn.call(entry, "finallyLoc"); + + if (hasCatch && hasFinally) { + if (this.prev < entry.catchLoc) { + return handle(entry.catchLoc, true); + } else if (this.prev < entry.finallyLoc) { + return handle(entry.finallyLoc); + } + + } else if (hasCatch) { + if (this.prev < entry.catchLoc) { + return handle(entry.catchLoc, true); + } + + } else if (hasFinally) { + if (this.prev < entry.finallyLoc) { + return handle(entry.finallyLoc); + } + + } else { + throw new Error("try statement without catch or finally"); + } + } + } + }, + + abrupt: function(type, arg) { + for (var i = this.tryEntries.length - 1; i >= 0; --i) { + var entry = this.tryEntries[i]; + if (entry.tryLoc <= this.prev && + hasOwn.call(entry, "finallyLoc") && + this.prev < entry.finallyLoc) { + var finallyEntry = entry; + break; + } + } + + if (finallyEntry && + (type === "break" || + type === "continue") && + finallyEntry.tryLoc <= arg && + arg <= finallyEntry.finallyLoc) { + // Ignore the finally entry if control is not jumping to a + // location outside the try/catch block. + finallyEntry = null; + } + + var record = finallyEntry ? 
finallyEntry.completion : {}; + record.type = type; + record.arg = arg; + + if (finallyEntry) { + this.method = "next"; + this.next = finallyEntry.finallyLoc; + return ContinueSentinel; + } + + return this.complete(record); + }, + + complete: function(record, afterLoc) { + if (record.type === "throw") { + throw record.arg; + } + + if (record.type === "break" || + record.type === "continue") { + this.next = record.arg; + } else if (record.type === "return") { + this.rval = this.arg = record.arg; + this.method = "return"; + this.next = "end"; + } else if (record.type === "normal" && afterLoc) { + this.next = afterLoc; + } + + return ContinueSentinel; + }, + + finish: function(finallyLoc) { + for (var i = this.tryEntries.length - 1; i >= 0; --i) { + var entry = this.tryEntries[i]; + if (entry.finallyLoc === finallyLoc) { + this.complete(entry.completion, entry.afterLoc); + resetTryEntry(entry); + return ContinueSentinel; + } + } + }, + + "catch": function(tryLoc) { + for (var i = this.tryEntries.length - 1; i >= 0; --i) { + var entry = this.tryEntries[i]; + if (entry.tryLoc === tryLoc) { + var record = entry.completion; + if (record.type === "throw") { + var thrown = record.arg; + resetTryEntry(entry); + } + return thrown; + } + } + + // The context.catch method must only be called with a location + // argument that corresponds to a known catch block. + throw new Error("illegal catch attempt"); + }, + + delegateYield: function(iterable, resultName, nextLoc) { + this.delegate = { + iterator: values(iterable), + resultName: resultName, + nextLoc: nextLoc + }; + + if (this.method === "next") { + // Deliberately forget the last sent value so that we don't + // accidentally pass it on to the delegate. + this.arg = undefined; + } + + return ContinueSentinel; + } + }; + })( + // In sloppy mode, unbound `this` refers to the global object, fallback to + // Function constructor if we're in global strict mode. That is sadly a form + // of indirect eval which violates Content Security Policy. + (function() { + return this || (typeof self === "object" && self); + })() || Function("return this")() + ); + }); + + function _typeof(obj) { + if (typeof Symbol === "function" && typeof Symbol.iterator === "symbol") { + _typeof = function (obj) { + return typeof obj; + }; + } else { + _typeof = function (obj) { + return obj && typeof Symbol === "function" && obj.constructor === Symbol && obj !== Symbol.prototype ? 
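The Context methods above (dispatchException, abrupt, complete, finish, and the "catch" method) replay try/catch/finally semantics from a table of source locations rather than real try statements: an abrupt completion is detoured through any enclosing finally block before it takes effect. The observable behavior, sketched in plain ES2015:

    function* g() {
      try {
        yield "a";
      } finally {
        yield "cleanup"; // abrupt() routes the early return through here first
      }
    }

    const it = g();
    console.log(it.next());         // { value: "a", done: false }
    console.log(it.return("done")); // { value: "cleanup", done: false }
    console.log(it.next());         // { value: "done", done: true }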
"symbol" : typeof obj; + }; + } + + return _typeof(obj); + } + + function asyncGeneratorStep(gen, resolve, reject, _next, _throw, key, arg) { + try { + var info = gen[key](arg); + var value = info.value; + } catch (error) { + reject(error); + return; + } + + if (info.done) { + resolve(value); + } else { + Promise.resolve(value).then(_next, _throw); + } + } + + function _asyncToGenerator(fn) { + return function () { + var self = this, + args = arguments; + return new Promise(function (resolve, reject) { + var gen = fn.apply(self, args); + + function _next(value) { + asyncGeneratorStep(gen, resolve, reject, _next, _throw, "next", value); + } + + function _throw(err) { + asyncGeneratorStep(gen, resolve, reject, _next, _throw, "throw", err); + } + + _next(undefined); + }); + }; + } + + function _classCallCheck(instance, Constructor) { + if (!(instance instanceof Constructor)) { + throw new TypeError("Cannot call a class as a function"); + } + } + + function _defineProperties(target, props) { + for (var i = 0; i < props.length; i++) { + var descriptor = props[i]; + descriptor.enumerable = descriptor.enumerable || false; + descriptor.configurable = true; + if ("value" in descriptor) descriptor.writable = true; + Object.defineProperty(target, descriptor.key, descriptor); + } + } + + function _createClass(Constructor, protoProps, staticProps) { + if (protoProps) _defineProperties(Constructor.prototype, protoProps); + if (staticProps) _defineProperties(Constructor, staticProps); + return Constructor; + } + + function _inherits(subClass, superClass) { + if (typeof superClass !== "function" && superClass !== null) { + throw new TypeError("Super expression must either be null or a function"); + } + + subClass.prototype = Object.create(superClass && superClass.prototype, { + constructor: { + value: subClass, + writable: true, + configurable: true + } + }); + if (superClass) _setPrototypeOf(subClass, superClass); + } + + function _getPrototypeOf(o) { + _getPrototypeOf = Object.setPrototypeOf ? Object.getPrototypeOf : function _getPrototypeOf(o) { + return o.__proto__ || Object.getPrototypeOf(o); + }; + return _getPrototypeOf(o); + } + + function _setPrototypeOf(o, p) { + _setPrototypeOf = Object.setPrototypeOf || function _setPrototypeOf(o, p) { + o.__proto__ = p; + return o; + }; + + return _setPrototypeOf(o, p); + } + + function isNativeReflectConstruct() { + if (typeof Reflect === "undefined" || !Reflect.construct) return false; + if (Reflect.construct.sham) return false; + if (typeof Proxy === "function") return true; + + try { + Date.prototype.toString.call(Reflect.construct(Date, [], function () {})); + return true; + } catch (e) { + return false; + } + } + + function _construct(Parent, args, Class) { + if (isNativeReflectConstruct()) { + _construct = Reflect.construct; + } else { + _construct = function _construct(Parent, args, Class) { + var a = [null]; + a.push.apply(a, args); + var Constructor = Function.bind.apply(Parent, a); + var instance = new Constructor(); + if (Class) _setPrototypeOf(instance, Class.prototype); + return instance; + }; + } + + return _construct.apply(null, arguments); + } + + function _isNativeFunction(fn) { + return Function.toString.call(fn).indexOf("[native code]") !== -1; + } + + function _wrapNativeSuper(Class) { + var _cache = typeof Map === "function" ? 
new Map() : undefined; + + _wrapNativeSuper = function _wrapNativeSuper(Class) { + if (Class === null || !_isNativeFunction(Class)) return Class; + + if (typeof Class !== "function") { + throw new TypeError("Super expression must either be null or a function"); + } + + if (typeof _cache !== "undefined") { + if (_cache.has(Class)) return _cache.get(Class); + + _cache.set(Class, Wrapper); + } + + function Wrapper() { + return _construct(Class, arguments, _getPrototypeOf(this).constructor); + } + + Wrapper.prototype = Object.create(Class.prototype, { + constructor: { + value: Wrapper, + enumerable: false, + writable: true, + configurable: true + } + }); + return _setPrototypeOf(Wrapper, Class); + }; + + return _wrapNativeSuper(Class); + } + + function _assertThisInitialized(self) { + if (self === void 0) { + throw new ReferenceError("this hasn't been initialised - super() hasn't been called"); + } + + return self; + } + + function _possibleConstructorReturn(self, call) { + if (call && (typeof call === "object" || typeof call === "function")) { + return call; + } + + return _assertThisInitialized(self); + } + + function _slicedToArray(arr, i) { + return _arrayWithHoles(arr) || _iterableToArrayLimit(arr, i) || _nonIterableRest(); + } + + function _toArray(arr) { + return _arrayWithHoles(arr) || _iterableToArray(arr) || _nonIterableRest(); + } + + function _toConsumableArray(arr) { + return _arrayWithoutHoles(arr) || _iterableToArray(arr) || _nonIterableSpread(); + } + + function _arrayWithoutHoles(arr) { + if (Array.isArray(arr)) { + for (var i = 0, arr2 = new Array(arr.length); i < arr.length; i++) arr2[i] = arr[i]; + + return arr2; + } + } + + function _arrayWithHoles(arr) { + if (Array.isArray(arr)) return arr; + } + + function _iterableToArray(iter) { + if (Symbol.iterator in Object(iter) || Object.prototype.toString.call(iter) === "[object Arguments]") return Array.from(iter); + } + + function _iterableToArrayLimit(arr, i) { + var _arr = []; + var _n = true; + var _d = false; + var _e = undefined; + + try { + for (var _i = arr[Symbol.iterator](), _s; !(_n = (_s = _i.next()).done); _n = true) { + _arr.push(_s.value); + + if (i && _arr.length === i) break; + } + } catch (err) { + _d = true; + _e = err; + } finally { + try { + if (!_n && _i["return"] != null) _i["return"](); + } finally { + if (_d) throw _e; + } + } + + return _arr; + } + + function _nonIterableSpread() { + throw new TypeError("Invalid attempt to spread non-iterable instance"); + } + + function _nonIterableRest() { + throw new TypeError("Invalid attempt to destructure non-iterable instance"); + } + + var load = function load(received, defaults) { + var onto = arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : {}; + var k, ref, v; + + for (k in defaults) { + v = defaults[k]; + onto[k] = (ref = received[k]) != null ? ref : v; + } + + return onto; + }; + + var overwrite = function overwrite(received, defaults) { + var onto = arguments.length > 2 && arguments[2] !== undefined ? 
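The load helper above is bottleneck's defaults merger: every key of defaults is copied onto the target, and a received value wins only when it is not null or undefined. For example:

    const defaults = { minTime: 0, maxConcurrent: null };

    console.log(load({ minTime: 100 }, defaults));
    // -> { minTime: 100, maxConcurrent: null }

    console.log(load({ minTime: undefined }, defaults));
    // -> { minTime: 0, maxConcurrent: null }  (nullish values fall back to the default)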
arguments[2] : {}; + var k, v; + + for (k in received) { + v = received[k]; + + if (defaults[k] !== void 0) { + onto[k] = v; + } + } + + return onto; + }; + + var parser = { + load: load, + overwrite: overwrite + }; + + var DLList; + + DLList = + /*#__PURE__*/ + function () { + function DLList(incr, decr) { + _classCallCheck(this, DLList); + + this.incr = incr; + this.decr = decr; + this._first = null; + this._last = null; + this.length = 0; + } + + _createClass(DLList, [{ + key: "push", + value: function push(value) { + var node; + this.length++; + + if (typeof this.incr === "function") { + this.incr(); + } + + node = { + value: value, + prev: this._last, + next: null + }; + + if (this._last != null) { + this._last.next = node; + this._last = node; + } else { + this._first = this._last = node; + } + + return void 0; + } + }, { + key: "shift", + value: function shift() { + var value; + + if (this._first == null) { + return; + } else { + this.length--; + + if (typeof this.decr === "function") { + this.decr(); + } + } + + value = this._first.value; + + if ((this._first = this._first.next) != null) { + this._first.prev = null; + } else { + this._last = null; + } + + return value; + } + }, { + key: "first", + value: function first() { + if (this._first != null) { + return this._first.value; + } + } + }, { + key: "getArray", + value: function getArray() { + var node, ref, results; + node = this._first; + results = []; + + while (node != null) { + results.push((ref = node, node = node.next, ref.value)); + } + + return results; + } + }, { + key: "forEachShift", + value: function forEachShift(cb) { + var node; + node = this.shift(); + + while (node != null) { + cb(node), node = this.shift(); + } + + return void 0; + } + }, { + key: "debug", + value: function debug() { + var node, ref, ref1, ref2, results; + node = this._first; + results = []; + + while (node != null) { + results.push((ref = node, node = node.next, { + value: ref.value, + prev: (ref1 = ref.prev) != null ? ref1.value : void 0, + next: (ref2 = ref.next) != null ? ref2.value : void 0 + })); + } + + return results; + } + }]); + + return DLList; + }(); + + var DLList_1 = DLList; + + var Events; + + Events = + /*#__PURE__*/ + function () { + function Events(instance) { + var _this = this; + + _classCallCheck(this, Events); + + this.instance = instance; + this._events = {}; + + if (this.instance.on != null || this.instance.once != null || this.instance.removeAllListeners != null) { + throw new Error("An Emitter already exists for this object"); + } + + this.instance.on = function (name, cb) { + return _this._addListener(name, "many", cb); + }; + + this.instance.once = function (name, cb) { + return _this._addListener(name, "once", cb); + }; + + this.instance.removeAllListeners = function () { + var name = arguments.length > 0 && arguments[0] !== undefined ? 
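DLList above is a small doubly-linked FIFO whose optional incr/decr hooks let a container (Queues, below) maintain an aggregate length counter. Basic usage with no hooks:

    const list = new DLList(null, null);
    list.push("a");
    list.push("b");

    console.log(list.first());    // "a"
    console.log(list.shift());    // "a"  (removed from the front)
    console.log(list.getArray()); // ["b"]
    console.log(list.length);     // 1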
arguments[0] : null; + + if (name != null) { + return delete _this._events[name]; + } else { + return _this._events = {}; + } + }; + } + + _createClass(Events, [{ + key: "_addListener", + value: function _addListener(name, status, cb) { + var base; + + if ((base = this._events)[name] == null) { + base[name] = []; + } + + this._events[name].push({ + cb: cb, + status: status + }); + + return this.instance; + } + }, { + key: "listenerCount", + value: function listenerCount(name) { + if (this._events[name] != null) { + return this._events[name].length; + } else { + return 0; + } + } + }, { + key: "trigger", + value: function () { + var _trigger = _asyncToGenerator( + /*#__PURE__*/ + regeneratorRuntime.mark(function _callee2(name) { + var _this2 = this; + + var _len, + args, + _key, + e, + promises, + _args2 = arguments; + + return regeneratorRuntime.wrap(function _callee2$(_context2) { + while (1) { + switch (_context2.prev = _context2.next) { + case 0: + for (_len = _args2.length, args = new Array(_len > 1 ? _len - 1 : 0), _key = 1; _key < _len; _key++) { + args[_key - 1] = _args2[_key]; + } + + _context2.prev = 1; + + if (name !== "debug") { + this.trigger("debug", "Event triggered: ".concat(name), args); + } + + if (!(this._events[name] == null)) { + _context2.next = 5; + break; + } + + return _context2.abrupt("return"); + + case 5: + this._events[name] = this._events[name].filter(function (listener) { + return listener.status !== "none"; + }); + promises = this._events[name].map( + /*#__PURE__*/ + function () { + var _ref = _asyncToGenerator( + /*#__PURE__*/ + regeneratorRuntime.mark(function _callee(listener) { + var e, returned; + return regeneratorRuntime.wrap(function _callee$(_context) { + while (1) { + switch (_context.prev = _context.next) { + case 0: + if (!(listener.status === "none")) { + _context.next = 2; + break; + } + + return _context.abrupt("return"); + + case 2: + if (listener.status === "once") { + listener.status = "none"; + } + + _context.prev = 3; + returned = typeof listener.cb === "function" ? listener.cb.apply(listener, args) : void 0; + + if (!(typeof (returned != null ? 
returned.then : void 0) === "function")) { + _context.next = 11; + break; + } + + _context.next = 8; + return returned; + + case 8: + return _context.abrupt("return", _context.sent); + + case 11: + return _context.abrupt("return", returned); + + case 12: + _context.next = 19; + break; + + case 14: + _context.prev = 14; + _context.t0 = _context["catch"](3); + e = _context.t0; + + { + _this2.trigger("error", e); + } + + return _context.abrupt("return", null); + + case 19: + case "end": + return _context.stop(); + } + } + }, _callee, null, [[3, 14]]); + })); + + return function (_x2) { + return _ref.apply(this, arguments); + }; + }()); + _context2.next = 9; + return Promise.all(promises); + + case 9: + _context2.t0 = function (x) { + return x != null; + }; + + return _context2.abrupt("return", _context2.sent.find(_context2.t0)); + + case 13: + _context2.prev = 13; + _context2.t1 = _context2["catch"](1); + e = _context2.t1; + + { + this.trigger("error", e); + } + + return _context2.abrupt("return", null); + + case 18: + case "end": + return _context2.stop(); + } + } + }, _callee2, this, [[1, 13]]); + })); + + function trigger(_x) { + return _trigger.apply(this, arguments); + } + + return trigger; + }() + }]); + + return Events; + }(); + + var Events_1 = Events; + + var DLList$1, Events$1, Queues; + DLList$1 = DLList_1; + Events$1 = Events_1; + + Queues = + /*#__PURE__*/ + function () { + function Queues(num_priorities) { + _classCallCheck(this, Queues); + + var i; + this.Events = new Events$1(this); + this._length = 0; + + this._lists = function () { + var _this = this; + + var j, ref, results; + results = []; + + for (i = j = 1, ref = num_priorities; 1 <= ref ? j <= ref : j >= ref; i = 1 <= ref ? ++j : --j) { + results.push(new DLList$1(function () { + return _this.incr(); + }, function () { + return _this.decr(); + })); + } + + return results; + }.call(this); + } + + _createClass(Queues, [{ + key: "incr", + value: function incr() { + if (this._length++ === 0) { + return this.Events.trigger("leftzero"); + } + } + }, { + key: "decr", + value: function decr() { + if (--this._length === 0) { + return this.Events.trigger("zero"); + } + } + }, { + key: "push", + value: function push(job) { + return this._lists[job.options.priority].push(job); + } + }, { + key: "queued", + value: function queued(priority) { + if (priority != null) { + return this._lists[priority].length; + } else { + return this._length; + } + } + }, { + key: "shiftAll", + value: function shiftAll(fn) { + return this._lists.forEach(function (list) { + return list.forEachShift(fn); + }); + } + }, { + key: "getFirst", + value: function getFirst() { + var arr = arguments.length > 0 && arguments[0] !== undefined ? 
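Events above is a minimal emitter that grafts on, once, and removeAllListeners onto a host object; trigger is async, so listeners may return promises, and the trigger call resolves to the first non-null value a listener produced. A sketch of the wiring:

    const host = {};
    const events = new Events(host); // installs host.on / host.once / host.removeAllListeners

    host.on("msg", (text) => console.log("listener saw:", text));
    host.once("msg", () => "handled"); // "once" listeners are disabled after one call

    events.trigger("msg", "hello").then((result) => {
      console.log(result); // "handled", the first non-null listener return value
    });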
arguments[0] : this._lists; + var j, len, list; + + for (j = 0, len = arr.length; j < len; j++) { + list = arr[j]; + + if (list.length > 0) { + return list; + } + } + + return []; + } + }, { + key: "shiftLastFrom", + value: function shiftLastFrom(priority) { + return this.getFirst(this._lists.slice(priority).reverse()).shift(); + } + }]); + + return Queues; + }(); + + var Queues_1 = Queues; + + var BottleneckError; + + BottleneckError = + /*#__PURE__*/ + function (_Error) { + _inherits(BottleneckError, _Error); + + function BottleneckError() { + _classCallCheck(this, BottleneckError); + + return _possibleConstructorReturn(this, _getPrototypeOf(BottleneckError).apply(this, arguments)); + } + + return BottleneckError; + }(_wrapNativeSuper(Error)); + + var BottleneckError_1 = BottleneckError; + + var BottleneckError$1, DEFAULT_PRIORITY, Job, NUM_PRIORITIES, parser$1; + NUM_PRIORITIES = 10; + DEFAULT_PRIORITY = 5; + parser$1 = parser; + BottleneckError$1 = BottleneckError_1; + + Job = + /*#__PURE__*/ + function () { + function Job(task, args, options, jobDefaults, rejectOnDrop, Events, _states, Promise) { + var _this = this; + + _classCallCheck(this, Job); + + this.task = task; + this.args = args; + this.rejectOnDrop = rejectOnDrop; + this.Events = Events; + this._states = _states; + this.Promise = Promise; + this.options = parser$1.load(options, jobDefaults); + this.options.priority = this._sanitizePriority(this.options.priority); + + if (this.options.id === jobDefaults.id) { + this.options.id = "".concat(this.options.id, "-").concat(this._randomIndex()); + } + + this.promise = new this.Promise(function (_resolve, _reject) { + _this._resolve = _resolve; + _this._reject = _reject; + }); + this.retryCount = 0; + } + + _createClass(Job, [{ + key: "_sanitizePriority", + value: function _sanitizePriority(priority) { + var sProperty; + sProperty = ~~priority !== priority ? DEFAULT_PRIORITY : priority; + + if (sProperty < 0) { + return 0; + } else if (sProperty > NUM_PRIORITIES - 1) { + return NUM_PRIORITIES - 1; + } else { + return sProperty; + } + } + }, { + key: "_randomIndex", + value: function _randomIndex() { + return Math.random().toString(36).slice(2); + } + }, { + key: "doDrop", + value: function doDrop() { + var _ref = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {}, + error = _ref.error, + _ref$message = _ref.message, + message = _ref$message === void 0 ? "This job has been dropped by Bottleneck" : _ref$message; + + if (this._states.remove(this.options.id)) { + if (this.rejectOnDrop) { + this._reject(error != null ? error : new BottleneckError$1(message)); + } + + this.Events.trigger("dropped", { + args: this.args, + options: this.options, + task: this.task, + promise: this.promise + }); + return true; + } else { + return false; + } + } + }, { + key: "_assertStatus", + value: function _assertStatus(expected) { + var status; + status = this._states.jobStatus(this.options.id); + + if (!(status === expected || expected === "DONE" && status === null)) { + throw new BottleneckError$1("Invalid job status ".concat(status, ", expected ").concat(expected, ". 
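Job priorities run from 0 (most urgent) to NUM_PRIORITIES - 1 = 9, with DEFAULT_PRIORITY = 5; _sanitizePriority above substitutes the default for anything that is not an integer and clamps the rest. A compact restatement of that rule:

    const sanitize = (p) => {
      const s = ~~p !== p ? 5 : p;        // non-integers fall back to DEFAULT_PRIORITY
      return Math.min(Math.max(s, 0), 9); // clamp into 0..NUM_PRIORITIES - 1
    };

    console.assert(sanitize(3.7) === 5);
    console.assert(sanitize(-2) === 0);
    console.assert(sanitize(42) === 9);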
Please open an issue at https://github.com/SGrondin/bottleneck/issues")); + } + } + }, { + key: "doReceive", + value: function doReceive() { + this._states.start(this.options.id); + + return this.Events.trigger("received", { + args: this.args, + options: this.options + }); + } + }, { + key: "doQueue", + value: function doQueue(reachedHWM, blocked) { + this._assertStatus("RECEIVED"); + + this._states.next(this.options.id); + + return this.Events.trigger("queued", { + args: this.args, + options: this.options, + reachedHWM: reachedHWM, + blocked: blocked + }); + } + }, { + key: "doRun", + value: function doRun() { + if (this.retryCount === 0) { + this._assertStatus("QUEUED"); + + this._states.next(this.options.id); + } else { + this._assertStatus("EXECUTING"); + } + + return this.Events.trigger("scheduled", { + args: this.args, + options: this.options + }); + } + }, { + key: "doExecute", + value: function () { + var _doExecute = _asyncToGenerator( + /*#__PURE__*/ + regeneratorRuntime.mark(function _callee(chained, clearGlobalState, run, free) { + var error, eventInfo, passed; + return regeneratorRuntime.wrap(function _callee$(_context) { + while (1) { + switch (_context.prev = _context.next) { + case 0: + if (this.retryCount === 0) { + this._assertStatus("RUNNING"); + + this._states.next(this.options.id); + } else { + this._assertStatus("EXECUTING"); + } + + eventInfo = { + args: this.args, + options: this.options, + retryCount: this.retryCount + }; + this.Events.trigger("executing", eventInfo); + _context.prev = 3; + _context.next = 6; + return chained != null ? chained.schedule.apply(chained, [this.options, this.task].concat(_toConsumableArray(this.args))) : this.task.apply(this, _toConsumableArray(this.args)); + + case 6: + passed = _context.sent; + + if (!clearGlobalState()) { + _context.next = 13; + break; + } + + this.doDone(eventInfo); + _context.next = 11; + return free(this.options, eventInfo); + + case 11: + this._assertStatus("DONE"); + + return _context.abrupt("return", this._resolve(passed)); + + case 13: + _context.next = 19; + break; + + case 15: + _context.prev = 15; + _context.t0 = _context["catch"](3); + error = _context.t0; + return _context.abrupt("return", this._onFailure(error, eventInfo, clearGlobalState, run, free)); + + case 19: + case "end": + return _context.stop(); + } + } + }, _callee, this, [[3, 15]]); + })); + + function doExecute(_x, _x2, _x3, _x4) { + return _doExecute.apply(this, arguments); + } + + return doExecute; + }() + }, { + key: "doExpire", + value: function doExpire(clearGlobalState, run, free) { + var error, eventInfo; + + if (this._states.jobStatus(this.options.id === "RUNNING")) { + this._states.next(this.options.id); + } + + this._assertStatus("EXECUTING"); + + eventInfo = { + args: this.args, + options: this.options, + retryCount: this.retryCount + }; + error = new BottleneckError$1("This job timed out after ".concat(this.options.expiration, " ms.")); + return this._onFailure(error, eventInfo, clearGlobalState, run, free); + } + }, { + key: "_onFailure", + value: function () { + var _onFailure2 = _asyncToGenerator( + /*#__PURE__*/ + regeneratorRuntime.mark(function _callee2(error, eventInfo, clearGlobalState, run, free) { + var retry, retryAfter; + return regeneratorRuntime.wrap(function _callee2$(_context2) { + while (1) { + switch (_context2.prev = _context2.next) { + case 0: + if (!clearGlobalState()) { + _context2.next = 16; + break; + } + + _context2.next = 3; + return this.Events.trigger("failed", error, eventInfo); + + case 3: + retry = 
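doExecute above runs the task either directly or through a chained limiter (the chained.schedule(...) branch), which appears to be the path bottleneck's documented limiter.chain(other) feature takes: a job must clear both limiters before it executes. A sketch, where fetchSomething is a stand-in task:

    const Bottleneck = require("bottleneck");

    const shared = new Bottleneck({ maxConcurrent: 4 }); // global ceiling
    const perHost = new Bottleneck({ minTime: 100 });    // per-target spacing
    perHost.chain(shared);

    // Runs only once perHost AND shared both grant capacity:
    perHost.schedule(() => fetchSomething());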
_context2.sent; + + if (!(retry != null)) { + _context2.next = 11; + break; + } + + retryAfter = ~~retry; + this.Events.trigger("retry", "Retrying ".concat(this.options.id, " after ").concat(retryAfter, " ms"), eventInfo); + this.retryCount++; + return _context2.abrupt("return", run(retryAfter)); + + case 11: + this.doDone(eventInfo); + _context2.next = 14; + return free(this.options, eventInfo); + + case 14: + this._assertStatus("DONE"); + + return _context2.abrupt("return", this._reject(error)); + + case 16: + case "end": + return _context2.stop(); + } + } + }, _callee2, this); + })); + + function _onFailure(_x5, _x6, _x7, _x8, _x9) { + return _onFailure2.apply(this, arguments); + } + + return _onFailure; + }() + }, { + key: "doDone", + value: function doDone(eventInfo) { + this._assertStatus("EXECUTING"); + + this._states.next(this.options.id); + + return this.Events.trigger("done", eventInfo); + } + }]); + + return Job; + }(); + + var Job_1 = Job; + + var BottleneckError$2, LocalDatastore, parser$2; + parser$2 = parser; + BottleneckError$2 = BottleneckError_1; + + LocalDatastore = + /*#__PURE__*/ + function () { + function LocalDatastore(instance, storeOptions, storeInstanceOptions) { + _classCallCheck(this, LocalDatastore); + + this.instance = instance; + this.storeOptions = storeOptions; + this.clientId = this.instance._randomIndex(); + parser$2.load(storeInstanceOptions, storeInstanceOptions, this); + this._nextRequest = this._lastReservoirRefresh = this._lastReservoirIncrease = Date.now(); + this._running = 0; + this._done = 0; + this._unblockTime = 0; + this.ready = this.Promise.resolve(); + this.clients = {}; + + this._startHeartbeat(); + } + + _createClass(LocalDatastore, [{ + key: "_startHeartbeat", + value: function _startHeartbeat() { + var _this = this; + + var base; + + if (this.heartbeat == null && (this.storeOptions.reservoirRefreshInterval != null && this.storeOptions.reservoirRefreshAmount != null || this.storeOptions.reservoirIncreaseInterval != null && this.storeOptions.reservoirIncreaseAmount != null)) { + return typeof (base = this.heartbeat = setInterval(function () { + var amount, incr, maximum, now, reservoir; + now = Date.now(); + + if (_this.storeOptions.reservoirRefreshInterval != null && now >= _this._lastReservoirRefresh + _this.storeOptions.reservoirRefreshInterval) { + _this._lastReservoirRefresh = now; + _this.storeOptions.reservoir = _this.storeOptions.reservoirRefreshAmount; + + _this.instance._drainAll(_this.computeCapacity()); + } + + if (_this.storeOptions.reservoirIncreaseInterval != null && now >= _this._lastReservoirIncrease + _this.storeOptions.reservoirIncreaseInterval) { + var _this$storeOptions = _this.storeOptions; + amount = _this$storeOptions.reservoirIncreaseAmount; + maximum = _this$storeOptions.reservoirIncreaseMaximum; + reservoir = _this$storeOptions.reservoir; + _this._lastReservoirIncrease = now; + incr = maximum != null ? Math.min(amount, maximum - reservoir) : amount; + + if (incr > 0) { + _this.storeOptions.reservoir += incr; + return _this.instance._drainAll(_this.computeCapacity()); + } + } + }, this.heartbeatInterval)).unref === "function" ? 
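_onFailure above implements bottleneck's documented retry contract, the hook @octokit/plugin-retry builds on: when a job throws, the "failed" event is awaited, and a listener that returns a number schedules a retry after that many milliseconds (announced via the "retry" event, with retryCount incremented). A minimal sketch, where flaky is a stand-in task that fails twice:

    const Bottleneck = require("bottleneck");
    const limiter = new Bottleneck({ maxConcurrent: 1 });

    let attempts = 0;
    const flaky = async () => {
      if (attempts++ < 2) throw new Error("transient failure");
      return "ok";
    };

    limiter.on("failed", (error, jobInfo) => {
      if (jobInfo.retryCount < 2) return 250; // retry after 250 ms
    });
    limiter.on("retry", (message) => console.log(message));

    limiter.schedule(flaky).then(console.log); // "ok", after two retries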
base.unref() : void 0; + } else { + return clearInterval(this.heartbeat); + } + } + }, { + key: "__publish__", + value: function () { + var _publish__ = _asyncToGenerator( + /*#__PURE__*/ + regeneratorRuntime.mark(function _callee(message) { + return regeneratorRuntime.wrap(function _callee$(_context) { + while (1) { + switch (_context.prev = _context.next) { + case 0: + _context.next = 2; + return this.yieldLoop(); + + case 2: + return _context.abrupt("return", this.instance.Events.trigger("message", message.toString())); + + case 3: + case "end": + return _context.stop(); + } + } + }, _callee, this); + })); + + function __publish__(_x) { + return _publish__.apply(this, arguments); + } + + return __publish__; + }() + }, { + key: "__disconnect__", + value: function () { + var _disconnect__ = _asyncToGenerator( + /*#__PURE__*/ + regeneratorRuntime.mark(function _callee2(flush) { + return regeneratorRuntime.wrap(function _callee2$(_context2) { + while (1) { + switch (_context2.prev = _context2.next) { + case 0: + _context2.next = 2; + return this.yieldLoop(); + + case 2: + clearInterval(this.heartbeat); + return _context2.abrupt("return", this.Promise.resolve()); + + case 4: + case "end": + return _context2.stop(); + } + } + }, _callee2, this); + })); + + function __disconnect__(_x2) { + return _disconnect__.apply(this, arguments); + } + + return __disconnect__; + }() + }, { + key: "yieldLoop", + value: function yieldLoop() { + var t = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : 0; + return new this.Promise(function (resolve, reject) { + return setTimeout(resolve, t); + }); + } + }, { + key: "computePenalty", + value: function computePenalty() { + var ref; + return (ref = this.storeOptions.penalty) != null ? ref : 15 * this.storeOptions.minTime || 5000; + } + }, { + key: "__updateSettings__", + value: function () { + var _updateSettings__ = _asyncToGenerator( + /*#__PURE__*/ + regeneratorRuntime.mark(function _callee3(options) { + return regeneratorRuntime.wrap(function _callee3$(_context3) { + while (1) { + switch (_context3.prev = _context3.next) { + case 0: + _context3.next = 2; + return this.yieldLoop(); + + case 2: + parser$2.overwrite(options, options, this.storeOptions); + + this._startHeartbeat(); + + this.instance._drainAll(this.computeCapacity()); + + return _context3.abrupt("return", true); + + case 6: + case "end": + return _context3.stop(); + } + } + }, _callee3, this); + })); + + function __updateSettings__(_x3) { + return _updateSettings__.apply(this, arguments); + } + + return __updateSettings__; + }() + }, { + key: "__running__", + value: function () { + var _running__ = _asyncToGenerator( + /*#__PURE__*/ + regeneratorRuntime.mark(function _callee4() { + return regeneratorRuntime.wrap(function _callee4$(_context4) { + while (1) { + switch (_context4.prev = _context4.next) { + case 0: + _context4.next = 2; + return this.yieldLoop(); + + case 2: + return _context4.abrupt("return", this._running); + + case 3: + case "end": + return _context4.stop(); + } + } + }, _callee4, this); + })); + + function __running__() { + return _running__.apply(this, arguments); + } + + return __running__; + }() + }, { + key: "__queued__", + value: function () { + var _queued__ = _asyncToGenerator( + /*#__PURE__*/ + regeneratorRuntime.mark(function _callee5() { + return regeneratorRuntime.wrap(function _callee5$(_context5) { + while (1) { + switch (_context5.prev = _context5.next) { + case 0: + _context5.next = 2; + return this.yieldLoop(); + + case 2: + return 
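The heartbeat installed by _startHeartbeat above is what makes reservoir-based limiting tick for the in-process datastore: on each interval it either resets the reservoir (refresh) or grows it toward a maximum (increase), then drains whatever jobs the new capacity allows. Configured with bottleneck's documented options:

    const Bottleneck = require("bottleneck");

    const limiter = new Bottleneck({
      reservoir: 10,                       // initial budget of jobs
      reservoirRefreshAmount: 10,          // reset the budget to 10...
      reservoirRefreshInterval: 60 * 1000, // ...every 60 s (the README suggests a multiple of 250 ms)
    });

    // Each scheduled job consumes weight 1 from the reservoir by default.
    limiter.schedule(() => doUpload()); // doUpload is a stand-in task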
_context5.abrupt("return", this.instance.queued()); + + case 3: + case "end": + return _context5.stop(); + } + } + }, _callee5, this); + })); + + function __queued__() { + return _queued__.apply(this, arguments); + } + + return __queued__; + }() + }, { + key: "__done__", + value: function () { + var _done__ = _asyncToGenerator( + /*#__PURE__*/ + regeneratorRuntime.mark(function _callee6() { + return regeneratorRuntime.wrap(function _callee6$(_context6) { + while (1) { + switch (_context6.prev = _context6.next) { + case 0: + _context6.next = 2; + return this.yieldLoop(); + + case 2: + return _context6.abrupt("return", this._done); + + case 3: + case "end": + return _context6.stop(); + } + } + }, _callee6, this); + })); + + function __done__() { + return _done__.apply(this, arguments); + } + + return __done__; + }() + }, { + key: "__groupCheck__", + value: function () { + var _groupCheck__ = _asyncToGenerator( + /*#__PURE__*/ + regeneratorRuntime.mark(function _callee7(time) { + return regeneratorRuntime.wrap(function _callee7$(_context7) { + while (1) { + switch (_context7.prev = _context7.next) { + case 0: + _context7.next = 2; + return this.yieldLoop(); + + case 2: + return _context7.abrupt("return", this._nextRequest + this.timeout < time); + + case 3: + case "end": + return _context7.stop(); + } + } + }, _callee7, this); + })); + + function __groupCheck__(_x4) { + return _groupCheck__.apply(this, arguments); + } + + return __groupCheck__; + }() + }, { + key: "computeCapacity", + value: function computeCapacity() { + var maxConcurrent, reservoir; + var _this$storeOptions2 = this.storeOptions; + maxConcurrent = _this$storeOptions2.maxConcurrent; + reservoir = _this$storeOptions2.reservoir; + + if (maxConcurrent != null && reservoir != null) { + return Math.min(maxConcurrent - this._running, reservoir); + } else if (maxConcurrent != null) { + return maxConcurrent - this._running; + } else if (reservoir != null) { + return reservoir; + } else { + return null; + } + } + }, { + key: "conditionsCheck", + value: function conditionsCheck(weight) { + var capacity; + capacity = this.computeCapacity(); + return capacity == null || weight <= capacity; + } + }, { + key: "__incrementReservoir__", + value: function () { + var _incrementReservoir__ = _asyncToGenerator( + /*#__PURE__*/ + regeneratorRuntime.mark(function _callee8(incr) { + var reservoir; + return regeneratorRuntime.wrap(function _callee8$(_context8) { + while (1) { + switch (_context8.prev = _context8.next) { + case 0: + _context8.next = 2; + return this.yieldLoop(); + + case 2: + reservoir = this.storeOptions.reservoir += incr; + + this.instance._drainAll(this.computeCapacity()); + + return _context8.abrupt("return", reservoir); + + case 5: + case "end": + return _context8.stop(); + } + } + }, _callee8, this); + })); + + function __incrementReservoir__(_x5) { + return _incrementReservoir__.apply(this, arguments); + } + + return __incrementReservoir__; + }() + }, { + key: "__currentReservoir__", + value: function () { + var _currentReservoir__ = _asyncToGenerator( + /*#__PURE__*/ + regeneratorRuntime.mark(function _callee9() { + return regeneratorRuntime.wrap(function _callee9$(_context9) { + while (1) { + switch (_context9.prev = _context9.next) { + case 0: + _context9.next = 2; + return this.yieldLoop(); + + case 2: + return _context9.abrupt("return", this.storeOptions.reservoir); + + case 3: + case "end": + return _context9.stop(); + } + } + }, _callee9, this); + })); + + function __currentReservoir__() { + return 
_currentReservoir__.apply(this, arguments); + } + + return __currentReservoir__; + }() + }, { + key: "isBlocked", + value: function isBlocked(now) { + return this._unblockTime >= now; + } + }, { + key: "check", + value: function check(weight, now) { + return this.conditionsCheck(weight) && this._nextRequest - now <= 0; + } + }, { + key: "__check__", + value: function () { + var _check__ = _asyncToGenerator( + /*#__PURE__*/ + regeneratorRuntime.mark(function _callee10(weight) { + var now; + return regeneratorRuntime.wrap(function _callee10$(_context10) { + while (1) { + switch (_context10.prev = _context10.next) { + case 0: + _context10.next = 2; + return this.yieldLoop(); + + case 2: + now = Date.now(); + return _context10.abrupt("return", this.check(weight, now)); + + case 4: + case "end": + return _context10.stop(); + } + } + }, _callee10, this); + })); + + function __check__(_x6) { + return _check__.apply(this, arguments); + } + + return __check__; + }() + }, { + key: "__register__", + value: function () { + var _register__ = _asyncToGenerator( + /*#__PURE__*/ + regeneratorRuntime.mark(function _callee11(index, weight, expiration) { + var now, wait; + return regeneratorRuntime.wrap(function _callee11$(_context11) { + while (1) { + switch (_context11.prev = _context11.next) { + case 0: + _context11.next = 2; + return this.yieldLoop(); + + case 2: + now = Date.now(); + + if (!this.conditionsCheck(weight)) { + _context11.next = 11; + break; + } + + this._running += weight; + + if (this.storeOptions.reservoir != null) { + this.storeOptions.reservoir -= weight; + } + + wait = Math.max(this._nextRequest - now, 0); + this._nextRequest = now + wait + this.storeOptions.minTime; + return _context11.abrupt("return", { + success: true, + wait: wait, + reservoir: this.storeOptions.reservoir + }); + + case 11: + return _context11.abrupt("return", { + success: false + }); + + case 12: + case "end": + return _context11.stop(); + } + } + }, _callee11, this); + })); + + function __register__(_x7, _x8, _x9) { + return _register__.apply(this, arguments); + } + + return __register__; + }() + }, { + key: "strategyIsBlock", + value: function strategyIsBlock() { + return this.storeOptions.strategy === 3; + } + }, { + key: "__submit__", + value: function () { + var _submit__ = _asyncToGenerator( + /*#__PURE__*/ + regeneratorRuntime.mark(function _callee12(queueLength, weight) { + var blocked, now, reachedHWM; + return regeneratorRuntime.wrap(function _callee12$(_context12) { + while (1) { + switch (_context12.prev = _context12.next) { + case 0: + _context12.next = 2; + return this.yieldLoop(); + + case 2: + if (!(this.storeOptions.maxConcurrent != null && weight > this.storeOptions.maxConcurrent)) { + _context12.next = 4; + break; + } + + throw new BottleneckError$2("Impossible to add a job having a weight of ".concat(weight, " to a limiter having a maxConcurrent setting of ").concat(this.storeOptions.maxConcurrent)); + + case 4: + now = Date.now(); + reachedHWM = this.storeOptions.highWater != null && queueLength === this.storeOptions.highWater && !this.check(weight, now); + blocked = this.strategyIsBlock() && (reachedHWM || this.isBlocked(now)); + + if (blocked) { + this._unblockTime = now + this.computePenalty(); + this._nextRequest = this._unblockTime + this.storeOptions.minTime; + + this.instance._dropAllQueued(); + } + + return _context12.abrupt("return", { + reachedHWM: reachedHWM, + blocked: blocked, + strategy: this.storeOptions.strategy + }); + + case 9: + case "end": + return _context12.stop(); + } + 
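computeCapacity (just above, alongside conditionsCheck) folds the two limits into a single number: spare maxConcurrent slots, the remaining reservoir, the smaller of the two, or null meaning unlimited; __register__ then debits that capacity and computes the wait before the next job may start. Restated standalone:

    function capacity(maxConcurrent, running, reservoir) {
      if (maxConcurrent != null && reservoir != null) {
        return Math.min(maxConcurrent - running, reservoir);
      }
      if (maxConcurrent != null) return maxConcurrent - running;
      if (reservoir != null) return reservoir;
      return null; // no limits configured, so every weight passes conditionsCheck
    }

    console.log(capacity(5, 2, 1));       // 1    -> the reservoir is the constraint
    console.log(capacity(5, 2, null));    // 3    -> three concurrency slots left
    console.log(capacity(null, 2, null)); // null -> unlimited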
} + }, _callee12, this); + })); + + function __submit__(_x10, _x11) { + return _submit__.apply(this, arguments); + } + + return __submit__; + }() + }, { + key: "__free__", + value: function () { + var _free__ = _asyncToGenerator( + /*#__PURE__*/ + regeneratorRuntime.mark(function _callee13(index, weight) { + return regeneratorRuntime.wrap(function _callee13$(_context13) { + while (1) { + switch (_context13.prev = _context13.next) { + case 0: + _context13.next = 2; + return this.yieldLoop(); + + case 2: + this._running -= weight; + this._done += weight; + + this.instance._drainAll(this.computeCapacity()); + + return _context13.abrupt("return", { + running: this._running + }); + + case 6: + case "end": + return _context13.stop(); + } + } + }, _callee13, this); + })); + + function __free__(_x12, _x13) { + return _free__.apply(this, arguments); + } + + return __free__; + }() + }]); + + return LocalDatastore; + }(); + + var LocalDatastore_1 = LocalDatastore; + + var lua = { + "blacklist_client.lua": "local blacklist = ARGV[num_static_argv + 1]\n\nif redis.call('zscore', client_last_seen_key, blacklist) then\n redis.call('zadd', client_last_seen_key, 0, blacklist)\nend\n\n\nreturn {}\n", + "check.lua": "local weight = tonumber(ARGV[num_static_argv + 1])\n\nlocal capacity = process_tick(now, false)['capacity']\nlocal nextRequest = tonumber(redis.call('hget', settings_key, 'nextRequest'))\n\nreturn conditions_check(capacity, weight) and nextRequest - now <= 0\n", + "conditions_check.lua": "local conditions_check = function (capacity, weight)\n return capacity == nil or weight <= capacity\nend\n", + "current_reservoir.lua": "return process_tick(now, false)['reservoir']\n", + "done.lua": "process_tick(now, false)\n\nreturn tonumber(redis.call('hget', settings_key, 'done'))\n", + "free.lua": "local index = ARGV[num_static_argv + 1]\n\nredis.call('zadd', job_expirations_key, 0, index)\n\nreturn process_tick(now, false)['running']\n", + "get_time.lua": "redis.replicate_commands()\n\nlocal get_time = function ()\n local time = redis.call('time')\n\n return tonumber(time[1]..string.sub(time[2], 1, 3))\nend\n", + "group_check.lua": "return not (redis.call('exists', settings_key) == 1)\n", + "heartbeat.lua": "process_tick(now, true)\n", + "increment_reservoir.lua": "local incr = tonumber(ARGV[num_static_argv + 1])\n\nredis.call('hincrby', settings_key, 'reservoir', incr)\n\nlocal reservoir = process_tick(now, true)['reservoir']\n\nlocal groupTimeout = tonumber(redis.call('hget', settings_key, 'groupTimeout'))\nrefresh_expiration(0, 0, groupTimeout)\n\nreturn reservoir\n", + "init.lua": "local clear = tonumber(ARGV[num_static_argv + 1])\nlocal limiter_version = ARGV[num_static_argv + 2]\nlocal num_local_argv = num_static_argv + 2\n\nif clear == 1 then\n redis.call('del', unpack(KEYS))\nend\n\nif redis.call('exists', settings_key) == 0 then\n -- Create\n local args = {'hmset', settings_key}\n\n for i = num_local_argv + 1, #ARGV do\n table.insert(args, ARGV[i])\n end\n\n redis.call(unpack(args))\n redis.call('hmset', settings_key,\n 'nextRequest', now,\n 'lastReservoirRefresh', now,\n 'lastReservoirIncrease', now,\n 'running', 0,\n 'done', 0,\n 'unblockTime', 0,\n 'capacityPriorityCounter', 0\n )\n\nelse\n -- Apply migrations\n local settings = redis.call('hmget', settings_key,\n 'id',\n 'version'\n )\n local id = settings[1]\n local current_version = settings[2]\n\n if current_version ~= limiter_version then\n local version_digits = {}\n for k, v in string.gmatch(current_version, \"([^.]+)\") do\n 
table.insert(version_digits, tonumber(k))\n end\n\n -- 2.10.0\n if version_digits[2] < 10 then\n redis.call('hsetnx', settings_key, 'reservoirRefreshInterval', '')\n redis.call('hsetnx', settings_key, 'reservoirRefreshAmount', '')\n redis.call('hsetnx', settings_key, 'lastReservoirRefresh', '')\n redis.call('hsetnx', settings_key, 'done', 0)\n redis.call('hset', settings_key, 'version', '2.10.0')\n end\n\n -- 2.11.1\n if version_digits[2] < 11 or (version_digits[2] == 11 and version_digits[3] < 1) then\n if redis.call('hstrlen', settings_key, 'lastReservoirRefresh') == 0 then\n redis.call('hmset', settings_key,\n 'lastReservoirRefresh', now,\n 'version', '2.11.1'\n )\n end\n end\n\n -- 2.14.0\n if version_digits[2] < 14 then\n local old_running_key = 'b_'..id..'_running'\n local old_executing_key = 'b_'..id..'_executing'\n\n if redis.call('exists', old_running_key) == 1 then\n redis.call('rename', old_running_key, job_weights_key)\n end\n if redis.call('exists', old_executing_key) == 1 then\n redis.call('rename', old_executing_key, job_expirations_key)\n end\n redis.call('hset', settings_key, 'version', '2.14.0')\n end\n\n -- 2.15.2\n if version_digits[2] < 15 or (version_digits[2] == 15 and version_digits[3] < 2) then\n redis.call('hsetnx', settings_key, 'capacityPriorityCounter', 0)\n redis.call('hset', settings_key, 'version', '2.15.2')\n end\n\n -- 2.17.0\n if version_digits[2] < 17 then\n redis.call('hsetnx', settings_key, 'clientTimeout', 10000)\n redis.call('hset', settings_key, 'version', '2.17.0')\n end\n\n -- 2.18.0\n if version_digits[2] < 18 then\n redis.call('hsetnx', settings_key, 'reservoirIncreaseInterval', '')\n redis.call('hsetnx', settings_key, 'reservoirIncreaseAmount', '')\n redis.call('hsetnx', settings_key, 'reservoirIncreaseMaximum', '')\n redis.call('hsetnx', settings_key, 'lastReservoirIncrease', now)\n redis.call('hset', settings_key, 'version', '2.18.0')\n end\n\n end\n\n process_tick(now, false)\nend\n\nlocal groupTimeout = tonumber(redis.call('hget', settings_key, 'groupTimeout'))\nrefresh_expiration(0, 0, groupTimeout)\n\nreturn {}\n", + "process_tick.lua": "local process_tick = function (now, always_publish)\n\n local compute_capacity = function (maxConcurrent, running, reservoir)\n if maxConcurrent ~= nil and reservoir ~= nil then\n return math.min((maxConcurrent - running), reservoir)\n elseif maxConcurrent ~= nil then\n return maxConcurrent - running\n elseif reservoir ~= nil then\n return reservoir\n else\n return nil\n end\n end\n\n local settings = redis.call('hmget', settings_key,\n 'id',\n 'maxConcurrent',\n 'running',\n 'reservoir',\n 'reservoirRefreshInterval',\n 'reservoirRefreshAmount',\n 'lastReservoirRefresh',\n 'reservoirIncreaseInterval',\n 'reservoirIncreaseAmount',\n 'reservoirIncreaseMaximum',\n 'lastReservoirIncrease',\n 'capacityPriorityCounter',\n 'clientTimeout'\n )\n local id = settings[1]\n local maxConcurrent = tonumber(settings[2])\n local running = tonumber(settings[3])\n local reservoir = tonumber(settings[4])\n local reservoirRefreshInterval = tonumber(settings[5])\n local reservoirRefreshAmount = tonumber(settings[6])\n local lastReservoirRefresh = tonumber(settings[7])\n local reservoirIncreaseInterval = tonumber(settings[8])\n local reservoirIncreaseAmount = tonumber(settings[9])\n local reservoirIncreaseMaximum = tonumber(settings[10])\n local lastReservoirIncrease = tonumber(settings[11])\n local capacityPriorityCounter = tonumber(settings[12])\n local clientTimeout = tonumber(settings[13])\n\n local initial_capacity = 
compute_capacity(maxConcurrent, running, reservoir)\n\n --\n -- Process 'running' changes\n --\n local expired = redis.call('zrangebyscore', job_expirations_key, '-inf', '('..now)\n\n if #expired > 0 then\n redis.call('zremrangebyscore', job_expirations_key, '-inf', '('..now)\n\n local flush_batch = function (batch, acc)\n local weights = redis.call('hmget', job_weights_key, unpack(batch))\n redis.call('hdel', job_weights_key, unpack(batch))\n local clients = redis.call('hmget', job_clients_key, unpack(batch))\n redis.call('hdel', job_clients_key, unpack(batch))\n\n -- Calculate sum of removed weights\n for i = 1, #weights do\n acc['total'] = acc['total'] + (tonumber(weights[i]) or 0)\n end\n\n -- Calculate sum of removed weights by client\n local client_weights = {}\n for i = 1, #clients do\n local removed = tonumber(weights[i]) or 0\n if removed > 0 then\n acc['client_weights'][clients[i]] = (acc['client_weights'][clients[i]] or 0) + removed\n end\n end\n end\n\n local acc = {\n ['total'] = 0,\n ['client_weights'] = {}\n }\n local batch_size = 1000\n\n -- Compute changes to Zsets and apply changes to Hashes\n for i = 1, #expired, batch_size do\n local batch = {}\n for j = i, math.min(i + batch_size - 1, #expired) do\n table.insert(batch, expired[j])\n end\n\n flush_batch(batch, acc)\n end\n\n -- Apply changes to Zsets\n if acc['total'] > 0 then\n redis.call('hincrby', settings_key, 'done', acc['total'])\n running = tonumber(redis.call('hincrby', settings_key, 'running', -acc['total']))\n end\n\n for client, weight in pairs(acc['client_weights']) do\n redis.call('zincrby', client_running_key, -weight, client)\n end\n end\n\n --\n -- Process 'reservoir' changes\n --\n local reservoirRefreshActive = reservoirRefreshInterval ~= nil and reservoirRefreshAmount ~= nil\n if reservoirRefreshActive and now >= lastReservoirRefresh + reservoirRefreshInterval then\n reservoir = reservoirRefreshAmount\n redis.call('hmset', settings_key,\n 'reservoir', reservoir,\n 'lastReservoirRefresh', now\n )\n end\n\n local reservoirIncreaseActive = reservoirIncreaseInterval ~= nil and reservoirIncreaseAmount ~= nil\n if reservoirIncreaseActive and now >= lastReservoirIncrease + reservoirIncreaseInterval then\n local num_intervals = math.floor((now - lastReservoirIncrease) / reservoirIncreaseInterval)\n local incr = reservoirIncreaseAmount * num_intervals\n if reservoirIncreaseMaximum ~= nil then\n incr = math.min(incr, reservoirIncreaseMaximum - (reservoir or 0))\n end\n if incr > 0 then\n reservoir = (reservoir or 0) + incr\n end\n redis.call('hmset', settings_key,\n 'reservoir', reservoir,\n 'lastReservoirIncrease', lastReservoirIncrease + (num_intervals * reservoirIncreaseInterval)\n )\n end\n\n --\n -- Clear unresponsive clients\n --\n local unresponsive = redis.call('zrangebyscore', client_last_seen_key, '-inf', (now - clientTimeout))\n local unresponsive_lookup = {}\n local terminated_clients = {}\n for i = 1, #unresponsive do\n unresponsive_lookup[unresponsive[i]] = true\n if tonumber(redis.call('zscore', client_running_key, unresponsive[i])) == 0 then\n table.insert(terminated_clients, unresponsive[i])\n end\n end\n if #terminated_clients > 0 then\n redis.call('zrem', client_running_key, unpack(terminated_clients))\n redis.call('hdel', client_num_queued_key, unpack(terminated_clients))\n redis.call('zrem', client_last_registered_key, unpack(terminated_clients))\n redis.call('zrem', client_last_seen_key, unpack(terminated_clients))\n end\n\n --\n -- Broadcast capacity changes\n --\n local final_capacity = 
compute_capacity(maxConcurrent, running, reservoir)\n\n if always_publish or (initial_capacity ~= nil and final_capacity == nil) then\n -- always_publish or was not unlimited, now unlimited\n redis.call('publish', 'b_'..id, 'capacity:'..(final_capacity or ''))\n\n elseif initial_capacity ~= nil and final_capacity ~= nil and final_capacity > initial_capacity then\n -- capacity was increased\n -- send the capacity message to the limiter having the lowest number of running jobs\n -- the tiebreaker is the limiter having not registered a job in the longest time\n\n local lowest_concurrency_value = nil\n local lowest_concurrency_clients = {}\n local lowest_concurrency_last_registered = {}\n local client_concurrencies = redis.call('zrange', client_running_key, 0, -1, 'withscores')\n\n for i = 1, #client_concurrencies, 2 do\n local client = client_concurrencies[i]\n local concurrency = tonumber(client_concurrencies[i+1])\n\n if (\n lowest_concurrency_value == nil or lowest_concurrency_value == concurrency\n ) and (\n not unresponsive_lookup[client]\n ) and (\n tonumber(redis.call('hget', client_num_queued_key, client)) > 0\n ) then\n lowest_concurrency_value = concurrency\n table.insert(lowest_concurrency_clients, client)\n local last_registered = tonumber(redis.call('zscore', client_last_registered_key, client))\n table.insert(lowest_concurrency_last_registered, last_registered)\n end\n end\n\n if #lowest_concurrency_clients > 0 then\n local position = 1\n local earliest = lowest_concurrency_last_registered[1]\n\n for i,v in ipairs(lowest_concurrency_last_registered) do\n if v < earliest then\n position = i\n earliest = v\n end\n end\n\n local next_client = lowest_concurrency_clients[position]\n redis.call('publish', 'b_'..id,\n 'capacity-priority:'..(final_capacity or '')..\n ':'..next_client..\n ':'..capacityPriorityCounter\n )\n redis.call('hincrby', settings_key, 'capacityPriorityCounter', '1')\n else\n redis.call('publish', 'b_'..id, 'capacity:'..(final_capacity or ''))\n end\n end\n\n return {\n ['capacity'] = final_capacity,\n ['running'] = running,\n ['reservoir'] = reservoir\n }\nend\n", + "queued.lua": "local clientTimeout = tonumber(redis.call('hget', settings_key, 'clientTimeout'))\nlocal valid_clients = redis.call('zrangebyscore', client_last_seen_key, (now - clientTimeout), 'inf')\nlocal client_queued = redis.call('hmget', client_num_queued_key, unpack(valid_clients))\n\nlocal sum = 0\nfor i = 1, #client_queued do\n sum = sum + tonumber(client_queued[i])\nend\n\nreturn sum\n", + "refresh_expiration.lua": "local refresh_expiration = function (now, nextRequest, groupTimeout)\n\n if groupTimeout ~= nil then\n local ttl = (nextRequest + groupTimeout) - now\n\n for i = 1, #KEYS do\n redis.call('pexpire', KEYS[i], ttl)\n end\n end\n\nend\n", + "refs.lua": "local settings_key = KEYS[1]\nlocal job_weights_key = KEYS[2]\nlocal job_expirations_key = KEYS[3]\nlocal job_clients_key = KEYS[4]\nlocal client_running_key = KEYS[5]\nlocal client_num_queued_key = KEYS[6]\nlocal client_last_registered_key = KEYS[7]\nlocal client_last_seen_key = KEYS[8]\n\nlocal now = tonumber(ARGV[1])\nlocal client = ARGV[2]\n\nlocal num_static_argv = 2\n", + "register.lua": "local index = ARGV[num_static_argv + 1]\nlocal weight = tonumber(ARGV[num_static_argv + 2])\nlocal expiration = tonumber(ARGV[num_static_argv + 3])\n\nlocal state = process_tick(now, false)\nlocal capacity = state['capacity']\nlocal reservoir = state['reservoir']\n\nlocal settings = redis.call('hmget', settings_key,\n 'nextRequest',\n 
'minTime',\n 'groupTimeout'\n)\nlocal nextRequest = tonumber(settings[1])\nlocal minTime = tonumber(settings[2])\nlocal groupTimeout = tonumber(settings[3])\n\nif conditions_check(capacity, weight) then\n\n redis.call('hincrby', settings_key, 'running', weight)\n redis.call('hset', job_weights_key, index, weight)\n if expiration ~= nil then\n redis.call('zadd', job_expirations_key, now + expiration, index)\n end\n redis.call('hset', job_clients_key, index, client)\n redis.call('zincrby', client_running_key, weight, client)\n redis.call('hincrby', client_num_queued_key, client, -1)\n redis.call('zadd', client_last_registered_key, now, client)\n\n local wait = math.max(nextRequest - now, 0)\n local newNextRequest = now + wait + minTime\n\n if reservoir == nil then\n redis.call('hset', settings_key,\n 'nextRequest', newNextRequest\n )\n else\n reservoir = reservoir - weight\n redis.call('hmset', settings_key,\n 'reservoir', reservoir,\n 'nextRequest', newNextRequest\n )\n end\n\n refresh_expiration(now, newNextRequest, groupTimeout)\n\n return {true, wait, reservoir}\n\nelse\n return {false}\nend\n", + "register_client.lua": "local queued = tonumber(ARGV[num_static_argv + 1])\n\n-- Could have been re-registered concurrently\nif not redis.call('zscore', client_last_seen_key, client) then\n redis.call('zadd', client_running_key, 0, client)\n redis.call('hset', client_num_queued_key, client, queued)\n redis.call('zadd', client_last_registered_key, 0, client)\nend\n\nredis.call('zadd', client_last_seen_key, now, client)\n\nreturn {}\n", + "running.lua": "return process_tick(now, false)['running']\n", + "submit.lua": "local queueLength = tonumber(ARGV[num_static_argv + 1])\nlocal weight = tonumber(ARGV[num_static_argv + 2])\n\nlocal capacity = process_tick(now, false)['capacity']\n\nlocal settings = redis.call('hmget', settings_key,\n 'id',\n 'maxConcurrent',\n 'highWater',\n 'nextRequest',\n 'strategy',\n 'unblockTime',\n 'penalty',\n 'minTime',\n 'groupTimeout'\n)\nlocal id = settings[1]\nlocal maxConcurrent = tonumber(settings[2])\nlocal highWater = tonumber(settings[3])\nlocal nextRequest = tonumber(settings[4])\nlocal strategy = tonumber(settings[5])\nlocal unblockTime = tonumber(settings[6])\nlocal penalty = tonumber(settings[7])\nlocal minTime = tonumber(settings[8])\nlocal groupTimeout = tonumber(settings[9])\n\nif maxConcurrent ~= nil and weight > maxConcurrent then\n return redis.error_reply('OVERWEIGHT:'..weight..':'..maxConcurrent)\nend\n\nlocal reachedHWM = (highWater ~= nil and queueLength == highWater\n and not (\n conditions_check(capacity, weight)\n and nextRequest - now <= 0\n )\n)\n\nlocal blocked = strategy == 3 and (reachedHWM or unblockTime >= now)\n\nif blocked then\n local computedPenalty = penalty\n if computedPenalty == nil then\n if minTime == 0 then\n computedPenalty = 5000\n else\n computedPenalty = 15 * minTime\n end\n end\n\n local newNextRequest = now + computedPenalty + minTime\n\n redis.call('hmset', settings_key,\n 'unblockTime', now + computedPenalty,\n 'nextRequest', newNextRequest\n )\n\n local clients_queued_reset = redis.call('hkeys', client_num_queued_key)\n local queued_reset = {}\n for i = 1, #clients_queued_reset do\n table.insert(queued_reset, clients_queued_reset[i])\n table.insert(queued_reset, 0)\n end\n redis.call('hmset', client_num_queued_key, unpack(queued_reset))\n\n redis.call('publish', 'b_'..id, 'blocked:')\n\n refresh_expiration(now, newNextRequest, groupTimeout)\nend\n\nif not blocked and not reachedHWM then\n redis.call('hincrby', 
client_num_queued_key, client, 1)\nend\n\nreturn {reachedHWM, blocked, strategy}\n", + "update_settings.lua": "local args = {'hmset', settings_key}\n\nfor i = num_static_argv + 1, #ARGV do\n table.insert(args, ARGV[i])\nend\n\nredis.call(unpack(args))\n\nprocess_tick(now, true)\n\nlocal groupTimeout = tonumber(redis.call('hget', settings_key, 'groupTimeout'))\nrefresh_expiration(0, 0, groupTimeout)\n\nreturn {}\n", + "validate_client.lua": "if not redis.call('zscore', client_last_seen_key, client) then\n return redis.error_reply('UNKNOWN_CLIENT')\nend\n\nredis.call('zadd', client_last_seen_key, now, client)\n", + "validate_keys.lua": "if not (redis.call('exists', settings_key) == 1) then\n return redis.error_reply('SETTINGS_KEY_NOT_FOUND')\nend\n" + }; + + var lua$1 = /*#__PURE__*/Object.freeze({ + default: lua + }); + + var require$$0 = getCjsExportFromNamespace(lua$1); + + var Scripts = createCommonjsModule(function (module, exports) { + var headers, lua, templates; + lua = require$$0; + headers = { + refs: lua["refs.lua"], + validate_keys: lua["validate_keys.lua"], + validate_client: lua["validate_client.lua"], + refresh_expiration: lua["refresh_expiration.lua"], + process_tick: lua["process_tick.lua"], + conditions_check: lua["conditions_check.lua"], + get_time: lua["get_time.lua"] + }; + + exports.allKeys = function (id) { + return [ + /* + HASH + */ + "b_".concat(id, "_settings"), + /* + HASH + job index -> weight + */ + "b_".concat(id, "_job_weights"), + /* + ZSET + job index -> expiration + */ + "b_".concat(id, "_job_expirations"), + /* + HASH + job index -> client + */ + "b_".concat(id, "_job_clients"), + /* + ZSET + client -> sum running + */ + "b_".concat(id, "_client_running"), + /* + HASH + client -> num queued + */ + "b_".concat(id, "_client_num_queued"), + /* + ZSET + client -> last job registered + */ + "b_".concat(id, "_client_last_registered"), + /* + ZSET + client -> last seen + */ + "b_".concat(id, "_client_last_seen")]; + }; + + templates = { + init: { + keys: exports.allKeys, + headers: ["process_tick"], + refresh_expiration: true, + code: lua["init.lua"] + }, + group_check: { + keys: exports.allKeys, + headers: [], + refresh_expiration: false, + code: lua["group_check.lua"] + }, + register_client: { + keys: exports.allKeys, + headers: ["validate_keys"], + refresh_expiration: false, + code: lua["register_client.lua"] + }, + blacklist_client: { + keys: exports.allKeys, + headers: ["validate_keys", "validate_client"], + refresh_expiration: false, + code: lua["blacklist_client.lua"] + }, + heartbeat: { + keys: exports.allKeys, + headers: ["validate_keys", "validate_client", "process_tick"], + refresh_expiration: false, + code: lua["heartbeat.lua"] + }, + update_settings: { + keys: exports.allKeys, + headers: ["validate_keys", "validate_client", "process_tick"], + refresh_expiration: true, + code: lua["update_settings.lua"] + }, + running: { + keys: exports.allKeys, + headers: ["validate_keys", "validate_client", "process_tick"], + refresh_expiration: false, + code: lua["running.lua"] + }, + queued: { + keys: exports.allKeys, + headers: ["validate_keys", "validate_client"], + refresh_expiration: false, + code: lua["queued.lua"] + }, + done: { + keys: exports.allKeys, + headers: ["validate_keys", "validate_client", "process_tick"], + refresh_expiration: false, + code: lua["done.lua"] + }, + check: { + keys: exports.allKeys, + headers: ["validate_keys", "validate_client", "process_tick", "conditions_check"], + refresh_expiration: false, + code: lua["check.lua"] + }, + 
submit: { + keys: exports.allKeys, + headers: ["validate_keys", "validate_client", "process_tick", "conditions_check"], + refresh_expiration: true, + code: lua["submit.lua"] + }, + register: { + keys: exports.allKeys, + headers: ["validate_keys", "validate_client", "process_tick", "conditions_check"], + refresh_expiration: true, + code: lua["register.lua"] + }, + free: { + keys: exports.allKeys, + headers: ["validate_keys", "validate_client", "process_tick"], + refresh_expiration: true, + code: lua["free.lua"] + }, + current_reservoir: { + keys: exports.allKeys, + headers: ["validate_keys", "validate_client", "process_tick"], + refresh_expiration: false, + code: lua["current_reservoir.lua"] + }, + increment_reservoir: { + keys: exports.allKeys, + headers: ["validate_keys", "validate_client", "process_tick"], + refresh_expiration: true, + code: lua["increment_reservoir.lua"] + } + }; + exports.names = Object.keys(templates); + + exports.keys = function (name, id) { + return templates[name].keys(id); + }; + + exports.payload = function (name) { + var template; + template = templates[name]; + return Array.prototype.concat(headers.refs, template.headers.map(function (h) { + return headers[h]; + }), template.refresh_expiration ? headers.refresh_expiration : "", template.code).join("\n"); + }; + }); + var Scripts_1 = Scripts.allKeys; + var Scripts_2 = Scripts.names; + var Scripts_3 = Scripts.keys; + var Scripts_4 = Scripts.payload; + + var Events$2, RedisConnection, Scripts$1, parser$3; + parser$3 = parser; + Events$2 = Events_1; + Scripts$1 = Scripts; + + RedisConnection = function () { + var RedisConnection = + /*#__PURE__*/ + function () { + function RedisConnection() { + var _this = this; + + var options = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {}; + + _classCallCheck(this, RedisConnection); + + parser$3.load(options, this.defaults, this); + + if (this.Redis == null) { + this.Redis = eval("require")("redis"); // Obfuscated or else Webpack/Angular will try to inline the optional redis module. To override this behavior: pass the redis module to Bottleneck as the 'Redis' option. + } + + if (this.Events == null) { + this.Events = new Events$2(this); + } + + this.terminated = false; + + if (this.client == null) { + this.client = this.Redis.createClient(this.clientOptions); + } + + this.subscriber = this.client.duplicate(); + this.limiters = {}; + this.shas = {}; + this.ready = this.Promise.all([this._setup(this.client, false), this._setup(this.subscriber, true)]).then(function () { + return _this._loadScripts(); + }).then(function () { + return { + client: _this.client, + subscriber: _this.subscriber + }; + }); + } + + _createClass(RedisConnection, [{ + key: "_setup", + value: function _setup(client, sub) { + var _this2 = this; + + client.setMaxListeners(0); + return new this.Promise(function (resolve, reject) { + client.on("error", function (e) { + return _this2.Events.trigger("error", e); + }); + + if (sub) { + client.on("message", function (channel, message) { + var ref; + return (ref = _this2.limiters[channel]) != null ? 
ref._store.onMessage(channel, message) : void 0; + }); + } + + if (client.ready) { + return resolve(); + } else { + return client.once("ready", resolve); + } + }); + } + }, { + key: "_loadScript", + value: function _loadScript(name) { + var _this3 = this; + + return new this.Promise(function (resolve, reject) { + var payload; + payload = Scripts$1.payload(name); + return _this3.client.multi([["script", "load", payload]]).exec(function (err, replies) { + if (err != null) { + return reject(err); + } + + _this3.shas[name] = replies[0]; + return resolve(replies[0]); + }); + }); + } + }, { + key: "_loadScripts", + value: function _loadScripts() { + var _this4 = this; + + return this.Promise.all(Scripts$1.names.map(function (k) { + return _this4._loadScript(k); + })); + } + }, { + key: "__runCommand__", + value: function () { + var _runCommand__ = _asyncToGenerator( + /*#__PURE__*/ + regeneratorRuntime.mark(function _callee(cmd) { + var _this5 = this; + + return regeneratorRuntime.wrap(function _callee$(_context) { + while (1) { + switch (_context.prev = _context.next) { + case 0: + _context.next = 2; + return this.ready; + + case 2: + return _context.abrupt("return", new this.Promise(function (resolve, reject) { + return _this5.client.multi([cmd]).exec_atomic(function (err, replies) { + if (err != null) { + return reject(err); + } else { + return resolve(replies[0]); + } + }); + })); + + case 3: + case "end": + return _context.stop(); + } + } + }, _callee, this); + })); + + function __runCommand__(_x) { + return _runCommand__.apply(this, arguments); + } + + return __runCommand__; + }() + }, { + key: "__addLimiter__", + value: function __addLimiter__(instance) { + var _this6 = this; + + return this.Promise.all([instance.channel(), instance.channel_client()].map(function (channel) { + return new _this6.Promise(function (resolve, reject) { + var _handler; + + _handler = function handler(chan) { + if (chan === channel) { + _this6.subscriber.removeListener("subscribe", _handler); + + _this6.limiters[channel] = instance; + return resolve(); + } + }; + + _this6.subscriber.on("subscribe", _handler); + + return _this6.subscriber.subscribe(channel); + }); + })); + } + }, { + key: "__removeLimiter__", + value: function __removeLimiter__(instance) { + var _this7 = this; + + return this.Promise.all([instance.channel(), instance.channel_client()].map( + /*#__PURE__*/ + function () { + var _ref = _asyncToGenerator( + /*#__PURE__*/ + regeneratorRuntime.mark(function _callee2(channel) { + return regeneratorRuntime.wrap(function _callee2$(_context2) { + while (1) { + switch (_context2.prev = _context2.next) { + case 0: + if (_this7.terminated) { + _context2.next = 3; + break; + } + + _context2.next = 3; + return new _this7.Promise(function (resolve, reject) { + return _this7.subscriber.unsubscribe(channel, function (err, chan) { + if (err != null) { + return reject(err); + } + + if (chan === channel) { + return resolve(); + } + }); + }); + + case 3: + return _context2.abrupt("return", delete _this7.limiters[channel]); + + case 4: + case "end": + return _context2.stop(); + } + } + }, _callee2); + })); + + return function (_x2) { + return _ref.apply(this, arguments); + }; + }())); + } + }, { + key: "__scriptArgs__", + value: function __scriptArgs__(name, id, args, cb) { + var keys; + keys = Scripts$1.keys(name, id); + return [this.shas[name], keys.length].concat(keys, args, cb); + } + }, { + key: "__scriptFn__", + value: function __scriptFn__(name) { + return this.client.evalsha.bind(this.client); + } + }, { + 
key: "disconnect", + value: function disconnect() { + var flush = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : true; + var i, k, len, ref; + ref = Object.keys(this.limiters); + + for (i = 0, len = ref.length; i < len; i++) { + k = ref[i]; + clearInterval(this.limiters[k]._store.heartbeat); + } + + this.limiters = {}; + this.terminated = true; + this.client.end(flush); + this.subscriber.end(flush); + return this.Promise.resolve(); + } + }]); + + return RedisConnection; + }(); + RedisConnection.prototype.datastore = "redis"; + RedisConnection.prototype.defaults = { + Redis: null, + clientOptions: {}, + client: null, + Promise: Promise, + Events: null + }; + return RedisConnection; + }.call(commonjsGlobal); + + var RedisConnection_1 = RedisConnection; + + var Events$3, IORedisConnection, Scripts$2, parser$4; + parser$4 = parser; + Events$3 = Events_1; + Scripts$2 = Scripts; + + IORedisConnection = function () { + var IORedisConnection = + /*#__PURE__*/ + function () { + function IORedisConnection() { + var _this = this; + + var options = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {}; + + _classCallCheck(this, IORedisConnection); + + parser$4.load(options, this.defaults, this); + + if (this.Redis == null) { + this.Redis = eval("require")("ioredis"); // Obfuscated or else Webpack/Angular will try to inline the optional ioredis module. To override this behavior: pass the ioredis module to Bottleneck as the 'Redis' option. + } + + if (this.Events == null) { + this.Events = new Events$3(this); + } + + this.terminated = false; + + if (this.clusterNodes != null) { + this.client = new this.Redis.Cluster(this.clusterNodes, this.clientOptions); + this.subscriber = new this.Redis.Cluster(this.clusterNodes, this.clientOptions); + } else if (this.client != null && this.client.duplicate == null) { + this.subscriber = new this.Redis.Cluster(this.client.startupNodes, this.client.options); + } else { + if (this.client == null) { + this.client = new this.Redis(this.clientOptions); + } + + this.subscriber = this.client.duplicate(); + } + + this.limiters = {}; + this.ready = this.Promise.all([this._setup(this.client, false), this._setup(this.subscriber, true)]).then(function () { + _this._loadScripts(); + + return { + client: _this.client, + subscriber: _this.subscriber + }; + }); + } + + _createClass(IORedisConnection, [{ + key: "_setup", + value: function _setup(client, sub) { + var _this2 = this; + + client.setMaxListeners(0); + return new this.Promise(function (resolve, reject) { + client.on("error", function (e) { + return _this2.Events.trigger("error", e); + }); + + if (sub) { + client.on("message", function (channel, message) { + var ref; + return (ref = _this2.limiters[channel]) != null ? 
ref._store.onMessage(channel, message) : void 0; + }); + } + + if (client.status === "ready") { + return resolve(); + } else { + return client.once("ready", resolve); + } + }); + } + }, { + key: "_loadScripts", + value: function _loadScripts() { + var _this3 = this; + + return Scripts$2.names.forEach(function (name) { + return _this3.client.defineCommand(name, { + lua: Scripts$2.payload(name) + }); + }); + } + }, { + key: "__runCommand__", + value: function () { + var _runCommand__ = _asyncToGenerator( + /*#__PURE__*/ + regeneratorRuntime.mark(function _callee(cmd) { + var _, deleted, _ref, _ref2, _ref2$; + + return regeneratorRuntime.wrap(function _callee$(_context) { + while (1) { + switch (_context.prev = _context.next) { + case 0: + _context.next = 2; + return this.ready; + + case 2: + _context.next = 4; + return this.client.pipeline([cmd]).exec(); + + case 4: + _ref = _context.sent; + _ref2 = _slicedToArray(_ref, 1); + _ref2$ = _slicedToArray(_ref2[0], 2); + _ = _ref2$[0]; + deleted = _ref2$[1]; + return _context.abrupt("return", deleted); + + case 10: + case "end": + return _context.stop(); + } + } + }, _callee, this); + })); + + function __runCommand__(_x) { + return _runCommand__.apply(this, arguments); + } + + return __runCommand__; + }() + }, { + key: "__addLimiter__", + value: function __addLimiter__(instance) { + var _this4 = this; + + return this.Promise.all([instance.channel(), instance.channel_client()].map(function (channel) { + return new _this4.Promise(function (resolve, reject) { + return _this4.subscriber.subscribe(channel, function () { + _this4.limiters[channel] = instance; + return resolve(); + }); + }); + })); + } + }, { + key: "__removeLimiter__", + value: function __removeLimiter__(instance) { + var _this5 = this; + + return [instance.channel(), instance.channel_client()].forEach( + /*#__PURE__*/ + function () { + var _ref3 = _asyncToGenerator( + /*#__PURE__*/ + regeneratorRuntime.mark(function _callee2(channel) { + return regeneratorRuntime.wrap(function _callee2$(_context2) { + while (1) { + switch (_context2.prev = _context2.next) { + case 0: + if (_this5.terminated) { + _context2.next = 3; + break; + } + + _context2.next = 3; + return _this5.subscriber.unsubscribe(channel); + + case 3: + return _context2.abrupt("return", delete _this5.limiters[channel]); + + case 4: + case "end": + return _context2.stop(); + } + } + }, _callee2); + })); + + return function (_x2) { + return _ref3.apply(this, arguments); + }; + }()); + } + }, { + key: "__scriptArgs__", + value: function __scriptArgs__(name, id, args, cb) { + var keys; + keys = Scripts$2.keys(name, id); + return [keys.length].concat(keys, args, cb); + } + }, { + key: "__scriptFn__", + value: function __scriptFn__(name) { + return this.client[name].bind(this.client); + } + }, { + key: "disconnect", + value: function disconnect() { + var flush = arguments.length > 0 && arguments[0] !== undefined ? 
arguments[0] : true; + var i, k, len, ref; + ref = Object.keys(this.limiters); + + for (i = 0, len = ref.length; i < len; i++) { + k = ref[i]; + clearInterval(this.limiters[k]._store.heartbeat); + } + + this.limiters = {}; + this.terminated = true; + + if (flush) { + return this.Promise.all([this.client.quit(), this.subscriber.quit()]); + } else { + this.client.disconnect(); + this.subscriber.disconnect(); + return this.Promise.resolve(); + } + } + }]); + + return IORedisConnection; + }(); + IORedisConnection.prototype.datastore = "ioredis"; + IORedisConnection.prototype.defaults = { + Redis: null, + clientOptions: {}, + clusterNodes: null, + client: null, + Promise: Promise, + Events: null + }; + return IORedisConnection; + }.call(commonjsGlobal); + + var IORedisConnection_1 = IORedisConnection; + + var BottleneckError$3, IORedisConnection$1, RedisConnection$1, RedisDatastore, parser$5; + parser$5 = parser; + BottleneckError$3 = BottleneckError_1; + RedisConnection$1 = RedisConnection_1; + IORedisConnection$1 = IORedisConnection_1; + + RedisDatastore = + /*#__PURE__*/ + function () { + function RedisDatastore(instance, storeOptions, storeInstanceOptions) { + var _this = this; + + _classCallCheck(this, RedisDatastore); + + this.instance = instance; + this.storeOptions = storeOptions; + this.originalId = this.instance.id; + this.clientId = this.instance._randomIndex(); + parser$5.load(storeInstanceOptions, storeInstanceOptions, this); + this.clients = {}; + this.capacityPriorityCounters = {}; + this.sharedConnection = this.connection != null; + + if (this.connection == null) { + this.connection = this.instance.datastore === "redis" ? new RedisConnection$1({ + Redis: this.Redis, + clientOptions: this.clientOptions, + Promise: this.Promise, + Events: this.instance.Events + }) : this.instance.datastore === "ioredis" ? 
new IORedisConnection$1({ + Redis: this.Redis, + clientOptions: this.clientOptions, + clusterNodes: this.clusterNodes, + Promise: this.Promise, + Events: this.instance.Events + }) : void 0; + } + + this.instance.connection = this.connection; + this.instance.datastore = this.connection.datastore; + this.ready = this.connection.ready.then(function (clients) { + _this.clients = clients; + return _this.runScript("init", _this.prepareInitSettings(_this.clearDatastore)); + }).then(function () { + return _this.connection.__addLimiter__(_this.instance); + }).then(function () { + return _this.runScript("register_client", [_this.instance.queued()]); + }).then(function () { + var base; + + if (typeof (base = _this.heartbeat = setInterval(function () { + return _this.runScript("heartbeat", [])["catch"](function (e) { + return _this.instance.Events.trigger("error", e); + }); + }, _this.heartbeatInterval)).unref === "function") { + base.unref(); + } + + return _this.clients; + }); + } + + _createClass(RedisDatastore, [{ + key: "__publish__", + value: function () { + var _publish__ = _asyncToGenerator( + /*#__PURE__*/ + regeneratorRuntime.mark(function _callee(message) { + var client, _ref; + + return regeneratorRuntime.wrap(function _callee$(_context) { + while (1) { + switch (_context.prev = _context.next) { + case 0: + _context.next = 2; + return this.ready; + + case 2: + _ref = _context.sent; + client = _ref.client; + return _context.abrupt("return", client.publish(this.instance.channel(), "message:".concat(message.toString()))); + + case 5: + case "end": + return _context.stop(); + } + } + }, _callee, this); + })); + + function __publish__(_x) { + return _publish__.apply(this, arguments); + } + + return __publish__; + }() + }, { + key: "onMessage", + value: function () { + var _onMessage = _asyncToGenerator( + /*#__PURE__*/ + regeneratorRuntime.mark(function _callee3(channel, message) { + var _this2 = this; + + var capacity, counter, data, drained, e, newCapacity, pos, priorityClient, rawCapacity, type, _ref2, _data$split, _data$split2; + + return regeneratorRuntime.wrap(function _callee3$(_context3) { + while (1) { + switch (_context3.prev = _context3.next) { + case 0: + _context3.prev = 0; + pos = message.indexOf(":"); + _ref2 = [message.slice(0, pos), message.slice(pos + 1)]; + type = _ref2[0]; + data = _ref2[1]; + + if (!(type === "capacity")) { + _context3.next = 11; + break; + } + + _context3.next = 8; + return this.instance._drainAll(data.length > 0 ? ~~data : void 0); + + case 8: + return _context3.abrupt("return", _context3.sent); + + case 11: + if (!(type === "capacity-priority")) { + _context3.next = 37; + break; + } + + _data$split = data.split(":"); + _data$split2 = _slicedToArray(_data$split, 3); + rawCapacity = _data$split2[0]; + priorityClient = _data$split2[1]; + counter = _data$split2[2]; + capacity = rawCapacity.length > 0 ? ~~rawCapacity : void 0; + + if (!(priorityClient === this.clientId)) { + _context3.next = 28; + break; + } + + _context3.next = 21; + return this.instance._drainAll(capacity); + + case 21: + drained = _context3.sent; + newCapacity = capacity != null ? 
capacity - (drained || 0) : ""; + _context3.next = 25; + return this.clients.client.publish(this.instance.channel(), "capacity-priority:".concat(newCapacity, "::").concat(counter)); + + case 25: + return _context3.abrupt("return", _context3.sent); + + case 28: + if (!(priorityClient === "")) { + _context3.next = 34; + break; + } + + clearTimeout(this.capacityPriorityCounters[counter]); + delete this.capacityPriorityCounters[counter]; + return _context3.abrupt("return", this.instance._drainAll(capacity)); + + case 34: + return _context3.abrupt("return", this.capacityPriorityCounters[counter] = setTimeout( + /*#__PURE__*/ + _asyncToGenerator( + /*#__PURE__*/ + regeneratorRuntime.mark(function _callee2() { + var e; + return regeneratorRuntime.wrap(function _callee2$(_context2) { + while (1) { + switch (_context2.prev = _context2.next) { + case 0: + _context2.prev = 0; + delete _this2.capacityPriorityCounters[counter]; + _context2.next = 4; + return _this2.runScript("blacklist_client", [priorityClient]); + + case 4: + _context2.next = 6; + return _this2.instance._drainAll(capacity); + + case 6: + return _context2.abrupt("return", _context2.sent); + + case 9: + _context2.prev = 9; + _context2.t0 = _context2["catch"](0); + e = _context2.t0; + return _context2.abrupt("return", _this2.instance.Events.trigger("error", e)); + + case 13: + case "end": + return _context2.stop(); + } + } + }, _callee2, null, [[0, 9]]); + })), 1000)); + + case 35: + _context3.next = 45; + break; + + case 37: + if (!(type === "message")) { + _context3.next = 41; + break; + } + + return _context3.abrupt("return", this.instance.Events.trigger("message", data)); + + case 41: + if (!(type === "blocked")) { + _context3.next = 45; + break; + } + + _context3.next = 44; + return this.instance._dropAllQueued(); + + case 44: + return _context3.abrupt("return", _context3.sent); + + case 45: + _context3.next = 51; + break; + + case 47: + _context3.prev = 47; + _context3.t0 = _context3["catch"](0); + e = _context3.t0; + return _context3.abrupt("return", this.instance.Events.trigger("error", e)); + + case 51: + case "end": + return _context3.stop(); + } + } + }, _callee3, this, [[0, 47]]); + })); + + function onMessage(_x2, _x3) { + return _onMessage.apply(this, arguments); + } + + return onMessage; + }() + }, { + key: "__disconnect__", + value: function __disconnect__(flush) { + clearInterval(this.heartbeat); + + if (this.sharedConnection) { + return this.connection.__removeLimiter__(this.instance); + } else { + return this.connection.disconnect(flush); + } + } + }, { + key: "runScript", + value: function () { + var _runScript = _asyncToGenerator( + /*#__PURE__*/ + regeneratorRuntime.mark(function _callee4(name, args) { + var _this3 = this; + + return regeneratorRuntime.wrap(function _callee4$(_context4) { + while (1) { + switch (_context4.prev = _context4.next) { + case 0: + if (name === "init" || name === "register_client") { + _context4.next = 3; + break; + } + + _context4.next = 3; + return this.ready; + + case 3: + return _context4.abrupt("return", new this.Promise(function (resolve, reject) { + var all_args, arr; + all_args = [Date.now(), _this3.clientId].concat(args); + + _this3.instance.Events.trigger("debug", "Calling Redis script: ".concat(name, ".lua"), all_args); + + arr = _this3.connection.__scriptArgs__(name, _this3.originalId, all_args, function (err, replies) { + if (err != null) { + return reject(err); + } + + return resolve(replies); + }); + return _this3.connection.__scriptFn__(name).apply(void 0, 
_toConsumableArray(arr)); + })["catch"](function (e) { + if (e.message === "SETTINGS_KEY_NOT_FOUND") { + if (name === "heartbeat") { + return _this3.Promise.resolve(); + } else { + return _this3.runScript("init", _this3.prepareInitSettings(false)).then(function () { + return _this3.runScript(name, args); + }); + } + } else if (e.message === "UNKNOWN_CLIENT") { + return _this3.runScript("register_client", [_this3.instance.queued()]).then(function () { + return _this3.runScript(name, args); + }); + } else { + return _this3.Promise.reject(e); + } + })); + + case 4: + case "end": + return _context4.stop(); + } + } + }, _callee4, this); + })); + + function runScript(_x4, _x5) { + return _runScript.apply(this, arguments); + } + + return runScript; + }() + }, { + key: "prepareArray", + value: function prepareArray(arr) { + var i, len, results, x; + results = []; + + for (i = 0, len = arr.length; i < len; i++) { + x = arr[i]; + results.push(x != null ? x.toString() : ""); + } + + return results; + } + }, { + key: "prepareObject", + value: function prepareObject(obj) { + var arr, k, v; + arr = []; + + for (k in obj) { + v = obj[k]; + arr.push(k, v != null ? v.toString() : ""); + } + + return arr; + } + }, { + key: "prepareInitSettings", + value: function prepareInitSettings(clear) { + var args; + args = this.prepareObject(Object.assign({}, this.storeOptions, { + id: this.originalId, + version: this.instance.version, + groupTimeout: this.timeout, + clientTimeout: this.clientTimeout + })); + args.unshift(clear ? 1 : 0, this.instance.version); + return args; + } + }, { + key: "convertBool", + value: function convertBool(b) { + return !!b; + } + }, { + key: "__updateSettings__", + value: function () { + var _updateSettings__ = _asyncToGenerator( + /*#__PURE__*/ + regeneratorRuntime.mark(function _callee5(options) { + return regeneratorRuntime.wrap(function _callee5$(_context5) { + while (1) { + switch (_context5.prev = _context5.next) { + case 0: + _context5.next = 2; + return this.runScript("update_settings", this.prepareObject(options)); + + case 2: + return _context5.abrupt("return", parser$5.overwrite(options, options, this.storeOptions)); + + case 3: + case "end": + return _context5.stop(); + } + } + }, _callee5, this); + })); + + function __updateSettings__(_x6) { + return _updateSettings__.apply(this, arguments); + } + + return __updateSettings__; + }() + }, { + key: "__running__", + value: function __running__() { + return this.runScript("running", []); + } + }, { + key: "__queued__", + value: function __queued__() { + return this.runScript("queued", []); + } + }, { + key: "__done__", + value: function __done__() { + return this.runScript("done", []); + } + }, { + key: "__groupCheck__", + value: function () { + var _groupCheck__ = _asyncToGenerator( + /*#__PURE__*/ + regeneratorRuntime.mark(function _callee6() { + return regeneratorRuntime.wrap(function _callee6$(_context6) { + while (1) { + switch (_context6.prev = _context6.next) { + case 0: + _context6.t0 = this; + _context6.next = 3; + return this.runScript("group_check", []); + + case 3: + _context6.t1 = _context6.sent; + return _context6.abrupt("return", _context6.t0.convertBool.call(_context6.t0, _context6.t1)); + + case 5: + case "end": + return _context6.stop(); + } + } + }, _callee6, this); + })); + + function __groupCheck__() { + return _groupCheck__.apply(this, arguments); + } + + return __groupCheck__; + }() + }, { + key: "__incrementReservoir__", + value: function __incrementReservoir__(incr) { + return 
this.runScript("increment_reservoir", [incr]); + } + }, { + key: "__currentReservoir__", + value: function __currentReservoir__() { + return this.runScript("current_reservoir", []); + } + }, { + key: "__check__", + value: function () { + var _check__ = _asyncToGenerator( + /*#__PURE__*/ + regeneratorRuntime.mark(function _callee7(weight) { + return regeneratorRuntime.wrap(function _callee7$(_context7) { + while (1) { + switch (_context7.prev = _context7.next) { + case 0: + _context7.t0 = this; + _context7.next = 3; + return this.runScript("check", this.prepareArray([weight])); + + case 3: + _context7.t1 = _context7.sent; + return _context7.abrupt("return", _context7.t0.convertBool.call(_context7.t0, _context7.t1)); + + case 5: + case "end": + return _context7.stop(); + } + } + }, _callee7, this); + })); + + function __check__(_x7) { + return _check__.apply(this, arguments); + } + + return __check__; + }() + }, { + key: "__register__", + value: function () { + var _register__ = _asyncToGenerator( + /*#__PURE__*/ + regeneratorRuntime.mark(function _callee8(index, weight, expiration) { + var reservoir, success, wait, _ref4, _ref5; + + return regeneratorRuntime.wrap(function _callee8$(_context8) { + while (1) { + switch (_context8.prev = _context8.next) { + case 0: + _context8.next = 2; + return this.runScript("register", this.prepareArray([index, weight, expiration])); + + case 2: + _ref4 = _context8.sent; + _ref5 = _slicedToArray(_ref4, 3); + success = _ref5[0]; + wait = _ref5[1]; + reservoir = _ref5[2]; + return _context8.abrupt("return", { + success: this.convertBool(success), + wait: wait, + reservoir: reservoir + }); + + case 8: + case "end": + return _context8.stop(); + } + } + }, _callee8, this); + })); + + function __register__(_x8, _x9, _x10) { + return _register__.apply(this, arguments); + } + + return __register__; + }() + }, { + key: "__submit__", + value: function () { + var _submit__ = _asyncToGenerator( + /*#__PURE__*/ + regeneratorRuntime.mark(function _callee9(queueLength, weight) { + var blocked, e, maxConcurrent, overweight, reachedHWM, strategy, _ref6, _ref7, _e$message$split, _e$message$split2; + + return regeneratorRuntime.wrap(function _callee9$(_context9) { + while (1) { + switch (_context9.prev = _context9.next) { + case 0: + _context9.prev = 0; + _context9.next = 3; + return this.runScript("submit", this.prepareArray([queueLength, weight])); + + case 3: + _ref6 = _context9.sent; + _ref7 = _slicedToArray(_ref6, 3); + reachedHWM = _ref7[0]; + blocked = _ref7[1]; + strategy = _ref7[2]; + return _context9.abrupt("return", { + reachedHWM: this.convertBool(reachedHWM), + blocked: this.convertBool(blocked), + strategy: strategy + }); + + case 11: + _context9.prev = 11; + _context9.t0 = _context9["catch"](0); + e = _context9.t0; + + if (!(e.message.indexOf("OVERWEIGHT") === 0)) { + _context9.next = 23; + break; + } + + _e$message$split = e.message.split(":"); + _e$message$split2 = _slicedToArray(_e$message$split, 3); + overweight = _e$message$split2[0]; + weight = _e$message$split2[1]; + maxConcurrent = _e$message$split2[2]; + throw new BottleneckError$3("Impossible to add a job having a weight of ".concat(weight, " to a limiter having a maxConcurrent setting of ").concat(maxConcurrent)); + + case 23: + throw e; + + case 24: + case "end": + return _context9.stop(); + } + } + }, _callee9, this, [[0, 11]]); + })); + + function __submit__(_x11, _x12) { + return _submit__.apply(this, arguments); + } + + return __submit__; + }() + }, { + key: "__free__", + value: function () { + 
var _free__ = _asyncToGenerator( + /*#__PURE__*/ + regeneratorRuntime.mark(function _callee10(index, weight) { + var running; + return regeneratorRuntime.wrap(function _callee10$(_context10) { + while (1) { + switch (_context10.prev = _context10.next) { + case 0: + _context10.next = 2; + return this.runScript("free", this.prepareArray([index])); + + case 2: + running = _context10.sent; + return _context10.abrupt("return", { + running: running + }); + + case 4: + case "end": + return _context10.stop(); + } + } + }, _callee10, this); + })); + + function __free__(_x13, _x14) { + return _free__.apply(this, arguments); + } + + return __free__; + }() + }]); + + return RedisDatastore; + }(); + + var RedisDatastore_1 = RedisDatastore; + + var BottleneckError$4, States; + BottleneckError$4 = BottleneckError_1; + + States = + /*#__PURE__*/ + function () { + function States(status1) { + _classCallCheck(this, States); + + this.status = status1; + this._jobs = {}; + this.counts = this.status.map(function () { + return 0; + }); + } + + _createClass(States, [{ + key: "next", + value: function next(id) { + var current, next; + current = this._jobs[id]; + next = current + 1; + + if (current != null && next < this.status.length) { + this.counts[current]--; + this.counts[next]++; + return this._jobs[id]++; + } else if (current != null) { + this.counts[current]--; + return delete this._jobs[id]; + } + } + }, { + key: "start", + value: function start(id) { + var initial; + initial = 0; + this._jobs[id] = initial; + return this.counts[initial]++; + } + }, { + key: "remove", + value: function remove(id) { + var current; + current = this._jobs[id]; + + if (current != null) { + this.counts[current]--; + delete this._jobs[id]; + } + + return current != null; + } + }, { + key: "jobStatus", + value: function jobStatus(id) { + var ref; + return (ref = this.status[this._jobs[id]]) != null ? 
ref : null; + } + }, { + key: "statusJobs", + value: function statusJobs(status) { + var k, pos, ref, results, v; + + if (status != null) { + pos = this.status.indexOf(status); + + if (pos < 0) { + throw new BottleneckError$4("status must be one of ".concat(this.status.join(', '))); + } + + ref = this._jobs; + results = []; + + for (k in ref) { + v = ref[k]; + + if (v === pos) { + results.push(k); + } + } + + return results; + } else { + return Object.keys(this._jobs); + } + } + }, { + key: "statusCounts", + value: function statusCounts() { + var _this = this; + + return this.counts.reduce(function (acc, v, i) { + acc[_this.status[i]] = v; + return acc; + }, {}); + } + }]); + + return States; + }(); + + var States_1 = States; + + var DLList$2, Sync; + DLList$2 = DLList_1; + + Sync = + /*#__PURE__*/ + function () { + function Sync(name, Promise) { + _classCallCheck(this, Sync); + + this.schedule = this.schedule.bind(this); + this.name = name; + this.Promise = Promise; + this._running = 0; + this._queue = new DLList$2(); + } + + _createClass(Sync, [{ + key: "isEmpty", + value: function isEmpty() { + return this._queue.length === 0; + } + }, { + key: "_tryToRun", + value: function () { + var _tryToRun2 = _asyncToGenerator( + /*#__PURE__*/ + regeneratorRuntime.mark(function _callee2() { + var args, cb, error, reject, resolve, returned, task, _this$_queue$shift; + + return regeneratorRuntime.wrap(function _callee2$(_context2) { + while (1) { + switch (_context2.prev = _context2.next) { + case 0: + if (!(this._running < 1 && this._queue.length > 0)) { + _context2.next = 13; + break; + } + + this._running++; + _this$_queue$shift = this._queue.shift(); + task = _this$_queue$shift.task; + args = _this$_queue$shift.args; + resolve = _this$_queue$shift.resolve; + reject = _this$_queue$shift.reject; + _context2.next = 9; + return _asyncToGenerator( + /*#__PURE__*/ + regeneratorRuntime.mark(function _callee() { + return regeneratorRuntime.wrap(function _callee$(_context) { + while (1) { + switch (_context.prev = _context.next) { + case 0: + _context.prev = 0; + _context.next = 3; + return task.apply(void 0, _toConsumableArray(args)); + + case 3: + returned = _context.sent; + return _context.abrupt("return", function () { + return resolve(returned); + }); + + case 7: + _context.prev = 7; + _context.t0 = _context["catch"](0); + error = _context.t0; + return _context.abrupt("return", function () { + return reject(error); + }); + + case 11: + case "end": + return _context.stop(); + } + } + }, _callee, null, [[0, 7]]); + }))(); + + case 9: + cb = _context2.sent; + this._running--; + + this._tryToRun(); + + return _context2.abrupt("return", cb()); + + case 13: + case "end": + return _context2.stop(); + } + } + }, _callee2, this); + })); + + function _tryToRun() { + return _tryToRun2.apply(this, arguments); + } + + return _tryToRun; + }() + }, { + key: "schedule", + value: function schedule(task) { + var promise, reject, resolve; + resolve = reject = null; + promise = new this.Promise(function (_resolve, _reject) { + resolve = _resolve; + return reject = _reject; + }); + + for (var _len = arguments.length, args = new Array(_len > 1 ? 
_len - 1 : 0), _key = 1; _key < _len; _key++) { + args[_key - 1] = arguments[_key]; + } + + this._queue.push({ + task: task, + args: args, + resolve: resolve, + reject: reject + }); + + this._tryToRun(); + + return promise; + } + }]); + + return Sync; + }(); + + var Sync_1 = Sync; + + var version = "2.19.5"; + var version$1 = { + version: version + }; + + var version$2 = /*#__PURE__*/Object.freeze({ + version: version, + default: version$1 + }); + + var Events$4, Group, IORedisConnection$2, RedisConnection$2, Scripts$3, parser$6; + parser$6 = parser; + Events$4 = Events_1; + RedisConnection$2 = RedisConnection_1; + IORedisConnection$2 = IORedisConnection_1; + Scripts$3 = Scripts; + + Group = function () { + var Group = + /*#__PURE__*/ + function () { + function Group() { + var limiterOptions = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {}; + + _classCallCheck(this, Group); + + this.deleteKey = this.deleteKey.bind(this); + this.limiterOptions = limiterOptions; + parser$6.load(this.limiterOptions, this.defaults, this); + this.Events = new Events$4(this); + this.instances = {}; + this.Bottleneck = Bottleneck_1; + + this._startAutoCleanup(); + + this.sharedConnection = this.connection != null; + + if (this.connection == null) { + if (this.limiterOptions.datastore === "redis") { + this.connection = new RedisConnection$2(Object.assign({}, this.limiterOptions, { + Events: this.Events + })); + } else if (this.limiterOptions.datastore === "ioredis") { + this.connection = new IORedisConnection$2(Object.assign({}, this.limiterOptions, { + Events: this.Events + })); + } + } + } + + _createClass(Group, [{ + key: "key", + value: function key() { + var _this = this; + + var _key = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : ""; + + var ref; + return (ref = this.instances[_key]) != null ? ref : function () { + var limiter; + limiter = _this.instances[_key] = new _this.Bottleneck(Object.assign(_this.limiterOptions, { + id: "".concat(_this.id, "-").concat(_key), + timeout: _this.timeout, + connection: _this.connection + })); + + _this.Events.trigger("created", limiter, _key); + + return limiter; + }(); + } + }, { + key: "deleteKey", + value: function () { + var _deleteKey = _asyncToGenerator( + /*#__PURE__*/ + regeneratorRuntime.mark(function _callee() { + var key, + deleted, + instance, + _args = arguments; + return regeneratorRuntime.wrap(function _callee$(_context) { + while (1) { + switch (_context.prev = _context.next) { + case 0: + key = _args.length > 0 && _args[0] !== undefined ? 
_args[0] : ""; + instance = this.instances[key]; + + if (!this.connection) { + _context.next = 6; + break; + } + + _context.next = 5; + return this.connection.__runCommand__(['del'].concat(_toConsumableArray(Scripts$3.allKeys("".concat(this.id, "-").concat(key))))); + + case 5: + deleted = _context.sent; + + case 6: + if (!(instance != null)) { + _context.next = 10; + break; + } + + delete this.instances[key]; + _context.next = 10; + return instance.disconnect(); + + case 10: + return _context.abrupt("return", instance != null || deleted > 0); + + case 11: + case "end": + return _context.stop(); + } + } + }, _callee, this); + })); + + function deleteKey() { + return _deleteKey.apply(this, arguments); + } + + return deleteKey; + }() + }, { + key: "limiters", + value: function limiters() { + var k, ref, results, v; + ref = this.instances; + results = []; + + for (k in ref) { + v = ref[k]; + results.push({ + key: k, + limiter: v + }); + } + + return results; + } + }, { + key: "keys", + value: function keys() { + return Object.keys(this.instances); + } + }, { + key: "clusterKeys", + value: function () { + var _clusterKeys = _asyncToGenerator( + /*#__PURE__*/ + regeneratorRuntime.mark(function _callee2() { + var cursor, end, found, i, k, keys, len, next, start, _ref, _ref2; + + return regeneratorRuntime.wrap(function _callee2$(_context2) { + while (1) { + switch (_context2.prev = _context2.next) { + case 0: + if (!(this.connection == null)) { + _context2.next = 2; + break; + } + + return _context2.abrupt("return", this.Promise.resolve(this.keys())); + + case 2: + keys = []; + cursor = null; + start = "b_".concat(this.id, "-").length; + end = "_settings".length; + + case 6: + if (!(cursor !== 0)) { + _context2.next = 17; + break; + } + + _context2.next = 9; + return this.connection.__runCommand__(["scan", cursor != null ? 
cursor : 0, "match", "b_".concat(this.id, "-*_settings"), "count", 10000]); + + case 9: + _ref = _context2.sent; + _ref2 = _slicedToArray(_ref, 2); + next = _ref2[0]; + found = _ref2[1]; + cursor = ~~next; + + for (i = 0, len = found.length; i < len; i++) { + k = found[i]; + keys.push(k.slice(start, -end)); + } + + _context2.next = 6; + break; + + case 17: + return _context2.abrupt("return", keys); + + case 18: + case "end": + return _context2.stop(); + } + } + }, _callee2, this); + })); + + function clusterKeys() { + return _clusterKeys.apply(this, arguments); + } + + return clusterKeys; + }() + }, { + key: "_startAutoCleanup", + value: function _startAutoCleanup() { + var _this2 = this; + + var base; + clearInterval(this.interval); + return typeof (base = this.interval = setInterval( + /*#__PURE__*/ + _asyncToGenerator( + /*#__PURE__*/ + regeneratorRuntime.mark(function _callee3() { + var e, k, ref, results, time, v; + return regeneratorRuntime.wrap(function _callee3$(_context3) { + while (1) { + switch (_context3.prev = _context3.next) { + case 0: + time = Date.now(); + ref = _this2.instances; + results = []; + _context3.t0 = regeneratorRuntime.keys(ref); + + case 4: + if ((_context3.t1 = _context3.t0()).done) { + _context3.next = 23; + break; + } + + k = _context3.t1.value; + v = ref[k]; + _context3.prev = 7; + _context3.next = 10; + return v._store.__groupCheck__(time); + + case 10: + if (!_context3.sent) { + _context3.next = 14; + break; + } + + results.push(_this2.deleteKey(k)); + _context3.next = 15; + break; + + case 14: + results.push(void 0); + + case 15: + _context3.next = 21; + break; + + case 17: + _context3.prev = 17; + _context3.t2 = _context3["catch"](7); + e = _context3.t2; + results.push(v.Events.trigger("error", e)); + + case 21: + _context3.next = 4; + break; + + case 23: + return _context3.abrupt("return", results); + + case 24: + case "end": + return _context3.stop(); + } + } + }, _callee3, null, [[7, 17]]); + })), this.timeout / 2)).unref === "function" ? base.unref() : void 0; + } + }, { + key: "updateSettings", + value: function updateSettings() { + var options = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {}; + parser$6.overwrite(options, this.defaults, this); + parser$6.overwrite(options, options, this.limiterOptions); + + if (options.timeout != null) { + return this._startAutoCleanup(); + } + } + }, { + key: "disconnect", + value: function disconnect() { + var flush = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : true; + var ref; + + if (!this.sharedConnection) { + return (ref = this.connection) != null ? ref.disconnect(flush) : void 0; + } + } + }]); + + return Group; + }(); + Group.prototype.defaults = { + timeout: 1000 * 60 * 5, + connection: null, + Promise: Promise, + id: "group-key" + }; + return Group; + }.call(commonjsGlobal); + + var Group_1 = Group; + + var Batcher, Events$5, parser$7; + parser$7 = parser; + Events$5 = Events_1; + + Batcher = function () { + var Batcher = + /*#__PURE__*/ + function () { + function Batcher() { + var options = arguments.length > 0 && arguments[0] !== undefined ? 
arguments[0] : {}; + + _classCallCheck(this, Batcher); + + this.options = options; + parser$7.load(this.options, this.defaults, this); + this.Events = new Events$5(this); + this._arr = []; + + this._resetPromise(); + + this._lastFlush = Date.now(); + } + + _createClass(Batcher, [{ + key: "_resetPromise", + value: function _resetPromise() { + var _this = this; + + return this._promise = new this.Promise(function (res, rej) { + return _this._resolve = res; + }); + } + }, { + key: "_flush", + value: function _flush() { + clearTimeout(this._timeout); + this._lastFlush = Date.now(); + + this._resolve(); + + this.Events.trigger("batch", this._arr); + this._arr = []; + return this._resetPromise(); + } + }, { + key: "add", + value: function add(data) { + var _this2 = this; + + var ret; + + this._arr.push(data); + + ret = this._promise; + + if (this._arr.length === this.maxSize) { + this._flush(); + } else if (this.maxTime != null && this._arr.length === 1) { + this._timeout = setTimeout(function () { + return _this2._flush(); + }, this.maxTime); + } + + return ret; + } + }]); + + return Batcher; + }(); + Batcher.prototype.defaults = { + maxTime: null, + maxSize: null, + Promise: Promise + }; + return Batcher; + }.call(commonjsGlobal); + + var Batcher_1 = Batcher; + + var require$$8 = getCjsExportFromNamespace(version$2); + + var Bottleneck, + DEFAULT_PRIORITY$1, + Events$6, + Job$1, + LocalDatastore$1, + NUM_PRIORITIES$1, + Queues$1, + RedisDatastore$1, + States$1, + Sync$1, + parser$8, + splice = [].splice; + NUM_PRIORITIES$1 = 10; + DEFAULT_PRIORITY$1 = 5; + parser$8 = parser; + Queues$1 = Queues_1; + Job$1 = Job_1; + LocalDatastore$1 = LocalDatastore_1; + RedisDatastore$1 = RedisDatastore_1; + Events$6 = Events_1; + States$1 = States_1; + Sync$1 = Sync_1; + + Bottleneck = function () { + var Bottleneck = + /*#__PURE__*/ + function () { + function Bottleneck() { + var _this = this; + + var options = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {}; + + _classCallCheck(this, Bottleneck); + + var storeInstanceOptions, storeOptions; + this._addToQueue = this._addToQueue.bind(this); + + for (var _len = arguments.length, invalid = new Array(_len > 1 ? _len - 1 : 0), _key = 1; _key < _len; _key++) { + invalid[_key - 1] = arguments[_key]; + } + + this._validateOptions(options, invalid); + + parser$8.load(options, this.instanceDefaults, this); + this._queues = new Queues$1(NUM_PRIORITIES$1); + this._scheduled = {}; + this._states = new States$1(["RECEIVED", "QUEUED", "RUNNING", "EXECUTING"].concat(this.trackDoneStatus ? ["DONE"] : [])); + this._limiter = null; + this.Events = new Events$6(this); + this._submitLock = new Sync$1("submit", this.Promise); + this._registerLock = new Sync$1("register", this.Promise); + storeOptions = parser$8.load(options, this.storeDefaults, {}); + + this._store = function () { + if (this.datastore === "redis" || this.datastore === "ioredis" || this.connection != null) { + storeInstanceOptions = parser$8.load(options, this.redisStoreDefaults, {}); + return new RedisDatastore$1(this, storeOptions, storeInstanceOptions); + } else if (this.datastore === "local") { + storeInstanceOptions = parser$8.load(options, this.localStoreDefaults, {}); + return new LocalDatastore$1(this, storeOptions, storeInstanceOptions); + } else { + throw new Bottleneck.prototype.BottleneckError("Invalid datastore type: ".concat(this.datastore)); + } + }.call(this); + + this._queues.on("leftzero", function () { + var ref; + return (ref = _this._store.heartbeat) != null ? 
typeof ref.ref === "function" ? ref.ref() : void 0 : void 0; + }); + + this._queues.on("zero", function () { + var ref; + return (ref = _this._store.heartbeat) != null ? typeof ref.unref === "function" ? ref.unref() : void 0 : void 0; + }); + } + + _createClass(Bottleneck, [{ + key: "_validateOptions", + value: function _validateOptions(options, invalid) { + if (!(options != null && _typeof(options) === "object" && invalid.length === 0)) { + throw new Bottleneck.prototype.BottleneckError("Bottleneck v2 takes a single object argument. Refer to https://github.com/SGrondin/bottleneck#upgrading-to-v2 if you're upgrading from Bottleneck v1."); + } + } + }, { + key: "ready", + value: function ready() { + return this._store.ready; + } + }, { + key: "clients", + value: function clients() { + return this._store.clients; + } + }, { + key: "channel", + value: function channel() { + return "b_".concat(this.id); + } + }, { + key: "channel_client", + value: function channel_client() { + return "b_".concat(this.id, "_").concat(this._store.clientId); + } + }, { + key: "publish", + value: function publish(message) { + return this._store.__publish__(message); + } + }, { + key: "disconnect", + value: function disconnect() { + var flush = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : true; + return this._store.__disconnect__(flush); + } + }, { + key: "chain", + value: function chain(_limiter) { + this._limiter = _limiter; + return this; + } + }, { + key: "queued", + value: function queued(priority) { + return this._queues.queued(priority); + } + }, { + key: "clusterQueued", + value: function clusterQueued() { + return this._store.__queued__(); + } + }, { + key: "empty", + value: function empty() { + return this.queued() === 0 && this._submitLock.isEmpty(); + } + }, { + key: "running", + value: function running() { + return this._store.__running__(); + } + }, { + key: "done", + value: function done() { + return this._store.__done__(); + } + }, { + key: "jobStatus", + value: function jobStatus(id) { + return this._states.jobStatus(id); + } + }, { + key: "jobs", + value: function jobs(status) { + return this._states.statusJobs(status); + } + }, { + key: "counts", + value: function counts() { + return this._states.statusCounts(); + } + }, { + key: "_randomIndex", + value: function _randomIndex() { + return Math.random().toString(36).slice(2); + } + }, { + key: "check", + value: function check() { + var weight = arguments.length > 0 && arguments[0] !== undefined ? 
arguments[0] : 1; + return this._store.__check__(weight); + } + }, { + key: "_clearGlobalState", + value: function _clearGlobalState(index) { + if (this._scheduled[index] != null) { + clearTimeout(this._scheduled[index].expiration); + delete this._scheduled[index]; + return true; + } else { + return false; + } + } + }, { + key: "_free", + value: function () { + var _free2 = _asyncToGenerator( + /*#__PURE__*/ + regeneratorRuntime.mark(function _callee(index, job, options, eventInfo) { + var e, running, _ref; + + return regeneratorRuntime.wrap(function _callee$(_context) { + while (1) { + switch (_context.prev = _context.next) { + case 0: + _context.prev = 0; + _context.next = 3; + return this._store.__free__(index, options.weight); + + case 3: + _ref = _context.sent; + running = _ref.running; + this.Events.trigger("debug", "Freed ".concat(options.id), eventInfo); + + if (!(running === 0 && this.empty())) { + _context.next = 8; + break; + } + + return _context.abrupt("return", this.Events.trigger("idle")); + + case 8: + _context.next = 14; + break; + + case 10: + _context.prev = 10; + _context.t0 = _context["catch"](0); + e = _context.t0; + return _context.abrupt("return", this.Events.trigger("error", e)); + + case 14: + case "end": + return _context.stop(); + } + } + }, _callee, this, [[0, 10]]); + })); + + function _free(_x, _x2, _x3, _x4) { + return _free2.apply(this, arguments); + } + + return _free; + }() + }, { + key: "_run", + value: function _run(index, job, wait) { + var _this2 = this; + + var clearGlobalState, free, run; + job.doRun(); + clearGlobalState = this._clearGlobalState.bind(this, index); + run = this._run.bind(this, index, job); + free = this._free.bind(this, index, job); + return this._scheduled[index] = { + timeout: setTimeout(function () { + return job.doExecute(_this2._limiter, clearGlobalState, run, free); + }, wait), + expiration: job.options.expiration != null ? setTimeout(function () { + return job.doExpire(clearGlobalState, run, free); + }, wait + job.options.expiration) : void 0, + job: job + }; + } + }, { + key: "_drainOne", + value: function _drainOne(capacity) { + var _this3 = this; + + return this._registerLock.schedule(function () { + var args, index, next, options, queue; + + if (_this3.queued() === 0) { + return _this3.Promise.resolve(null); + } + + queue = _this3._queues.getFirst(); + + var _next = next = queue.first(); + + options = _next.options; + args = _next.args; + + if (capacity != null && options.weight > capacity) { + return _this3.Promise.resolve(null); + } + + _this3.Events.trigger("debug", "Draining ".concat(options.id), { + args: args, + options: options + }); + + index = _this3._randomIndex(); + return _this3._store.__register__(index, options.weight, options.expiration).then(function (_ref2) { + var success = _ref2.success, + wait = _ref2.wait, + reservoir = _ref2.reservoir; + var empty; + + _this3.Events.trigger("debug", "Drained ".concat(options.id), { + success: success, + args: args, + options: options + }); + + if (success) { + queue.shift(); + empty = _this3.empty(); + + if (empty) { + _this3.Events.trigger("empty"); + } + + if (reservoir === 0) { + _this3.Events.trigger("depleted", empty); + } + + _this3._run(index, next, wait); + + return _this3.Promise.resolve(options.weight); + } else { + return _this3.Promise.resolve(null); + } + }); + }); + } + }, { + key: "_drainAll", + value: function _drainAll(capacity) { + var _this4 = this; + + var total = arguments.length > 1 && arguments[1] !== undefined ? 
arguments[1] : 0; + return this._drainOne(capacity).then(function (drained) { + var newCapacity; + + if (drained != null) { + newCapacity = capacity != null ? capacity - drained : capacity; + return _this4._drainAll(newCapacity, total + drained); + } else { + return _this4.Promise.resolve(total); + } + })["catch"](function (e) { + return _this4.Events.trigger("error", e); + }); + } + }, { + key: "_dropAllQueued", + value: function _dropAllQueued(message) { + return this._queues.shiftAll(function (job) { + return job.doDrop({ + message: message + }); + }); + } + }, { + key: "stop", + value: function stop() { + var _this5 = this; + + var options = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {}; + var done, waitForExecuting; + options = parser$8.load(options, this.stopDefaults); + + waitForExecuting = function waitForExecuting(at) { + var finished; + + finished = function finished() { + var counts; + counts = _this5._states.counts; + return counts[0] + counts[1] + counts[2] + counts[3] === at; + }; + + return new _this5.Promise(function (resolve, reject) { + if (finished()) { + return resolve(); + } else { + return _this5.on("done", function () { + if (finished()) { + _this5.removeAllListeners("done"); + + return resolve(); + } + }); + } + }); + }; + + done = options.dropWaitingJobs ? (this._run = function (index, next) { + return next.doDrop({ + message: options.dropErrorMessage + }); + }, this._drainOne = function () { + return _this5.Promise.resolve(null); + }, this._registerLock.schedule(function () { + return _this5._submitLock.schedule(function () { + var k, ref, v; + ref = _this5._scheduled; + + for (k in ref) { + v = ref[k]; + + if (_this5.jobStatus(v.job.options.id) === "RUNNING") { + clearTimeout(v.timeout); + clearTimeout(v.expiration); + v.job.doDrop({ + message: options.dropErrorMessage + }); + } + } + + _this5._dropAllQueued(options.dropErrorMessage); + + return waitForExecuting(0); + }); + })) : this.schedule({ + priority: NUM_PRIORITIES$1 - 1, + weight: 0 + }, function () { + return waitForExecuting(1); + }); + + this._receive = function (job) { + return job._reject(new Bottleneck.prototype.BottleneckError(options.enqueueErrorMessage)); + }; + + this.stop = function () { + return _this5.Promise.reject(new Bottleneck.prototype.BottleneckError("stop() has already been called")); + }; + + return done; + } + }, { + key: "_addToQueue", + value: function () { + var _addToQueue2 = _asyncToGenerator( + /*#__PURE__*/ + regeneratorRuntime.mark(function _callee2(job) { + var args, blocked, error, options, reachedHWM, shifted, strategy, _ref3; + + return regeneratorRuntime.wrap(function _callee2$(_context2) { + while (1) { + switch (_context2.prev = _context2.next) { + case 0: + args = job.args; + options = job.options; + _context2.prev = 2; + _context2.next = 5; + return this._store.__submit__(this.queued(), options.weight); + + case 5: + _ref3 = _context2.sent; + reachedHWM = _ref3.reachedHWM; + blocked = _ref3.blocked; + strategy = _ref3.strategy; + _context2.next = 17; + break; + + case 11: + _context2.prev = 11; + _context2.t0 = _context2["catch"](2); + error = _context2.t0; + this.Events.trigger("debug", "Could not queue ".concat(options.id), { + args: args, + options: options, + error: error + }); + job.doDrop({ + error: error + }); + return _context2.abrupt("return", false); + + case 17: + if (!blocked) { + _context2.next = 22; + break; + } + + job.doDrop(); + return _context2.abrupt("return", true); + + case 22: + if (!reachedHWM) { + _context2.next = 28; 
+ break; + } + + shifted = strategy === Bottleneck.prototype.strategy.LEAK ? this._queues.shiftLastFrom(options.priority) : strategy === Bottleneck.prototype.strategy.OVERFLOW_PRIORITY ? this._queues.shiftLastFrom(options.priority + 1) : strategy === Bottleneck.prototype.strategy.OVERFLOW ? job : void 0; + + if (shifted != null) { + shifted.doDrop(); + } + + if (!(shifted == null || strategy === Bottleneck.prototype.strategy.OVERFLOW)) { + _context2.next = 28; + break; + } + + if (shifted == null) { + job.doDrop(); + } + + return _context2.abrupt("return", reachedHWM); + + case 28: + job.doQueue(reachedHWM, blocked); + + this._queues.push(job); + + _context2.next = 32; + return this._drainAll(); + + case 32: + return _context2.abrupt("return", reachedHWM); + + case 33: + case "end": + return _context2.stop(); + } + } + }, _callee2, this, [[2, 11]]); + })); + + function _addToQueue(_x5) { + return _addToQueue2.apply(this, arguments); + } + + return _addToQueue; + }() + }, { + key: "_receive", + value: function _receive(job) { + if (this._states.jobStatus(job.options.id) != null) { + job._reject(new Bottleneck.prototype.BottleneckError("A job with the same id already exists (id=".concat(job.options.id, ")"))); + + return false; + } else { + job.doReceive(); + return this._submitLock.schedule(this._addToQueue, job); + } + } + }, { + key: "submit", + value: function submit() { + var _this6 = this; + + for (var _len2 = arguments.length, args = new Array(_len2), _key2 = 0; _key2 < _len2; _key2++) { + args[_key2] = arguments[_key2]; + } + + var cb, fn, job, options, ref, ref1, task; + + if (typeof args[0] === "function") { + var _ref4, _ref5, _splice$call, _splice$call2; + + ref = args, (_ref4 = ref, _ref5 = _toArray(_ref4), fn = _ref5[0], args = _ref5.slice(1), _ref4), (_splice$call = splice.call(args, -1), _splice$call2 = _slicedToArray(_splice$call, 1), cb = _splice$call2[0], _splice$call); + options = parser$8.load({}, this.jobDefaults); + } else { + var _ref6, _ref7, _splice$call3, _splice$call4; + + ref1 = args, (_ref6 = ref1, _ref7 = _toArray(_ref6), options = _ref7[0], fn = _ref7[1], args = _ref7.slice(2), _ref6), (_splice$call3 = splice.call(args, -1), _splice$call4 = _slicedToArray(_splice$call3, 1), cb = _splice$call4[0], _splice$call3); + options = parser$8.load(options, this.jobDefaults); + } + + task = function task() { + for (var _len3 = arguments.length, args = new Array(_len3), _key3 = 0; _key3 < _len3; _key3++) { + args[_key3] = arguments[_key3]; + } + + return new _this6.Promise(function (resolve, reject) { + return fn.apply(void 0, args.concat([function () { + for (var _len4 = arguments.length, args = new Array(_len4), _key4 = 0; _key4 < _len4; _key4++) { + args[_key4] = arguments[_key4]; + } + + return (args[0] != null ? reject : resolve)(args); + }])); + }); + }; + + job = new Job$1(task, args, options, this.jobDefaults, this.rejectOnDrop, this.Events, this._states, this.Promise); + job.promise.then(function (args) { + return typeof cb === "function" ? cb.apply(void 0, _toConsumableArray(args)) : void 0; + })["catch"](function (args) { + if (Array.isArray(args)) { + return typeof cb === "function" ? cb.apply(void 0, _toConsumableArray(args)) : void 0; + } else { + return typeof cb === "function" ? 
cb(args) : void 0; + } + }); + return this._receive(job); + } + }, { + key: "schedule", + value: function schedule() { + for (var _len5 = arguments.length, args = new Array(_len5), _key5 = 0; _key5 < _len5; _key5++) { + args[_key5] = arguments[_key5]; + } + + var job, options, task; + + if (typeof args[0] === "function") { + var _args3 = args; + + var _args4 = _toArray(_args3); + + task = _args4[0]; + args = _args4.slice(1); + options = {}; + } else { + var _args5 = args; + + var _args6 = _toArray(_args5); + + options = _args6[0]; + task = _args6[1]; + args = _args6.slice(2); + } + + job = new Job$1(task, args, options, this.jobDefaults, this.rejectOnDrop, this.Events, this._states, this.Promise); + + this._receive(job); + + return job.promise; + } + }, { + key: "wrap", + value: function wrap(fn) { + var schedule, wrapped; + schedule = this.schedule.bind(this); + + wrapped = function wrapped() { + for (var _len6 = arguments.length, args = new Array(_len6), _key6 = 0; _key6 < _len6; _key6++) { + args[_key6] = arguments[_key6]; + } + + return schedule.apply(void 0, [fn.bind(this)].concat(args)); + }; + + wrapped.withOptions = function (options) { + for (var _len7 = arguments.length, args = new Array(_len7 > 1 ? _len7 - 1 : 0), _key7 = 1; _key7 < _len7; _key7++) { + args[_key7 - 1] = arguments[_key7]; + } + + return schedule.apply(void 0, [options, fn].concat(args)); + }; + + return wrapped; + } + }, { + key: "updateSettings", + value: function () { + var _updateSettings = _asyncToGenerator( + /*#__PURE__*/ + regeneratorRuntime.mark(function _callee3() { + var options, + _args7 = arguments; + return regeneratorRuntime.wrap(function _callee3$(_context3) { + while (1) { + switch (_context3.prev = _context3.next) { + case 0: + options = _args7.length > 0 && _args7[0] !== undefined ? _args7[0] : {}; + _context3.next = 3; + return this._store.__updateSettings__(parser$8.overwrite(options, this.storeDefaults)); + + case 3: + parser$8.overwrite(options, this.instanceDefaults, this); + return _context3.abrupt("return", this); + + case 5: + case "end": + return _context3.stop(); + } + } + }, _callee3, this); + })); + + function updateSettings() { + return _updateSettings.apply(this, arguments); + } + + return updateSettings; + }() + }, { + key: "currentReservoir", + value: function currentReservoir() { + return this._store.__currentReservoir__(); + } + }, { + key: "incrementReservoir", + value: function incrementReservoir() { + var incr = arguments.length > 0 && arguments[0] !== undefined ? 
arguments[0] : 0; + return this._store.__incrementReservoir__(incr); + } + }]); + + return Bottleneck; + }(); + Bottleneck["default"] = Bottleneck; + Bottleneck.Events = Events$6; + Bottleneck.version = Bottleneck.prototype.version = require$$8.version; + Bottleneck.strategy = Bottleneck.prototype.strategy = { + LEAK: 1, + OVERFLOW: 2, + OVERFLOW_PRIORITY: 4, + BLOCK: 3 + }; + Bottleneck.BottleneckError = Bottleneck.prototype.BottleneckError = BottleneckError_1; + Bottleneck.Group = Bottleneck.prototype.Group = Group_1; + Bottleneck.RedisConnection = Bottleneck.prototype.RedisConnection = RedisConnection_1; + Bottleneck.IORedisConnection = Bottleneck.prototype.IORedisConnection = IORedisConnection_1; + Bottleneck.Batcher = Bottleneck.prototype.Batcher = Batcher_1; + Bottleneck.prototype.jobDefaults = { + priority: DEFAULT_PRIORITY$1, + weight: 1, + expiration: null, + id: "" + }; + Bottleneck.prototype.storeDefaults = { + maxConcurrent: null, + minTime: 0, + highWater: null, + strategy: Bottleneck.prototype.strategy.LEAK, + penalty: null, + reservoir: null, + reservoirRefreshInterval: null, + reservoirRefreshAmount: null, + reservoirIncreaseInterval: null, + reservoirIncreaseAmount: null, + reservoirIncreaseMaximum: null + }; + Bottleneck.prototype.localStoreDefaults = { + Promise: Promise, + timeout: null, + heartbeatInterval: 250 + }; + Bottleneck.prototype.redisStoreDefaults = { + Promise: Promise, + timeout: null, + heartbeatInterval: 5000, + clientTimeout: 10000, + Redis: null, + clientOptions: {}, + clusterNodes: null, + clearDatastore: false, + connection: null + }; + Bottleneck.prototype.instanceDefaults = { + datastore: "local", + connection: null, + id: "", + rejectOnDrop: true, + trackDoneStatus: false, + Promise: Promise + }; + Bottleneck.prototype.stopDefaults = { + enqueueErrorMessage: "This limiter has been stopped and cannot accept new jobs.", + dropWaitingJobs: true, + dropErrorMessage: "This limiter has been stopped." 
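// Usage sketch (illustrative values, not part of the upstream bundle): the
// reservoir fields in storeDefaults above implement a refillable execution
// budget, e.g. at most 10 jobs per minute:
//
//     const limiter = new Bottleneck({
//       reservoir: 10,                        // initial budget
//       reservoirRefreshAmount: 10,           // reset the budget...
//       reservoirRefreshInterval: 60 * 1000   // ...every 60 seconds
//     });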
+ }; + return Bottleneck; + }.call(commonjsGlobal); + + var Bottleneck_1 = Bottleneck; + + var es5 = Bottleneck_1; + + return es5; + +}))); diff --git a/node_modules/bottleneck/lib/Batcher.js b/node_modules/bottleneck/lib/Batcher.js new file mode 100644 index 000000000..f52892afc --- /dev/null +++ b/node_modules/bottleneck/lib/Batcher.js @@ -0,0 +1,66 @@ +"use strict"; + +var Batcher, Events, parser; +parser = require("./parser"); +Events = require("./Events"); + +Batcher = function () { + class Batcher { + constructor(options = {}) { + this.options = options; + parser.load(this.options, this.defaults, this); + this.Events = new Events(this); + this._arr = []; + + this._resetPromise(); + + this._lastFlush = Date.now(); + } + + _resetPromise() { + return this._promise = new this.Promise((res, rej) => { + return this._resolve = res; + }); + } + + _flush() { + clearTimeout(this._timeout); + this._lastFlush = Date.now(); + + this._resolve(); + + this.Events.trigger("batch", this._arr); + this._arr = []; + return this._resetPromise(); + } + + add(data) { + var ret; + + this._arr.push(data); + + ret = this._promise; + + if (this._arr.length === this.maxSize) { + this._flush(); + } else if (this.maxTime != null && this._arr.length === 1) { + this._timeout = setTimeout(() => { + return this._flush(); + }, this.maxTime); + } + + return ret; + } + + } + + ; + Batcher.prototype.defaults = { + maxTime: null, + maxSize: null, + Promise: Promise + }; + return Batcher; +}.call(void 0); + +module.exports = Batcher; \ No newline at end of file diff --git a/node_modules/bottleneck/lib/Bottleneck.js b/node_modules/bottleneck/lib/Bottleneck.js new file mode 100644 index 000000000..ff640a15a --- /dev/null +++ b/node_modules/bottleneck/lib/Bottleneck.js @@ -0,0 +1,594 @@ +"use strict"; + +function _slicedToArray(arr, i) { return _arrayWithHoles(arr) || _iterableToArrayLimit(arr, i) || _nonIterableRest(); } + +function _iterableToArrayLimit(arr, i) { var _arr = []; var _n = true; var _d = false; var _e = undefined; try { for (var _i = arr[Symbol.iterator](), _s; !(_n = (_s = _i.next()).done); _n = true) { _arr.push(_s.value); if (i && _arr.length === i) break; } } catch (err) { _d = true; _e = err; } finally { try { if (!_n && _i["return"] != null) _i["return"](); } finally { if (_d) throw _e; } } return _arr; } + +function _toArray(arr) { return _arrayWithHoles(arr) || _iterableToArray(arr) || _nonIterableRest(); } + +function _nonIterableRest() { throw new TypeError("Invalid attempt to destructure non-iterable instance"); } + +function _iterableToArray(iter) { if (Symbol.iterator in Object(iter) || Object.prototype.toString.call(iter) === "[object Arguments]") return Array.from(iter); } + +function _arrayWithHoles(arr) { if (Array.isArray(arr)) return arr; } + +function asyncGeneratorStep(gen, resolve, reject, _next, _throw, key, arg) { try { var info = gen[key](arg); var value = info.value; } catch (error) { reject(error); return; } if (info.done) { resolve(value); } else { Promise.resolve(value).then(_next, _throw); } } + +function _asyncToGenerator(fn) { return function () { var self = this, args = arguments; return new Promise(function (resolve, reject) { var gen = fn.apply(self, args); function _next(value) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "next", value); } function _throw(err) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "throw", err); } _next(undefined); }); }; } + +var Bottleneck, + DEFAULT_PRIORITY, + Events, + Job, + LocalDatastore, + NUM_PRIORITIES, + Queues, + 
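// Usage sketch (illustrative, not part of the upstream file): the Batcher
// added above collects add() calls into arrays and flushes on whichever of
// maxSize/maxTime is reached first; sendBulk() is an assumed consumer:
//
//     const batcher = new Bottleneck.Batcher({ maxTime: 1000, maxSize: 10 });
//     batcher.on("batch", (items) => sendBulk(items)); // receives the flushed array
//     batcher.add({ id: 1 }); // resolves once its batch has been flushed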
RedisDatastore, + States, + Sync, + parser, + splice = [].splice; +NUM_PRIORITIES = 10; +DEFAULT_PRIORITY = 5; +parser = require("./parser"); +Queues = require("./Queues"); +Job = require("./Job"); +LocalDatastore = require("./LocalDatastore"); +RedisDatastore = require("./RedisDatastore"); +Events = require("./Events"); +States = require("./States"); +Sync = require("./Sync"); + +Bottleneck = function () { + class Bottleneck { + constructor(options = {}, ...invalid) { + var storeInstanceOptions, storeOptions; + this._addToQueue = this._addToQueue.bind(this); + + this._validateOptions(options, invalid); + + parser.load(options, this.instanceDefaults, this); + this._queues = new Queues(NUM_PRIORITIES); + this._scheduled = {}; + this._states = new States(["RECEIVED", "QUEUED", "RUNNING", "EXECUTING"].concat(this.trackDoneStatus ? ["DONE"] : [])); + this._limiter = null; + this.Events = new Events(this); + this._submitLock = new Sync("submit", this.Promise); + this._registerLock = new Sync("register", this.Promise); + storeOptions = parser.load(options, this.storeDefaults, {}); + + this._store = function () { + if (this.datastore === "redis" || this.datastore === "ioredis" || this.connection != null) { + storeInstanceOptions = parser.load(options, this.redisStoreDefaults, {}); + return new RedisDatastore(this, storeOptions, storeInstanceOptions); + } else if (this.datastore === "local") { + storeInstanceOptions = parser.load(options, this.localStoreDefaults, {}); + return new LocalDatastore(this, storeOptions, storeInstanceOptions); + } else { + throw new Bottleneck.prototype.BottleneckError(`Invalid datastore type: ${this.datastore}`); + } + }.call(this); + + this._queues.on("leftzero", () => { + var ref; + return (ref = this._store.heartbeat) != null ? typeof ref.ref === "function" ? ref.ref() : void 0 : void 0; + }); + + this._queues.on("zero", () => { + var ref; + return (ref = this._store.heartbeat) != null ? typeof ref.unref === "function" ? ref.unref() : void 0 : void 0; + }); + } + + _validateOptions(options, invalid) { + if (!(options != null && typeof options === "object" && invalid.length === 0)) { + throw new Bottleneck.prototype.BottleneckError("Bottleneck v2 takes a single object argument. 
Refer to https://github.com/SGrondin/bottleneck#upgrading-to-v2 if you're upgrading from Bottleneck v1."); + } + } + + ready() { + return this._store.ready; + } + + clients() { + return this._store.clients; + } + + channel() { + return `b_${this.id}`; + } + + channel_client() { + return `b_${this.id}_${this._store.clientId}`; + } + + publish(message) { + return this._store.__publish__(message); + } + + disconnect(flush = true) { + return this._store.__disconnect__(flush); + } + + chain(_limiter) { + this._limiter = _limiter; + return this; + } + + queued(priority) { + return this._queues.queued(priority); + } + + clusterQueued() { + return this._store.__queued__(); + } + + empty() { + return this.queued() === 0 && this._submitLock.isEmpty(); + } + + running() { + return this._store.__running__(); + } + + done() { + return this._store.__done__(); + } + + jobStatus(id) { + return this._states.jobStatus(id); + } + + jobs(status) { + return this._states.statusJobs(status); + } + + counts() { + return this._states.statusCounts(); + } + + _randomIndex() { + return Math.random().toString(36).slice(2); + } + + check(weight = 1) { + return this._store.__check__(weight); + } + + _clearGlobalState(index) { + if (this._scheduled[index] != null) { + clearTimeout(this._scheduled[index].expiration); + delete this._scheduled[index]; + return true; + } else { + return false; + } + } + + _free(index, job, options, eventInfo) { + var _this = this; + + return _asyncToGenerator(function* () { + var e, running; + + try { + var _ref = yield _this._store.__free__(index, options.weight); + + running = _ref.running; + + _this.Events.trigger("debug", `Freed ${options.id}`, eventInfo); + + if (running === 0 && _this.empty()) { + return _this.Events.trigger("idle"); + } + } catch (error1) { + e = error1; + return _this.Events.trigger("error", e); + } + })(); + } + + _run(index, job, wait) { + var clearGlobalState, free, run; + job.doRun(); + clearGlobalState = this._clearGlobalState.bind(this, index); + run = this._run.bind(this, index, job); + free = this._free.bind(this, index, job); + return this._scheduled[index] = { + timeout: setTimeout(() => { + return job.doExecute(this._limiter, clearGlobalState, run, free); + }, wait), + expiration: job.options.expiration != null ? 
setTimeout(function () { + return job.doExpire(clearGlobalState, run, free); + }, wait + job.options.expiration) : void 0, + job: job + }; + } + + _drainOne(capacity) { + return this._registerLock.schedule(() => { + var args, index, next, options, queue; + + if (this.queued() === 0) { + return this.Promise.resolve(null); + } + + queue = this._queues.getFirst(); + + var _next2 = next = queue.first(); + + options = _next2.options; + args = _next2.args; + + if (capacity != null && options.weight > capacity) { + return this.Promise.resolve(null); + } + + this.Events.trigger("debug", `Draining ${options.id}`, { + args, + options + }); + index = this._randomIndex(); + return this._store.__register__(index, options.weight, options.expiration).then(({ + success, + wait, + reservoir + }) => { + var empty; + this.Events.trigger("debug", `Drained ${options.id}`, { + success, + args, + options + }); + + if (success) { + queue.shift(); + empty = this.empty(); + + if (empty) { + this.Events.trigger("empty"); + } + + if (reservoir === 0) { + this.Events.trigger("depleted", empty); + } + + this._run(index, next, wait); + + return this.Promise.resolve(options.weight); + } else { + return this.Promise.resolve(null); + } + }); + }); + } + + _drainAll(capacity, total = 0) { + return this._drainOne(capacity).then(drained => { + var newCapacity; + + if (drained != null) { + newCapacity = capacity != null ? capacity - drained : capacity; + return this._drainAll(newCapacity, total + drained); + } else { + return this.Promise.resolve(total); + } + }).catch(e => { + return this.Events.trigger("error", e); + }); + } + + _dropAllQueued(message) { + return this._queues.shiftAll(function (job) { + return job.doDrop({ + message + }); + }); + } + + stop(options = {}) { + var done, waitForExecuting; + options = parser.load(options, this.stopDefaults); + + waitForExecuting = at => { + var finished; + + finished = () => { + var counts; + counts = this._states.counts; + return counts[0] + counts[1] + counts[2] + counts[3] === at; + }; + + return new this.Promise((resolve, reject) => { + if (finished()) { + return resolve(); + } else { + return this.on("done", () => { + if (finished()) { + this.removeAllListeners("done"); + return resolve(); + } + }); + } + }); + }; + + done = options.dropWaitingJobs ? 
(this._run = function (index, next) { + return next.doDrop({ + message: options.dropErrorMessage + }); + }, this._drainOne = () => { + return this.Promise.resolve(null); + }, this._registerLock.schedule(() => { + return this._submitLock.schedule(() => { + var k, ref, v; + ref = this._scheduled; + + for (k in ref) { + v = ref[k]; + + if (this.jobStatus(v.job.options.id) === "RUNNING") { + clearTimeout(v.timeout); + clearTimeout(v.expiration); + v.job.doDrop({ + message: options.dropErrorMessage + }); + } + } + + this._dropAllQueued(options.dropErrorMessage); + + return waitForExecuting(0); + }); + })) : this.schedule({ + priority: NUM_PRIORITIES - 1, + weight: 0 + }, () => { + return waitForExecuting(1); + }); + + this._receive = function (job) { + return job._reject(new Bottleneck.prototype.BottleneckError(options.enqueueErrorMessage)); + }; + + this.stop = () => { + return this.Promise.reject(new Bottleneck.prototype.BottleneckError("stop() has already been called")); + }; + + return done; + } + + _addToQueue(job) { + var _this2 = this; + + return _asyncToGenerator(function* () { + var args, blocked, error, options, reachedHWM, shifted, strategy; + args = job.args; + options = job.options; + + try { + var _ref2 = yield _this2._store.__submit__(_this2.queued(), options.weight); + + reachedHWM = _ref2.reachedHWM; + blocked = _ref2.blocked; + strategy = _ref2.strategy; + } catch (error1) { + error = error1; + + _this2.Events.trigger("debug", `Could not queue ${options.id}`, { + args, + options, + error + }); + + job.doDrop({ + error + }); + return false; + } + + if (blocked) { + job.doDrop(); + return true; + } else if (reachedHWM) { + shifted = strategy === Bottleneck.prototype.strategy.LEAK ? _this2._queues.shiftLastFrom(options.priority) : strategy === Bottleneck.prototype.strategy.OVERFLOW_PRIORITY ? _this2._queues.shiftLastFrom(options.priority + 1) : strategy === Bottleneck.prototype.strategy.OVERFLOW ? 
job : void 0; + + if (shifted != null) { + shifted.doDrop(); + } + + if (shifted == null || strategy === Bottleneck.prototype.strategy.OVERFLOW) { + if (shifted == null) { + job.doDrop(); + } + + return reachedHWM; + } + } + + job.doQueue(reachedHWM, blocked); + + _this2._queues.push(job); + + yield _this2._drainAll(); + return reachedHWM; + })(); + } + + _receive(job) { + if (this._states.jobStatus(job.options.id) != null) { + job._reject(new Bottleneck.prototype.BottleneckError(`A job with the same id already exists (id=${job.options.id})`)); + + return false; + } else { + job.doReceive(); + return this._submitLock.schedule(this._addToQueue, job); + } + } + + submit(...args) { + var cb, fn, job, options, ref, ref1, task; + + if (typeof args[0] === "function") { + var _ref3, _ref4, _splice$call, _splice$call2; + + ref = args, (_ref3 = ref, _ref4 = _toArray(_ref3), fn = _ref4[0], args = _ref4.slice(1), _ref3), (_splice$call = splice.call(args, -1), _splice$call2 = _slicedToArray(_splice$call, 1), cb = _splice$call2[0], _splice$call); + options = parser.load({}, this.jobDefaults); + } else { + var _ref5, _ref6, _splice$call3, _splice$call4; + + ref1 = args, (_ref5 = ref1, _ref6 = _toArray(_ref5), options = _ref6[0], fn = _ref6[1], args = _ref6.slice(2), _ref5), (_splice$call3 = splice.call(args, -1), _splice$call4 = _slicedToArray(_splice$call3, 1), cb = _splice$call4[0], _splice$call3); + options = parser.load(options, this.jobDefaults); + } + + task = (...args) => { + return new this.Promise(function (resolve, reject) { + return fn(...args, function (...args) { + return (args[0] != null ? reject : resolve)(args); + }); + }); + }; + + job = new Job(task, args, options, this.jobDefaults, this.rejectOnDrop, this.Events, this._states, this.Promise); + job.promise.then(function (args) { + return typeof cb === "function" ? cb(...args) : void 0; + }).catch(function (args) { + if (Array.isArray(args)) { + return typeof cb === "function" ? cb(...args) : void 0; + } else { + return typeof cb === "function" ? 
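// Usage sketch (illustrative, not part of the upstream file): submit() takes
// a Node-style task plus a trailing callback, while schedule() below is the
// promise-based equivalent; fs and the file name are assumptions:
//
//     limiter.submit(fs.readFile, "./book.txt", "utf8", (err, text) => { /* ... */ });
//     const text = await limiter.schedule(() => fs.promises.readFile("./book.txt", "utf8"));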
cb(args) : void 0; + } + }); + return this._receive(job); + } + + schedule(...args) { + var job, options, task; + + if (typeof args[0] === "function") { + var _args = args; + + var _args2 = _toArray(_args); + + task = _args2[0]; + args = _args2.slice(1); + options = {}; + } else { + var _args3 = args; + + var _args4 = _toArray(_args3); + + options = _args4[0]; + task = _args4[1]; + args = _args4.slice(2); + } + + job = new Job(task, args, options, this.jobDefaults, this.rejectOnDrop, this.Events, this._states, this.Promise); + + this._receive(job); + + return job.promise; + } + + wrap(fn) { + var schedule, wrapped; + schedule = this.schedule.bind(this); + + wrapped = function wrapped(...args) { + return schedule(fn.bind(this), ...args); + }; + + wrapped.withOptions = function (options, ...args) { + return schedule(options, fn, ...args); + }; + + return wrapped; + } + + updateSettings(options = {}) { + var _this3 = this; + + return _asyncToGenerator(function* () { + yield _this3._store.__updateSettings__(parser.overwrite(options, _this3.storeDefaults)); + parser.overwrite(options, _this3.instanceDefaults, _this3); + return _this3; + })(); + } + + currentReservoir() { + return this._store.__currentReservoir__(); + } + + incrementReservoir(incr = 0) { + return this._store.__incrementReservoir__(incr); + } + + } + + ; + Bottleneck.default = Bottleneck; + Bottleneck.Events = Events; + Bottleneck.version = Bottleneck.prototype.version = require("./version.json").version; + Bottleneck.strategy = Bottleneck.prototype.strategy = { + LEAK: 1, + OVERFLOW: 2, + OVERFLOW_PRIORITY: 4, + BLOCK: 3 + }; + Bottleneck.BottleneckError = Bottleneck.prototype.BottleneckError = require("./BottleneckError"); + Bottleneck.Group = Bottleneck.prototype.Group = require("./Group"); + Bottleneck.RedisConnection = Bottleneck.prototype.RedisConnection = require("./RedisConnection"); + Bottleneck.IORedisConnection = Bottleneck.prototype.IORedisConnection = require("./IORedisConnection"); + Bottleneck.Batcher = Bottleneck.prototype.Batcher = require("./Batcher"); + Bottleneck.prototype.jobDefaults = { + priority: DEFAULT_PRIORITY, + weight: 1, + expiration: null, + id: "" + }; + Bottleneck.prototype.storeDefaults = { + maxConcurrent: null, + minTime: 0, + highWater: null, + strategy: Bottleneck.prototype.strategy.LEAK, + penalty: null, + reservoir: null, + reservoirRefreshInterval: null, + reservoirRefreshAmount: null, + reservoirIncreaseInterval: null, + reservoirIncreaseAmount: null, + reservoirIncreaseMaximum: null + }; + Bottleneck.prototype.localStoreDefaults = { + Promise: Promise, + timeout: null, + heartbeatInterval: 250 + }; + Bottleneck.prototype.redisStoreDefaults = { + Promise: Promise, + timeout: null, + heartbeatInterval: 5000, + clientTimeout: 10000, + Redis: null, + clientOptions: {}, + clusterNodes: null, + clearDatastore: false, + connection: null + }; + Bottleneck.prototype.instanceDefaults = { + datastore: "local", + connection: null, + id: "", + rejectOnDrop: true, + trackDoneStatus: false, + Promise: Promise + }; + Bottleneck.prototype.stopDefaults = { + enqueueErrorMessage: "This limiter has been stopped and cannot accept new jobs.", + dropWaitingJobs: true, + dropErrorMessage: "This limiter has been stopped." 
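// Usage sketch (illustrative, not part of the upstream file): with the
// stopDefaults above, stop() drops queued jobs and rejects new submissions;
// pass dropWaitingJobs: false to let already-queued jobs finish instead:
//
//     await limiter.stop({ dropWaitingJobs: false });
//     // any later submit()/schedule() rejects with enqueueErrorMessage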
+ }; + return Bottleneck; +}.call(void 0); + +module.exports = Bottleneck; \ No newline at end of file diff --git a/node_modules/bottleneck/lib/BottleneckError.js b/node_modules/bottleneck/lib/BottleneckError.js new file mode 100644 index 000000000..f8eeaff6e --- /dev/null +++ b/node_modules/bottleneck/lib/BottleneckError.js @@ -0,0 +1,5 @@ +"use strict"; + +var BottleneckError; +BottleneckError = class BottleneckError extends Error {}; +module.exports = BottleneckError; \ No newline at end of file diff --git a/node_modules/bottleneck/lib/DLList.js b/node_modules/bottleneck/lib/DLList.js new file mode 100644 index 000000000..b469a6549 --- /dev/null +++ b/node_modules/bottleneck/lib/DLList.js @@ -0,0 +1,107 @@ +"use strict"; + +var DLList; +DLList = class DLList { + constructor(incr, decr) { + this.incr = incr; + this.decr = decr; + this._first = null; + this._last = null; + this.length = 0; + } + + push(value) { + var node; + this.length++; + + if (typeof this.incr === "function") { + this.incr(); + } + + node = { + value, + prev: this._last, + next: null + }; + + if (this._last != null) { + this._last.next = node; + this._last = node; + } else { + this._first = this._last = node; + } + + return void 0; + } + + shift() { + var value; + + if (this._first == null) { + return; + } else { + this.length--; + + if (typeof this.decr === "function") { + this.decr(); + } + } + + value = this._first.value; + + if ((this._first = this._first.next) != null) { + this._first.prev = null; + } else { + this._last = null; + } + + return value; + } + + first() { + if (this._first != null) { + return this._first.value; + } + } + + getArray() { + var node, ref, results; + node = this._first; + results = []; + + while (node != null) { + results.push((ref = node, node = node.next, ref.value)); + } + + return results; + } + + forEachShift(cb) { + var node; + node = this.shift(); + + while (node != null) { + cb(node), node = this.shift(); + } + + return void 0; + } + + debug() { + var node, ref, ref1, ref2, results; + node = this._first; + results = []; + + while (node != null) { + results.push((ref = node, node = node.next, { + value: ref.value, + prev: (ref1 = ref.prev) != null ? ref1.value : void 0, + next: (ref2 = ref.next) != null ? 
ref2.value : void 0 + })); + } + + return results; + } + +}; +module.exports = DLList; \ No newline at end of file diff --git a/node_modules/bottleneck/lib/Events.js b/node_modules/bottleneck/lib/Events.js new file mode 100644 index 000000000..e843257e9 --- /dev/null +++ b/node_modules/bottleneck/lib/Events.js @@ -0,0 +1,128 @@ +"use strict"; + +function asyncGeneratorStep(gen, resolve, reject, _next, _throw, key, arg) { try { var info = gen[key](arg); var value = info.value; } catch (error) { reject(error); return; } if (info.done) { resolve(value); } else { Promise.resolve(value).then(_next, _throw); } } + +function _asyncToGenerator(fn) { return function () { var self = this, args = arguments; return new Promise(function (resolve, reject) { var gen = fn.apply(self, args); function _next(value) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "next", value); } function _throw(err) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "throw", err); } _next(undefined); }); }; } + +var Events; +Events = class Events { + constructor(instance) { + this.instance = instance; + this._events = {}; + + if (this.instance.on != null || this.instance.once != null || this.instance.removeAllListeners != null) { + throw new Error("An Emitter already exists for this object"); + } + + this.instance.on = (name, cb) => { + return this._addListener(name, "many", cb); + }; + + this.instance.once = (name, cb) => { + return this._addListener(name, "once", cb); + }; + + this.instance.removeAllListeners = (name = null) => { + if (name != null) { + return delete this._events[name]; + } else { + return this._events = {}; + } + }; + } + + _addListener(name, status, cb) { + var base; + + if ((base = this._events)[name] == null) { + base[name] = []; + } + + this._events[name].push({ + cb, + status + }); + + return this.instance; + } + + listenerCount(name) { + if (this._events[name] != null) { + return this._events[name].length; + } else { + return 0; + } + } + + trigger(name, ...args) { + var _this = this; + + return _asyncToGenerator(function* () { + var e, promises; + + try { + if (name !== "debug") { + _this.trigger("debug", `Event triggered: ${name}`, args); + } + + if (_this._events[name] == null) { + return; + } + + _this._events[name] = _this._events[name].filter(function (listener) { + return listener.status !== "none"; + }); + promises = _this._events[name].map( + /*#__PURE__*/ + function () { + var _ref = _asyncToGenerator(function* (listener) { + var e, returned; + + if (listener.status === "none") { + return; + } + + if (listener.status === "once") { + listener.status = "none"; + } + + try { + returned = typeof listener.cb === "function" ? listener.cb(...args) : void 0; + + if (typeof (returned != null ? 
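// Usage sketch (illustrative, not part of the upstream file): this embedded
// emitter backs limiter.on()/once()/removeAllListeners(); the handler bodies
// here are assumptions:
//
//     limiter.on("error", (err) => console.error(err));
//     limiter.once("idle", () => console.log("nothing queued or running"));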
returned.then : void 0) === "function") { + return yield returned; + } else { + return returned; + } + } catch (error) { + e = error; + + if (name !== "error") { + _this.trigger("error", e); + } + + return null; + } + }); + + return function (_x) { + return _ref.apply(this, arguments); + }; + }()); + return (yield Promise.all(promises)).find(function (x) { + return x != null; + }); + } catch (error) { + e = error; + + if (name !== "error") { + _this.trigger("error", e); + } + + return null; + } + })(); + } + +}; +module.exports = Events; \ No newline at end of file diff --git a/node_modules/bottleneck/lib/Group.js b/node_modules/bottleneck/lib/Group.js new file mode 100644 index 000000000..39676a583 --- /dev/null +++ b/node_modules/bottleneck/lib/Group.js @@ -0,0 +1,198 @@ +"use strict"; + +function _slicedToArray(arr, i) { return _arrayWithHoles(arr) || _iterableToArrayLimit(arr, i) || _nonIterableRest(); } + +function _nonIterableRest() { throw new TypeError("Invalid attempt to destructure non-iterable instance"); } + +function _iterableToArrayLimit(arr, i) { var _arr = []; var _n = true; var _d = false; var _e = undefined; try { for (var _i = arr[Symbol.iterator](), _s; !(_n = (_s = _i.next()).done); _n = true) { _arr.push(_s.value); if (i && _arr.length === i) break; } } catch (err) { _d = true; _e = err; } finally { try { if (!_n && _i["return"] != null) _i["return"](); } finally { if (_d) throw _e; } } return _arr; } + +function _arrayWithHoles(arr) { if (Array.isArray(arr)) return arr; } + +function asyncGeneratorStep(gen, resolve, reject, _next, _throw, key, arg) { try { var info = gen[key](arg); var value = info.value; } catch (error) { reject(error); return; } if (info.done) { resolve(value); } else { Promise.resolve(value).then(_next, _throw); } } + +function _asyncToGenerator(fn) { return function () { var self = this, args = arguments; return new Promise(function (resolve, reject) { var gen = fn.apply(self, args); function _next(value) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "next", value); } function _throw(err) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "throw", err); } _next(undefined); }); }; } + +var Events, Group, IORedisConnection, RedisConnection, Scripts, parser; +parser = require("./parser"); +Events = require("./Events"); +RedisConnection = require("./RedisConnection"); +IORedisConnection = require("./IORedisConnection"); +Scripts = require("./Scripts"); + +Group = function () { + class Group { + constructor(limiterOptions = {}) { + this.deleteKey = this.deleteKey.bind(this); + this.limiterOptions = limiterOptions; + parser.load(this.limiterOptions, this.defaults, this); + this.Events = new Events(this); + this.instances = {}; + this.Bottleneck = require("./Bottleneck"); + + this._startAutoCleanup(); + + this.sharedConnection = this.connection != null; + + if (this.connection == null) { + if (this.limiterOptions.datastore === "redis") { + this.connection = new RedisConnection(Object.assign({}, this.limiterOptions, { + Events: this.Events + })); + } else if (this.limiterOptions.datastore === "ioredis") { + this.connection = new IORedisConnection(Object.assign({}, this.limiterOptions, { + Events: this.Events + })); + } + } + } + + key(key = "") { + var ref; + return (ref = this.instances[key]) != null ?
ref : (() => { + var limiter; + limiter = this.instances[key] = new this.Bottleneck(Object.assign(this.limiterOptions, { + id: `${this.id}-${key}`, + timeout: this.timeout, + connection: this.connection + })); + this.Events.trigger("created", limiter, key); + return limiter; + })(); + } + + deleteKey(key = "") { + var _this = this; + + return _asyncToGenerator(function* () { + var deleted, instance; + instance = _this.instances[key]; + + if (_this.connection) { + deleted = yield _this.connection.__runCommand__(['del', ...Scripts.allKeys(`${_this.id}-${key}`)]); + } + + if (instance != null) { + delete _this.instances[key]; + yield instance.disconnect(); + } + + return instance != null || deleted > 0; + })(); + } + + limiters() { + var k, ref, results, v; + ref = this.instances; + results = []; + + for (k in ref) { + v = ref[k]; + results.push({ + key: k, + limiter: v + }); + } + + return results; + } + + keys() { + return Object.keys(this.instances); + } + + clusterKeys() { + var _this2 = this; + + return _asyncToGenerator(function* () { + var cursor, end, found, i, k, keys, len, next, start; + + if (_this2.connection == null) { + return _this2.Promise.resolve(_this2.keys()); + } + + keys = []; + cursor = null; + start = `b_${_this2.id}-`.length; + end = "_settings".length; + + while (cursor !== 0) { + var _ref = yield _this2.connection.__runCommand__(["scan", cursor != null ? cursor : 0, "match", `b_${_this2.id}-*_settings`, "count", 10000]); + + var _ref2 = _slicedToArray(_ref, 2); + + next = _ref2[0]; + found = _ref2[1]; + cursor = ~~next; + + for (i = 0, len = found.length; i < len; i++) { + k = found[i]; + keys.push(k.slice(start, -end)); + } + } + + return keys; + })(); + } + + _startAutoCleanup() { + var _this3 = this; + + var base; + clearInterval(this.interval); + return typeof (base = this.interval = setInterval( + /*#__PURE__*/ + _asyncToGenerator(function* () { + var e, k, ref, results, time, v; + time = Date.now(); + ref = _this3.instances; + results = []; + + for (k in ref) { + v = ref[k]; + + try { + if (yield v._store.__groupCheck__(time)) { + results.push(_this3.deleteKey(k)); + } else { + results.push(void 0); + } + } catch (error) { + e = error; + results.push(v.Events.trigger("error", e)); + } + } + + return results; + }), this.timeout / 2)).unref === "function" ? base.unref() : void 0; + } + + updateSettings(options = {}) { + parser.overwrite(options, this.defaults, this); + parser.overwrite(options, options, this.limiterOptions); + + if (options.timeout != null) { + return this._startAutoCleanup(); + } + } + + disconnect(flush = true) { + var ref; + + if (!this.sharedConnection) { + return (ref = this.connection) != null ? 
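// Usage sketch (illustrative, not part of the upstream file): a Group lazily
// creates one limiter per key, sharing the options passed to it; the key and
// fetchUser() are assumptions:
//
//     const group = new Bottleneck.Group({ maxConcurrent: 1, minTime: 250 });
//     group.key("user-123").schedule(() => fetchUser("user-123"));
//     group.on("created", (limiter, key) => console.log("limiter for", key));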
ref.disconnect(flush) : void 0; + } + } + + } + + ; + Group.prototype.defaults = { + timeout: 1000 * 60 * 5, + connection: null, + Promise: Promise, + id: "group-key" + }; + return Group; +}.call(void 0); + +module.exports = Group; \ No newline at end of file diff --git a/node_modules/bottleneck/lib/IORedisConnection.js b/node_modules/bottleneck/lib/IORedisConnection.js new file mode 100644 index 000000000..52b28da43 --- /dev/null +++ b/node_modules/bottleneck/lib/IORedisConnection.js @@ -0,0 +1,186 @@ +"use strict"; + +function _slicedToArray(arr, i) { return _arrayWithHoles(arr) || _iterableToArrayLimit(arr, i) || _nonIterableRest(); } + +function _nonIterableRest() { throw new TypeError("Invalid attempt to destructure non-iterable instance"); } + +function _iterableToArrayLimit(arr, i) { var _arr = []; var _n = true; var _d = false; var _e = undefined; try { for (var _i = arr[Symbol.iterator](), _s; !(_n = (_s = _i.next()).done); _n = true) { _arr.push(_s.value); if (i && _arr.length === i) break; } } catch (err) { _d = true; _e = err; } finally { try { if (!_n && _i["return"] != null) _i["return"](); } finally { if (_d) throw _e; } } return _arr; } + +function _arrayWithHoles(arr) { if (Array.isArray(arr)) return arr; } + +function asyncGeneratorStep(gen, resolve, reject, _next, _throw, key, arg) { try { var info = gen[key](arg); var value = info.value; } catch (error) { reject(error); return; } if (info.done) { resolve(value); } else { Promise.resolve(value).then(_next, _throw); } } + +function _asyncToGenerator(fn) { return function () { var self = this, args = arguments; return new Promise(function (resolve, reject) { var gen = fn.apply(self, args); function _next(value) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "next", value); } function _throw(err) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "throw", err); } _next(undefined); }); }; } + +var Events, IORedisConnection, Scripts, parser; +parser = require("./parser"); +Events = require("./Events"); +Scripts = require("./Scripts"); + +IORedisConnection = function () { + class IORedisConnection { + constructor(options = {}) { + parser.load(options, this.defaults, this); + + if (this.Redis == null) { + this.Redis = eval("require")("ioredis"); // Obfuscated or else Webpack/Angular will try to inline the optional ioredis module. To override this behavior: pass the ioredis module to Bottleneck as the 'Redis' option. 
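// Usage sketch (illustrative, not part of the upstream file): as the comment
// above says, bundlers can be kept away from this eval'd require by passing
// the module in explicitly; the connection details here are assumptions:
//
//     const Redis = require("ioredis");
//     const limiter = new Bottleneck({
//       id: "my-limiter",
//       datastore: "ioredis",
//       Redis,
//       clientOptions: { host: "127.0.0.1", port: 6379 }
//     });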
+ } + + if (this.Events == null) { + this.Events = new Events(this); + } + + this.terminated = false; + + if (this.clusterNodes != null) { + this.client = new this.Redis.Cluster(this.clusterNodes, this.clientOptions); + this.subscriber = new this.Redis.Cluster(this.clusterNodes, this.clientOptions); + } else if (this.client != null && this.client.duplicate == null) { + this.subscriber = new this.Redis.Cluster(this.client.startupNodes, this.client.options); + } else { + if (this.client == null) { + this.client = new this.Redis(this.clientOptions); + } + + this.subscriber = this.client.duplicate(); + } + + this.limiters = {}; + this.ready = this.Promise.all([this._setup(this.client, false), this._setup(this.subscriber, true)]).then(() => { + this._loadScripts(); + + return { + client: this.client, + subscriber: this.subscriber + }; + }); + } + + _setup(client, sub) { + client.setMaxListeners(0); + return new this.Promise((resolve, reject) => { + client.on("error", e => { + return this.Events.trigger("error", e); + }); + + if (sub) { + client.on("message", (channel, message) => { + var ref; + return (ref = this.limiters[channel]) != null ? ref._store.onMessage(channel, message) : void 0; + }); + } + + if (client.status === "ready") { + return resolve(); + } else { + return client.once("ready", resolve); + } + }); + } + + _loadScripts() { + return Scripts.names.forEach(name => { + return this.client.defineCommand(name, { + lua: Scripts.payload(name) + }); + }); + } + + __runCommand__(cmd) { + var _this = this; + + return _asyncToGenerator(function* () { + var _, deleted; + + yield _this.ready; + + var _ref = yield _this.client.pipeline([cmd]).exec(); + + var _ref2 = _slicedToArray(_ref, 1); + + var _ref2$ = _slicedToArray(_ref2[0], 2); + + _ = _ref2$[0]; + deleted = _ref2$[1]; + return deleted; + })(); + } + + __addLimiter__(instance) { + return this.Promise.all([instance.channel(), instance.channel_client()].map(channel => { + return new this.Promise((resolve, reject) => { + return this.subscriber.subscribe(channel, () => { + this.limiters[channel] = instance; + return resolve(); + }); + }); + })); + } + + __removeLimiter__(instance) { + var _this2 = this; + + return [instance.channel(), instance.channel_client()].forEach( + /*#__PURE__*/ + function () { + var _ref3 = _asyncToGenerator(function* (channel) { + if (!_this2.terminated) { + yield _this2.subscriber.unsubscribe(channel); + } + + return delete _this2.limiters[channel]; + }); + + return function (_x) { + return _ref3.apply(this, arguments); + }; + }()); + } + + __scriptArgs__(name, id, args, cb) { + var keys; + keys = Scripts.keys(name, id); + return [keys.length].concat(keys, args, cb); + } + + __scriptFn__(name) { + return this.client[name].bind(this.client); + } + + disconnect(flush = true) { + var i, k, len, ref; + ref = Object.keys(this.limiters); + + for (i = 0, len = ref.length; i < len; i++) { + k = ref[i]; + clearInterval(this.limiters[k]._store.heartbeat); + } + + this.limiters = {}; + this.terminated = true; + + if (flush) { + return this.Promise.all([this.client.quit(), this.subscriber.quit()]); + } else { + this.client.disconnect(); + this.subscriber.disconnect(); + return this.Promise.resolve(); + } + } + + } + + ; + IORedisConnection.prototype.datastore = "ioredis"; + IORedisConnection.prototype.defaults = { + Redis: null, + clientOptions: {}, + clusterNodes: null, + client: null, + Promise: Promise, + Events: null + }; + return IORedisConnection; +}.call(void 0); + +module.exports = IORedisConnection; \ No newline at 
end of file diff --git a/node_modules/bottleneck/lib/Job.js b/node_modules/bottleneck/lib/Job.js new file mode 100644 index 000000000..09ff6ca80 --- /dev/null +++ b/node_modules/bottleneck/lib/Job.js @@ -0,0 +1,215 @@ +"use strict"; + +function asyncGeneratorStep(gen, resolve, reject, _next, _throw, key, arg) { try { var info = gen[key](arg); var value = info.value; } catch (error) { reject(error); return; } if (info.done) { resolve(value); } else { Promise.resolve(value).then(_next, _throw); } } + +function _asyncToGenerator(fn) { return function () { var self = this, args = arguments; return new Promise(function (resolve, reject) { var gen = fn.apply(self, args); function _next(value) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "next", value); } function _throw(err) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "throw", err); } _next(undefined); }); }; } + +var BottleneckError, DEFAULT_PRIORITY, Job, NUM_PRIORITIES, parser; +NUM_PRIORITIES = 10; +DEFAULT_PRIORITY = 5; +parser = require("./parser"); +BottleneckError = require("./BottleneckError"); +Job = class Job { + constructor(task, args, options, jobDefaults, rejectOnDrop, Events, _states, Promise) { + this.task = task; + this.args = args; + this.rejectOnDrop = rejectOnDrop; + this.Events = Events; + this._states = _states; + this.Promise = Promise; + this.options = parser.load(options, jobDefaults); + this.options.priority = this._sanitizePriority(this.options.priority); + + if (this.options.id === jobDefaults.id) { + this.options.id = `${this.options.id}-${this._randomIndex()}`; + } + + this.promise = new this.Promise((_resolve, _reject) => { + this._resolve = _resolve; + this._reject = _reject; + }); + this.retryCount = 0; + } + + _sanitizePriority(priority) { + var sProperty; + sProperty = ~~priority !== priority ? DEFAULT_PRIORITY : priority; + + if (sProperty < 0) { + return 0; + } else if (sProperty > NUM_PRIORITIES - 1) { + return NUM_PRIORITIES - 1; + } else { + return sProperty; + } + } + + _randomIndex() { + return Math.random().toString(36).slice(2); + } + + doDrop({ + error, + message = "This job has been dropped by Bottleneck" + } = {}) { + if (this._states.remove(this.options.id)) { + if (this.rejectOnDrop) { + this._reject(error != null ? error : new BottleneckError(message)); + } + + this.Events.trigger("dropped", { + args: this.args, + options: this.options, + task: this.task, + promise: this.promise + }); + return true; + } else { + return false; + } + } + + _assertStatus(expected) { + var status; + status = this._states.jobStatus(this.options.id); + + if (!(status === expected || expected === "DONE" && status === null)) { + throw new BottleneckError(`Invalid job status ${status}, expected ${expected}. 
Please open an issue at https://github.com/SGrondin/bottleneck/issues`); + } + } + + doReceive() { + this._states.start(this.options.id); + + return this.Events.trigger("received", { + args: this.args, + options: this.options + }); + } + + doQueue(reachedHWM, blocked) { + this._assertStatus("RECEIVED"); + + this._states.next(this.options.id); + + return this.Events.trigger("queued", { + args: this.args, + options: this.options, + reachedHWM, + blocked + }); + } + + doRun() { + if (this.retryCount === 0) { + this._assertStatus("QUEUED"); + + this._states.next(this.options.id); + } else { + this._assertStatus("EXECUTING"); + } + + return this.Events.trigger("scheduled", { + args: this.args, + options: this.options + }); + } + + doExecute(chained, clearGlobalState, run, free) { + var _this = this; + + return _asyncToGenerator(function* () { + var error, eventInfo, passed; + + if (_this.retryCount === 0) { + _this._assertStatus("RUNNING"); + + _this._states.next(_this.options.id); + } else { + _this._assertStatus("EXECUTING"); + } + + eventInfo = { + args: _this.args, + options: _this.options, + retryCount: _this.retryCount + }; + + _this.Events.trigger("executing", eventInfo); + + try { + passed = yield chained != null ? chained.schedule(_this.options, _this.task, ..._this.args) : _this.task(..._this.args); + + if (clearGlobalState()) { + _this.doDone(eventInfo); + + yield free(_this.options, eventInfo); + + _this._assertStatus("DONE"); + + return _this._resolve(passed); + } + } catch (error1) { + error = error1; + return _this._onFailure(error, eventInfo, clearGlobalState, run, free); + } + })(); + } + + doExpire(clearGlobalState, run, free) { + var error, eventInfo; + + if (this._states.jobStatus(this.options.id) === "RUNNING") { + this._states.next(this.options.id); + } + + this._assertStatus("EXECUTING"); + + eventInfo = { + args: this.args, + options: this.options, + retryCount: this.retryCount + }; + error = new BottleneckError(`This job timed out after ${this.options.expiration} ms.`); + return this._onFailure(error, eventInfo, clearGlobalState, run, free); + } + + _onFailure(error, eventInfo, clearGlobalState, run, free) { + var _this2 = this; + + return _asyncToGenerator(function* () { + var retry, retryAfter; + + if (clearGlobalState()) { + retry = yield _this2.Events.trigger("failed", error, eventInfo); + + if (retry != null) { + retryAfter = ~~retry; + + _this2.Events.trigger("retry", `Retrying ${_this2.options.id} after ${retryAfter} ms`, eventInfo); + + _this2.retryCount++; + return run(retryAfter); + } else { + _this2.doDone(eventInfo); + + yield free(_this2.options, eventInfo); + + _this2._assertStatus("DONE"); + + return _this2._reject(error); + } + } + })(); + } + + doDone(eventInfo) { + this._assertStatus("EXECUTING"); + + this._states.next(this.options.id); + + return this.Events.trigger("done", eventInfo); + } + +}; +module.exports = Job; \ No newline at end of file diff --git a/node_modules/bottleneck/lib/LocalDatastore.js b/node_modules/bottleneck/lib/LocalDatastore.js new file mode 100644 index 000000000..119849eda --- /dev/null +++ b/node_modules/bottleneck/lib/LocalDatastore.js @@ -0,0 +1,287 @@ +"use strict"; + +function asyncGeneratorStep(gen, resolve, reject, _next, _throw, key, arg) { try { var info = gen[key](arg); var value = info.value; } catch (error) { reject(error); return; } if (info.done) { resolve(value); } else { Promise.resolve(value).then(_next, _throw); } } + +function _asyncToGenerator(fn) { return function () { var self = this, args = arguments;
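// Usage sketch (illustrative, not part of the upstream file): _onFailure in
// Job.js above is the retry hook; when a "failed" listener returns a number,
// the job is re-run after that many milliseconds. The policy below is only
// an example:
//
//     limiter.on("failed", (error, jobInfo) => {
//       if (jobInfo.retryCount < 2) return 250; // retry twice, 250 ms apart
//     });
//     limiter.on("retry", (message, jobInfo) => console.log(message));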
return new Promise(function (resolve, reject) { var gen = fn.apply(self, args); function _next(value) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "next", value); } function _throw(err) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "throw", err); } _next(undefined); }); }; } + +var BottleneckError, LocalDatastore, parser; +parser = require("./parser"); +BottleneckError = require("./BottleneckError"); +LocalDatastore = class LocalDatastore { + constructor(instance, storeOptions, storeInstanceOptions) { + this.instance = instance; + this.storeOptions = storeOptions; + this.clientId = this.instance._randomIndex(); + parser.load(storeInstanceOptions, storeInstanceOptions, this); + this._nextRequest = this._lastReservoirRefresh = this._lastReservoirIncrease = Date.now(); + this._running = 0; + this._done = 0; + this._unblockTime = 0; + this.ready = this.Promise.resolve(); + this.clients = {}; + + this._startHeartbeat(); + } + + _startHeartbeat() { + var base; + + if (this.heartbeat == null && (this.storeOptions.reservoirRefreshInterval != null && this.storeOptions.reservoirRefreshAmount != null || this.storeOptions.reservoirIncreaseInterval != null && this.storeOptions.reservoirIncreaseAmount != null)) { + return typeof (base = this.heartbeat = setInterval(() => { + var amount, incr, maximum, now, reservoir; + now = Date.now(); + + if (this.storeOptions.reservoirRefreshInterval != null && now >= this._lastReservoirRefresh + this.storeOptions.reservoirRefreshInterval) { + this._lastReservoirRefresh = now; + this.storeOptions.reservoir = this.storeOptions.reservoirRefreshAmount; + + this.instance._drainAll(this.computeCapacity()); + } + + if (this.storeOptions.reservoirIncreaseInterval != null && now >= this._lastReservoirIncrease + this.storeOptions.reservoirIncreaseInterval) { + var _this$storeOptions = this.storeOptions; + amount = _this$storeOptions.reservoirIncreaseAmount; + maximum = _this$storeOptions.reservoirIncreaseMaximum; + reservoir = _this$storeOptions.reservoir; + this._lastReservoirIncrease = now; + incr = maximum != null ? Math.min(amount, maximum - reservoir) : amount; + + if (incr > 0) { + this.storeOptions.reservoir += incr; + return this.instance._drainAll(this.computeCapacity()); + } + } + }, this.heartbeatInterval)).unref === "function" ? base.unref() : void 0; + } else { + return clearInterval(this.heartbeat); + } + } + + __publish__(message) { + var _this = this; + + return _asyncToGenerator(function* () { + yield _this.yieldLoop(); + return _this.instance.Events.trigger("message", message.toString()); + })(); + } + + __disconnect__(flush) { + var _this2 = this; + + return _asyncToGenerator(function* () { + yield _this2.yieldLoop(); + clearInterval(_this2.heartbeat); + return _this2.Promise.resolve(); + })(); + } + + yieldLoop(t = 0) { + return new this.Promise(function (resolve, reject) { + return setTimeout(resolve, t); + }); + } + + computePenalty() { + var ref; + return (ref = this.storeOptions.penalty) != null ? 
ref : 15 * this.storeOptions.minTime || 5000; + } + + __updateSettings__(options) { + var _this3 = this; + + return _asyncToGenerator(function* () { + yield _this3.yieldLoop(); + parser.overwrite(options, options, _this3.storeOptions); + + _this3._startHeartbeat(); + + _this3.instance._drainAll(_this3.computeCapacity()); + + return true; + })(); + } + + __running__() { + var _this4 = this; + + return _asyncToGenerator(function* () { + yield _this4.yieldLoop(); + return _this4._running; + })(); + } + + __queued__() { + var _this5 = this; + + return _asyncToGenerator(function* () { + yield _this5.yieldLoop(); + return _this5.instance.queued(); + })(); + } + + __done__() { + var _this6 = this; + + return _asyncToGenerator(function* () { + yield _this6.yieldLoop(); + return _this6._done; + })(); + } + + __groupCheck__(time) { + var _this7 = this; + + return _asyncToGenerator(function* () { + yield _this7.yieldLoop(); + return _this7._nextRequest + _this7.timeout < time; + })(); + } + + computeCapacity() { + var maxConcurrent, reservoir; + var _this$storeOptions2 = this.storeOptions; + maxConcurrent = _this$storeOptions2.maxConcurrent; + reservoir = _this$storeOptions2.reservoir; + + if (maxConcurrent != null && reservoir != null) { + return Math.min(maxConcurrent - this._running, reservoir); + } else if (maxConcurrent != null) { + return maxConcurrent - this._running; + } else if (reservoir != null) { + return reservoir; + } else { + return null; + } + } + + conditionsCheck(weight) { + var capacity; + capacity = this.computeCapacity(); + return capacity == null || weight <= capacity; + } + + __incrementReservoir__(incr) { + var _this8 = this; + + return _asyncToGenerator(function* () { + var reservoir; + yield _this8.yieldLoop(); + reservoir = _this8.storeOptions.reservoir += incr; + + _this8.instance._drainAll(_this8.computeCapacity()); + + return reservoir; + })(); + } + + __currentReservoir__() { + var _this9 = this; + + return _asyncToGenerator(function* () { + yield _this9.yieldLoop(); + return _this9.storeOptions.reservoir; + })(); + } + + isBlocked(now) { + return this._unblockTime >= now; + } + + check(weight, now) { + return this.conditionsCheck(weight) && this._nextRequest - now <= 0; + } + + __check__(weight) { + var _this10 = this; + + return _asyncToGenerator(function* () { + var now; + yield _this10.yieldLoop(); + now = Date.now(); + return _this10.check(weight, now); + })(); + } + + __register__(index, weight, expiration) { + var _this11 = this; + + return _asyncToGenerator(function* () { + var now, wait; + yield _this11.yieldLoop(); + now = Date.now(); + + if (_this11.conditionsCheck(weight)) { + _this11._running += weight; + + if (_this11.storeOptions.reservoir != null) { + _this11.storeOptions.reservoir -= weight; + } + + wait = Math.max(_this11._nextRequest - now, 0); + _this11._nextRequest = now + wait + _this11.storeOptions.minTime; + return { + success: true, + wait, + reservoir: _this11.storeOptions.reservoir + }; + } else { + return { + success: false + }; + } + })(); + } + + strategyIsBlock() { + return this.storeOptions.strategy === 3; + } + + __submit__(queueLength, weight) { + var _this12 = this; + + return _asyncToGenerator(function* () { + var blocked, now, reachedHWM; + yield _this12.yieldLoop(); + + if (_this12.storeOptions.maxConcurrent != null && weight > _this12.storeOptions.maxConcurrent) { + throw new BottleneckError(`Impossible to add a job having a weight of ${weight} to a limiter having a maxConcurrent setting of ${_this12.storeOptions.maxConcurrent}`); 
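// Usage sketch (illustrative, not part of the upstream file): job weights
// count against maxConcurrent and the reservoir, which is why an over-weight
// job is rejected just above; the names and values here are assumptions:
//
//     const limiter = new Bottleneck({ maxConcurrent: 4 });
//     limiter.schedule({ weight: 2, id: "import", expiration: 5000 }, runImport);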
+ } + + now = Date.now(); + reachedHWM = _this12.storeOptions.highWater != null && queueLength === _this12.storeOptions.highWater && !_this12.check(weight, now); + blocked = _this12.strategyIsBlock() && (reachedHWM || _this12.isBlocked(now)); + + if (blocked) { + _this12._unblockTime = now + _this12.computePenalty(); + _this12._nextRequest = _this12._unblockTime + _this12.storeOptions.minTime; + + _this12.instance._dropAllQueued(); + } + + return { + reachedHWM, + blocked, + strategy: _this12.storeOptions.strategy + }; + })(); + } + + __free__(index, weight) { + var _this13 = this; + + return _asyncToGenerator(function* () { + yield _this13.yieldLoop(); + _this13._running -= weight; + _this13._done += weight; + + _this13.instance._drainAll(_this13.computeCapacity()); + + return { + running: _this13._running + }; + })(); + } + +}; +module.exports = LocalDatastore; \ No newline at end of file diff --git a/node_modules/bottleneck/lib/Queues.js b/node_modules/bottleneck/lib/Queues.js new file mode 100644 index 000000000..1e4129ac0 --- /dev/null +++ b/node_modules/bottleneck/lib/Queues.js @@ -0,0 +1,77 @@ +"use strict"; + +var DLList, Events, Queues; +DLList = require("./DLList"); +Events = require("./Events"); +Queues = class Queues { + constructor(num_priorities) { + var i; + this.Events = new Events(this); + this._length = 0; + + this._lists = function () { + var j, ref, results; + results = []; + + for (i = j = 1, ref = num_priorities; 1 <= ref ? j <= ref : j >= ref; i = 1 <= ref ? ++j : --j) { + results.push(new DLList(() => { + return this.incr(); + }, () => { + return this.decr(); + })); + } + + return results; + }.call(this); + } + + incr() { + if (this._length++ === 0) { + return this.Events.trigger("leftzero"); + } + } + + decr() { + if (--this._length === 0) { + return this.Events.trigger("zero"); + } + } + + push(job) { + return this._lists[job.options.priority].push(job); + } + + queued(priority) { + if (priority != null) { + return this._lists[priority].length; + } else { + return this._length; + } + } + + shiftAll(fn) { + return this._lists.forEach(function (list) { + return list.forEachShift(fn); + }); + } + + getFirst(arr = this._lists) { + var j, len, list; + + for (j = 0, len = arr.length; j < len; j++) { + list = arr[j]; + + if (list.length > 0) { + return list; + } + } + + return []; + } + + shiftLastFrom(priority) { + return this.getFirst(this._lists.slice(priority).reverse()).shift(); + } + +}; +module.exports = Queues; \ No newline at end of file diff --git a/node_modules/bottleneck/lib/RedisConnection.js b/node_modules/bottleneck/lib/RedisConnection.js new file mode 100644 index 000000000..b110704ea --- /dev/null +++ b/node_modules/bottleneck/lib/RedisConnection.js @@ -0,0 +1,193 @@ +"use strict"; + +function asyncGeneratorStep(gen, resolve, reject, _next, _throw, key, arg) { try { var info = gen[key](arg); var value = info.value; } catch (error) { reject(error); return; } if (info.done) { resolve(value); } else { Promise.resolve(value).then(_next, _throw); } } + +function _asyncToGenerator(fn) { return function () { var self = this, args = arguments; return new Promise(function (resolve, reject) { var gen = fn.apply(self, args); function _next(value) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "next", value); } function _throw(err) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "throw", err); } _next(undefined); }); }; } + +var Events, RedisConnection, Scripts, parser; +parser = require("./parser"); +Events = require("./Events"); +Scripts = 
require("./Scripts"); + +RedisConnection = function () { + class RedisConnection { + constructor(options = {}) { + parser.load(options, this.defaults, this); + + if (this.Redis == null) { + this.Redis = eval("require")("redis"); // Obfuscated or else Webpack/Angular will try to inline the optional redis module. To override this behavior: pass the redis module to Bottleneck as the 'Redis' option. + } + + if (this.Events == null) { + this.Events = new Events(this); + } + + this.terminated = false; + + if (this.client == null) { + this.client = this.Redis.createClient(this.clientOptions); + } + + this.subscriber = this.client.duplicate(); + this.limiters = {}; + this.shas = {}; + this.ready = this.Promise.all([this._setup(this.client, false), this._setup(this.subscriber, true)]).then(() => { + return this._loadScripts(); + }).then(() => { + return { + client: this.client, + subscriber: this.subscriber + }; + }); + } + + _setup(client, sub) { + client.setMaxListeners(0); + return new this.Promise((resolve, reject) => { + client.on("error", e => { + return this.Events.trigger("error", e); + }); + + if (sub) { + client.on("message", (channel, message) => { + var ref; + return (ref = this.limiters[channel]) != null ? ref._store.onMessage(channel, message) : void 0; + }); + } + + if (client.ready) { + return resolve(); + } else { + return client.once("ready", resolve); + } + }); + } + + _loadScript(name) { + return new this.Promise((resolve, reject) => { + var payload; + payload = Scripts.payload(name); + return this.client.multi([["script", "load", payload]]).exec((err, replies) => { + if (err != null) { + return reject(err); + } + + this.shas[name] = replies[0]; + return resolve(replies[0]); + }); + }); + } + + _loadScripts() { + return this.Promise.all(Scripts.names.map(k => { + return this._loadScript(k); + })); + } + + __runCommand__(cmd) { + var _this = this; + + return _asyncToGenerator(function* () { + yield _this.ready; + return new _this.Promise((resolve, reject) => { + return _this.client.multi([cmd]).exec_atomic(function (err, replies) { + if (err != null) { + return reject(err); + } else { + return resolve(replies[0]); + } + }); + }); + })(); + } + + __addLimiter__(instance) { + return this.Promise.all([instance.channel(), instance.channel_client()].map(channel => { + return new this.Promise((resolve, reject) => { + var handler; + + handler = chan => { + if (chan === channel) { + this.subscriber.removeListener("subscribe", handler); + this.limiters[channel] = instance; + return resolve(); + } + }; + + this.subscriber.on("subscribe", handler); + return this.subscriber.subscribe(channel); + }); + })); + } + + __removeLimiter__(instance) { + var _this2 = this; + + return this.Promise.all([instance.channel(), instance.channel_client()].map( + /*#__PURE__*/ + function () { + var _ref = _asyncToGenerator(function* (channel) { + if (!_this2.terminated) { + yield new _this2.Promise((resolve, reject) => { + return _this2.subscriber.unsubscribe(channel, function (err, chan) { + if (err != null) { + return reject(err); + } + + if (chan === channel) { + return resolve(); + } + }); + }); + } + + return delete _this2.limiters[channel]; + }); + + return function (_x) { + return _ref.apply(this, arguments); + }; + }())); + } + + __scriptArgs__(name, id, args, cb) { + var keys; + keys = Scripts.keys(name, id); + return [this.shas[name], keys.length].concat(keys, args, cb); + } + + __scriptFn__(name) { + return this.client.evalsha.bind(this.client); + } + + disconnect(flush = true) { + var i, k, len, 
ref; + ref = Object.keys(this.limiters); + + for (i = 0, len = ref.length; i < len; i++) { + k = ref[i]; + clearInterval(this.limiters[k]._store.heartbeat); + } + + this.limiters = {}; + this.terminated = true; + this.client.end(flush); + this.subscriber.end(flush); + return this.Promise.resolve(); + } + + } + + ; + RedisConnection.prototype.datastore = "redis"; + RedisConnection.prototype.defaults = { + Redis: null, + clientOptions: {}, + client: null, + Promise: Promise, + Events: null + }; + return RedisConnection; +}.call(void 0); + +module.exports = RedisConnection; \ No newline at end of file diff --git a/node_modules/bottleneck/lib/RedisDatastore.js b/node_modules/bottleneck/lib/RedisDatastore.js new file mode 100644 index 000000000..dc5943e8a --- /dev/null +++ b/node_modules/bottleneck/lib/RedisDatastore.js @@ -0,0 +1,352 @@ +"use strict"; + +function _slicedToArray(arr, i) { return _arrayWithHoles(arr) || _iterableToArrayLimit(arr, i) || _nonIterableRest(); } + +function _nonIterableRest() { throw new TypeError("Invalid attempt to destructure non-iterable instance"); } + +function _iterableToArrayLimit(arr, i) { var _arr = []; var _n = true; var _d = false; var _e = undefined; try { for (var _i = arr[Symbol.iterator](), _s; !(_n = (_s = _i.next()).done); _n = true) { _arr.push(_s.value); if (i && _arr.length === i) break; } } catch (err) { _d = true; _e = err; } finally { try { if (!_n && _i["return"] != null) _i["return"](); } finally { if (_d) throw _e; } } return _arr; } + +function _arrayWithHoles(arr) { if (Array.isArray(arr)) return arr; } + +function asyncGeneratorStep(gen, resolve, reject, _next, _throw, key, arg) { try { var info = gen[key](arg); var value = info.value; } catch (error) { reject(error); return; } if (info.done) { resolve(value); } else { Promise.resolve(value).then(_next, _throw); } } + +function _asyncToGenerator(fn) { return function () { var self = this, args = arguments; return new Promise(function (resolve, reject) { var gen = fn.apply(self, args); function _next(value) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "next", value); } function _throw(err) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "throw", err); } _next(undefined); }); }; } + +var BottleneckError, IORedisConnection, RedisConnection, RedisDatastore, parser; +parser = require("./parser"); +BottleneckError = require("./BottleneckError"); +RedisConnection = require("./RedisConnection"); +IORedisConnection = require("./IORedisConnection"); +RedisDatastore = class RedisDatastore { + constructor(instance, storeOptions, storeInstanceOptions) { + this.instance = instance; + this.storeOptions = storeOptions; + this.originalId = this.instance.id; + this.clientId = this.instance._randomIndex(); + parser.load(storeInstanceOptions, storeInstanceOptions, this); + this.clients = {}; + this.capacityPriorityCounters = {}; + this.sharedConnection = this.connection != null; + + if (this.connection == null) { + this.connection = this.instance.datastore === "redis" ? new RedisConnection({ + Redis: this.Redis, + clientOptions: this.clientOptions, + Promise: this.Promise, + Events: this.instance.Events + }) : this.instance.datastore === "ioredis" ? 
new IORedisConnection({ + Redis: this.Redis, + clientOptions: this.clientOptions, + clusterNodes: this.clusterNodes, + Promise: this.Promise, + Events: this.instance.Events + }) : void 0; + } + + this.instance.connection = this.connection; + this.instance.datastore = this.connection.datastore; + this.ready = this.connection.ready.then(clients => { + this.clients = clients; + return this.runScript("init", this.prepareInitSettings(this.clearDatastore)); + }).then(() => { + return this.connection.__addLimiter__(this.instance); + }).then(() => { + return this.runScript("register_client", [this.instance.queued()]); + }).then(() => { + var base; + + if (typeof (base = this.heartbeat = setInterval(() => { + return this.runScript("heartbeat", []).catch(e => { + return this.instance.Events.trigger("error", e); + }); + }, this.heartbeatInterval)).unref === "function") { + base.unref(); + } + + return this.clients; + }); + } + + __publish__(message) { + var _this = this; + + return _asyncToGenerator(function* () { + var client; + + var _ref = yield _this.ready; + + client = _ref.client; + return client.publish(_this.instance.channel(), `message:${message.toString()}`); + })(); + } + + onMessage(channel, message) { + var _this2 = this; + + return _asyncToGenerator(function* () { + var capacity, counter, data, drained, e, newCapacity, pos, priorityClient, rawCapacity, type; + + try { + pos = message.indexOf(":"); + var _ref2 = [message.slice(0, pos), message.slice(pos + 1)]; + type = _ref2[0]; + data = _ref2[1]; + + if (type === "capacity") { + return yield _this2.instance._drainAll(data.length > 0 ? ~~data : void 0); + } else if (type === "capacity-priority") { + var _data$split = data.split(":"); + + var _data$split2 = _slicedToArray(_data$split, 3); + + rawCapacity = _data$split2[0]; + priorityClient = _data$split2[1]; + counter = _data$split2[2]; + capacity = rawCapacity.length > 0 ? ~~rawCapacity : void 0; + + if (priorityClient === _this2.clientId) { + drained = yield _this2.instance._drainAll(capacity); + newCapacity = capacity != null ? 
capacity - (drained || 0) : ""; + return yield _this2.clients.client.publish(_this2.instance.channel(), `capacity-priority:${newCapacity}::${counter}`); + } else if (priorityClient === "") { + clearTimeout(_this2.capacityPriorityCounters[counter]); + delete _this2.capacityPriorityCounters[counter]; + return _this2.instance._drainAll(capacity); + } else { + return _this2.capacityPriorityCounters[counter] = setTimeout( + /*#__PURE__*/ + _asyncToGenerator(function* () { + var e; + + try { + delete _this2.capacityPriorityCounters[counter]; + yield _this2.runScript("blacklist_client", [priorityClient]); + return yield _this2.instance._drainAll(capacity); + } catch (error) { + e = error; + return _this2.instance.Events.trigger("error", e); + } + }), 1000); + } + } else if (type === "message") { + return _this2.instance.Events.trigger("message", data); + } else if (type === "blocked") { + return yield _this2.instance._dropAllQueued(); + } + } catch (error) { + e = error; + return _this2.instance.Events.trigger("error", e); + } + })(); + } + + __disconnect__(flush) { + clearInterval(this.heartbeat); + + if (this.sharedConnection) { + return this.connection.__removeLimiter__(this.instance); + } else { + return this.connection.disconnect(flush); + } + } + + runScript(name, args) { + var _this3 = this; + + return _asyncToGenerator(function* () { + if (!(name === "init" || name === "register_client")) { + yield _this3.ready; + } + + return new _this3.Promise((resolve, reject) => { + var all_args, arr; + all_args = [Date.now(), _this3.clientId].concat(args); + + _this3.instance.Events.trigger("debug", `Calling Redis script: ${name}.lua`, all_args); + + arr = _this3.connection.__scriptArgs__(name, _this3.originalId, all_args, function (err, replies) { + if (err != null) { + return reject(err); + } + + return resolve(replies); + }); + return _this3.connection.__scriptFn__(name)(...arr); + }).catch(e => { + if (e.message === "SETTINGS_KEY_NOT_FOUND") { + if (name === "heartbeat") { + return _this3.Promise.resolve(); + } else { + return _this3.runScript("init", _this3.prepareInitSettings(false)).then(() => { + return _this3.runScript(name, args); + }); + } + } else if (e.message === "UNKNOWN_CLIENT") { + return _this3.runScript("register_client", [_this3.instance.queued()]).then(() => { + return _this3.runScript(name, args); + }); + } else { + return _this3.Promise.reject(e); + } + }); + })(); + } + + prepareArray(arr) { + var i, len, results, x; + results = []; + + for (i = 0, len = arr.length; i < len; i++) { + x = arr[i]; + results.push(x != null ? x.toString() : ""); + } + + return results; + } + + prepareObject(obj) { + var arr, k, v; + arr = []; + + for (k in obj) { + v = obj[k]; + arr.push(k, v != null ? v.toString() : ""); + } + + return arr; + } + + prepareInitSettings(clear) { + var args; + args = this.prepareObject(Object.assign({}, this.storeOptions, { + id: this.originalId, + version: this.instance.version, + groupTimeout: this.timeout, + clientTimeout: this.clientTimeout + })); + args.unshift(clear ? 
1 : 0, this.instance.version); + return args; + } + + convertBool(b) { + return !!b; + } + + __updateSettings__(options) { + var _this4 = this; + + return _asyncToGenerator(function* () { + yield _this4.runScript("update_settings", _this4.prepareObject(options)); + return parser.overwrite(options, options, _this4.storeOptions); + })(); + } + + __running__() { + return this.runScript("running", []); + } + + __queued__() { + return this.runScript("queued", []); + } + + __done__() { + return this.runScript("done", []); + } + + __groupCheck__() { + var _this5 = this; + + return _asyncToGenerator(function* () { + return _this5.convertBool((yield _this5.runScript("group_check", []))); + })(); + } + + __incrementReservoir__(incr) { + return this.runScript("increment_reservoir", [incr]); + } + + __currentReservoir__() { + return this.runScript("current_reservoir", []); + } + + __check__(weight) { + var _this6 = this; + + return _asyncToGenerator(function* () { + return _this6.convertBool((yield _this6.runScript("check", _this6.prepareArray([weight])))); + })(); + } + + __register__(index, weight, expiration) { + var _this7 = this; + + return _asyncToGenerator(function* () { + var reservoir, success, wait; + + var _ref4 = yield _this7.runScript("register", _this7.prepareArray([index, weight, expiration])); + + var _ref5 = _slicedToArray(_ref4, 3); + + success = _ref5[0]; + wait = _ref5[1]; + reservoir = _ref5[2]; + return { + success: _this7.convertBool(success), + wait, + reservoir + }; + })(); + } + + __submit__(queueLength, weight) { + var _this8 = this; + + return _asyncToGenerator(function* () { + var blocked, e, maxConcurrent, overweight, reachedHWM, strategy; + + try { + var _ref6 = yield _this8.runScript("submit", _this8.prepareArray([queueLength, weight])); + + var _ref7 = _slicedToArray(_ref6, 3); + + reachedHWM = _ref7[0]; + blocked = _ref7[1]; + strategy = _ref7[2]; + return { + reachedHWM: _this8.convertBool(reachedHWM), + blocked: _this8.convertBool(blocked), + strategy + }; + } catch (error) { + e = error; + + if (e.message.indexOf("OVERWEIGHT") === 0) { + var _e$message$split = e.message.split(":"); + + var _e$message$split2 = _slicedToArray(_e$message$split, 3); + + overweight = _e$message$split2[0]; + weight = _e$message$split2[1]; + maxConcurrent = _e$message$split2[2]; + throw new BottleneckError(`Impossible to add a job having a weight of ${weight} to a limiter having a maxConcurrent setting of ${maxConcurrent}`); + } else { + throw e; + } + } + })(); + } + + __free__(index, weight) { + var _this9 = this; + + return _asyncToGenerator(function* () { + var running; + running = yield _this9.runScript("free", _this9.prepareArray([index])); + return { + running + }; + })(); + } + +}; +module.exports = RedisDatastore; \ No newline at end of file diff --git a/node_modules/bottleneck/lib/Scripts.js b/node_modules/bottleneck/lib/Scripts.js new file mode 100644 index 000000000..96467eb1e --- /dev/null +++ b/node_modules/bottleneck/lib/Scripts.js @@ -0,0 +1,162 @@ +"use strict"; + +var headers, lua, templates; +lua = require("./lua.json"); +headers = { + refs: lua["refs.lua"], + validate_keys: lua["validate_keys.lua"], + validate_client: lua["validate_client.lua"], + refresh_expiration: lua["refresh_expiration.lua"], + process_tick: lua["process_tick.lua"], + conditions_check: lua["conditions_check.lua"], + get_time: lua["get_time.lua"] +}; + +exports.allKeys = function (id) { + return [ + /* + HASH + */ + `b_${id}_settings`, + /* + HASH + job index -> weight + */ + `b_${id}_job_weights`, 
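+ /*
+ All eight keys are derived from the limiter id; for example a limiter
+ created with id "my-limiter" (a hypothetical id, for illustration) keeps
+ its settings hash under "b_my-limiter_settings". refs.lua below binds
+ KEYS[1] through KEYS[8] to these keys in this exact order.
+ */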
+ /* + ZSET + job index -> expiration + */ + `b_${id}_job_expirations`, + /* + HASH + job index -> client + */ + `b_${id}_job_clients`, + /* + ZSET + client -> sum running + */ + `b_${id}_client_running`, + /* + HASH + client -> num queued + */ + `b_${id}_client_num_queued`, + /* + ZSET + client -> last job registered + */ + `b_${id}_client_last_registered`, + /* + ZSET + client -> last seen + */ + `b_${id}_client_last_seen`]; +}; + +templates = { + init: { + keys: exports.allKeys, + headers: ["process_tick"], + refresh_expiration: true, + code: lua["init.lua"] + }, + group_check: { + keys: exports.allKeys, + headers: [], + refresh_expiration: false, + code: lua["group_check.lua"] + }, + register_client: { + keys: exports.allKeys, + headers: ["validate_keys"], + refresh_expiration: false, + code: lua["register_client.lua"] + }, + blacklist_client: { + keys: exports.allKeys, + headers: ["validate_keys", "validate_client"], + refresh_expiration: false, + code: lua["blacklist_client.lua"] + }, + heartbeat: { + keys: exports.allKeys, + headers: ["validate_keys", "validate_client", "process_tick"], + refresh_expiration: false, + code: lua["heartbeat.lua"] + }, + update_settings: { + keys: exports.allKeys, + headers: ["validate_keys", "validate_client", "process_tick"], + refresh_expiration: true, + code: lua["update_settings.lua"] + }, + running: { + keys: exports.allKeys, + headers: ["validate_keys", "validate_client", "process_tick"], + refresh_expiration: false, + code: lua["running.lua"] + }, + queued: { + keys: exports.allKeys, + headers: ["validate_keys", "validate_client"], + refresh_expiration: false, + code: lua["queued.lua"] + }, + done: { + keys: exports.allKeys, + headers: ["validate_keys", "validate_client", "process_tick"], + refresh_expiration: false, + code: lua["done.lua"] + }, + check: { + keys: exports.allKeys, + headers: ["validate_keys", "validate_client", "process_tick", "conditions_check"], + refresh_expiration: false, + code: lua["check.lua"] + }, + submit: { + keys: exports.allKeys, + headers: ["validate_keys", "validate_client", "process_tick", "conditions_check"], + refresh_expiration: true, + code: lua["submit.lua"] + }, + register: { + keys: exports.allKeys, + headers: ["validate_keys", "validate_client", "process_tick", "conditions_check"], + refresh_expiration: true, + code: lua["register.lua"] + }, + free: { + keys: exports.allKeys, + headers: ["validate_keys", "validate_client", "process_tick"], + refresh_expiration: true, + code: lua["free.lua"] + }, + current_reservoir: { + keys: exports.allKeys, + headers: ["validate_keys", "validate_client", "process_tick"], + refresh_expiration: false, + code: lua["current_reservoir.lua"] + }, + increment_reservoir: { + keys: exports.allKeys, + headers: ["validate_keys", "validate_client", "process_tick"], + refresh_expiration: true, + code: lua["increment_reservoir.lua"] + } +}; +exports.names = Object.keys(templates); + +exports.keys = function (name, id) { + return templates[name].keys(id); +}; + +exports.payload = function (name) { + var template; + template = templates[name]; + return Array.prototype.concat(headers.refs, template.headers.map(function (h) { + return headers[h]; + }), template.refresh_expiration ? 
headers.refresh_expiration : "", template.code).join("\n"); +}; \ No newline at end of file diff --git a/node_modules/bottleneck/lib/States.js b/node_modules/bottleneck/lib/States.js new file mode 100644 index 000000000..9b8ac1422 --- /dev/null +++ b/node_modules/bottleneck/lib/States.js @@ -0,0 +1,88 @@ +"use strict"; + +var BottleneckError, States; +BottleneckError = require("./BottleneckError"); +States = class States { + constructor(status1) { + this.status = status1; + this._jobs = {}; + this.counts = this.status.map(function () { + return 0; + }); + } + + next(id) { + var current, next; + current = this._jobs[id]; + next = current + 1; + + if (current != null && next < this.status.length) { + this.counts[current]--; + this.counts[next]++; + return this._jobs[id]++; + } else if (current != null) { + this.counts[current]--; + return delete this._jobs[id]; + } + } + + start(id) { + var initial; + initial = 0; + this._jobs[id] = initial; + return this.counts[initial]++; + } + + remove(id) { + var current; + current = this._jobs[id]; + + if (current != null) { + this.counts[current]--; + delete this._jobs[id]; + } + + return current != null; + } + + jobStatus(id) { + var ref; + return (ref = this.status[this._jobs[id]]) != null ? ref : null; + } + + statusJobs(status) { + var k, pos, ref, results, v; + + if (status != null) { + pos = this.status.indexOf(status); + + if (pos < 0) { + throw new BottleneckError(`status must be one of ${this.status.join(', ')}`); + } + + ref = this._jobs; + results = []; + + for (k in ref) { + v = ref[k]; + + if (v === pos) { + results.push(k); + } + } + + return results; + } else { + return Object.keys(this._jobs); + } + } + + statusCounts() { + return this.counts.reduce((acc, v, i) => { + acc[this.status[i]] = v; + return acc; + }, {}); + } + +}; +module.exports = States; \ No newline at end of file diff --git a/node_modules/bottleneck/lib/Sync.js b/node_modules/bottleneck/lib/Sync.js new file mode 100644 index 000000000..f51eee4a0 --- /dev/null +++ b/node_modules/bottleneck/lib/Sync.js @@ -0,0 +1,80 @@ +"use strict"; + +function asyncGeneratorStep(gen, resolve, reject, _next, _throw, key, arg) { try { var info = gen[key](arg); var value = info.value; } catch (error) { reject(error); return; } if (info.done) { resolve(value); } else { Promise.resolve(value).then(_next, _throw); } } + +function _asyncToGenerator(fn) { return function () { var self = this, args = arguments; return new Promise(function (resolve, reject) { var gen = fn.apply(self, args); function _next(value) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "next", value); } function _throw(err) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "throw", err); } _next(undefined); }); }; } + +var DLList, Sync; +DLList = require("./DLList"); +Sync = class Sync { + constructor(name, Promise) { + this.schedule = this.schedule.bind(this); + this.name = name; + this.Promise = Promise; + this._running = 0; + this._queue = new DLList(); + } + + isEmpty() { + return this._queue.length === 0; + } + + _tryToRun() { + var _this = this; + + return _asyncToGenerator(function* () { + var args, cb, error, reject, resolve, returned, task; + + if (_this._running < 1 && _this._queue.length > 0) { + _this._running++; + + var _this$_queue$shift = _this._queue.shift(); + + task = _this$_queue$shift.task; + args = _this$_queue$shift.args; + resolve = _this$_queue$shift.resolve; + reject = _this$_queue$shift.reject; + cb = yield _asyncToGenerator(function* () { + try { + returned = yield task(...args); 
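+        // The task's outcome is captured in a thunk rather than settled
+        // immediately, so that _running is decremented and the next queued
+        // task is started (_tryToRun below) before cb() settles the
+        // caller's promise.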
+ return function () { + return resolve(returned); + }; + } catch (error1) { + error = error1; + return function () { + return reject(error); + }; + } + })(); + _this._running--; + + _this._tryToRun(); + + return cb(); + } + })(); + } + + schedule(task, ...args) { + var promise, reject, resolve; + resolve = reject = null; + promise = new this.Promise(function (_resolve, _reject) { + resolve = _resolve; + return reject = _reject; + }); + + this._queue.push({ + task, + args, + resolve, + reject + }); + + this._tryToRun(); + + return promise; + } + +}; +module.exports = Sync; \ No newline at end of file diff --git a/node_modules/bottleneck/lib/es5.js b/node_modules/bottleneck/lib/es5.js new file mode 100644 index 000000000..822a26d8c --- /dev/null +++ b/node_modules/bottleneck/lib/es5.js @@ -0,0 +1,5 @@ +"use strict"; + +require("regenerator-runtime/runtime"); + +module.exports = require("./Bottleneck"); \ No newline at end of file diff --git a/node_modules/bottleneck/lib/index.js b/node_modules/bottleneck/lib/index.js new file mode 100644 index 000000000..3d447c13b --- /dev/null +++ b/node_modules/bottleneck/lib/index.js @@ -0,0 +1,3 @@ +"use strict"; + +module.exports = require("./Bottleneck"); \ No newline at end of file diff --git a/node_modules/bottleneck/lib/lua.json b/node_modules/bottleneck/lib/lua.json new file mode 100644 index 000000000..c17cc4990 --- /dev/null +++ b/node_modules/bottleneck/lib/lua.json @@ -0,0 +1,24 @@ +{ + "blacklist_client.lua": "local blacklist = ARGV[num_static_argv + 1]\n\nif redis.call('zscore', client_last_seen_key, blacklist) then\n redis.call('zadd', client_last_seen_key, 0, blacklist)\nend\n\n\nreturn {}\n", + "check.lua": "local weight = tonumber(ARGV[num_static_argv + 1])\n\nlocal capacity = process_tick(now, false)['capacity']\nlocal nextRequest = tonumber(redis.call('hget', settings_key, 'nextRequest'))\n\nreturn conditions_check(capacity, weight) and nextRequest - now <= 0\n", + "conditions_check.lua": "local conditions_check = function (capacity, weight)\n return capacity == nil or weight <= capacity\nend\n", + "current_reservoir.lua": "return process_tick(now, false)['reservoir']\n", + "done.lua": "process_tick(now, false)\n\nreturn tonumber(redis.call('hget', settings_key, 'done'))\n", + "free.lua": "local index = ARGV[num_static_argv + 1]\n\nredis.call('zadd', job_expirations_key, 0, index)\n\nreturn process_tick(now, false)['running']\n", + "get_time.lua": "redis.replicate_commands()\n\nlocal get_time = function ()\n local time = redis.call('time')\n\n return tonumber(time[1]..string.sub(time[2], 1, 3))\nend\n", + "group_check.lua": "return not (redis.call('exists', settings_key) == 1)\n", + "heartbeat.lua": "process_tick(now, true)\n", + "increment_reservoir.lua": "local incr = tonumber(ARGV[num_static_argv + 1])\n\nredis.call('hincrby', settings_key, 'reservoir', incr)\n\nlocal reservoir = process_tick(now, true)['reservoir']\n\nlocal groupTimeout = tonumber(redis.call('hget', settings_key, 'groupTimeout'))\nrefresh_expiration(0, 0, groupTimeout)\n\nreturn reservoir\n", + "init.lua": "local clear = tonumber(ARGV[num_static_argv + 1])\nlocal limiter_version = ARGV[num_static_argv + 2]\nlocal num_local_argv = num_static_argv + 2\n\nif clear == 1 then\n redis.call('del', unpack(KEYS))\nend\n\nif redis.call('exists', settings_key) == 0 then\n -- Create\n local args = {'hmset', settings_key}\n\n for i = num_local_argv + 1, #ARGV do\n table.insert(args, ARGV[i])\n end\n\n redis.call(unpack(args))\n redis.call('hmset', settings_key,\n 'nextRequest', 
now,\n 'lastReservoirRefresh', now,\n 'lastReservoirIncrease', now,\n 'running', 0,\n 'done', 0,\n 'unblockTime', 0,\n 'capacityPriorityCounter', 0\n )\n\nelse\n -- Apply migrations\n local settings = redis.call('hmget', settings_key,\n 'id',\n 'version'\n )\n local id = settings[1]\n local current_version = settings[2]\n\n if current_version ~= limiter_version then\n local version_digits = {}\n for k, v in string.gmatch(current_version, \"([^.]+)\") do\n table.insert(version_digits, tonumber(k))\n end\n\n -- 2.10.0\n if version_digits[2] < 10 then\n redis.call('hsetnx', settings_key, 'reservoirRefreshInterval', '')\n redis.call('hsetnx', settings_key, 'reservoirRefreshAmount', '')\n redis.call('hsetnx', settings_key, 'lastReservoirRefresh', '')\n redis.call('hsetnx', settings_key, 'done', 0)\n redis.call('hset', settings_key, 'version', '2.10.0')\n end\n\n -- 2.11.1\n if version_digits[2] < 11 or (version_digits[2] == 11 and version_digits[3] < 1) then\n if redis.call('hstrlen', settings_key, 'lastReservoirRefresh') == 0 then\n redis.call('hmset', settings_key,\n 'lastReservoirRefresh', now,\n 'version', '2.11.1'\n )\n end\n end\n\n -- 2.14.0\n if version_digits[2] < 14 then\n local old_running_key = 'b_'..id..'_running'\n local old_executing_key = 'b_'..id..'_executing'\n\n if redis.call('exists', old_running_key) == 1 then\n redis.call('rename', old_running_key, job_weights_key)\n end\n if redis.call('exists', old_executing_key) == 1 then\n redis.call('rename', old_executing_key, job_expirations_key)\n end\n redis.call('hset', settings_key, 'version', '2.14.0')\n end\n\n -- 2.15.2\n if version_digits[2] < 15 or (version_digits[2] == 15 and version_digits[3] < 2) then\n redis.call('hsetnx', settings_key, 'capacityPriorityCounter', 0)\n redis.call('hset', settings_key, 'version', '2.15.2')\n end\n\n -- 2.17.0\n if version_digits[2] < 17 then\n redis.call('hsetnx', settings_key, 'clientTimeout', 10000)\n redis.call('hset', settings_key, 'version', '2.17.0')\n end\n\n -- 2.18.0\n if version_digits[2] < 18 then\n redis.call('hsetnx', settings_key, 'reservoirIncreaseInterval', '')\n redis.call('hsetnx', settings_key, 'reservoirIncreaseAmount', '')\n redis.call('hsetnx', settings_key, 'reservoirIncreaseMaximum', '')\n redis.call('hsetnx', settings_key, 'lastReservoirIncrease', now)\n redis.call('hset', settings_key, 'version', '2.18.0')\n end\n\n end\n\n process_tick(now, false)\nend\n\nlocal groupTimeout = tonumber(redis.call('hget', settings_key, 'groupTimeout'))\nrefresh_expiration(0, 0, groupTimeout)\n\nreturn {}\n", + "process_tick.lua": "local process_tick = function (now, always_publish)\n\n local compute_capacity = function (maxConcurrent, running, reservoir)\n if maxConcurrent ~= nil and reservoir ~= nil then\n return math.min((maxConcurrent - running), reservoir)\n elseif maxConcurrent ~= nil then\n return maxConcurrent - running\n elseif reservoir ~= nil then\n return reservoir\n else\n return nil\n end\n end\n\n local settings = redis.call('hmget', settings_key,\n 'id',\n 'maxConcurrent',\n 'running',\n 'reservoir',\n 'reservoirRefreshInterval',\n 'reservoirRefreshAmount',\n 'lastReservoirRefresh',\n 'reservoirIncreaseInterval',\n 'reservoirIncreaseAmount',\n 'reservoirIncreaseMaximum',\n 'lastReservoirIncrease',\n 'capacityPriorityCounter',\n 'clientTimeout'\n )\n local id = settings[1]\n local maxConcurrent = tonumber(settings[2])\n local running = tonumber(settings[3])\n local reservoir = tonumber(settings[4])\n local reservoirRefreshInterval = tonumber(settings[5])\n local 
reservoirRefreshAmount = tonumber(settings[6])\n local lastReservoirRefresh = tonumber(settings[7])\n local reservoirIncreaseInterval = tonumber(settings[8])\n local reservoirIncreaseAmount = tonumber(settings[9])\n local reservoirIncreaseMaximum = tonumber(settings[10])\n local lastReservoirIncrease = tonumber(settings[11])\n local capacityPriorityCounter = tonumber(settings[12])\n local clientTimeout = tonumber(settings[13])\n\n local initial_capacity = compute_capacity(maxConcurrent, running, reservoir)\n\n --\n -- Process 'running' changes\n --\n local expired = redis.call('zrangebyscore', job_expirations_key, '-inf', '('..now)\n\n if #expired > 0 then\n redis.call('zremrangebyscore', job_expirations_key, '-inf', '('..now)\n\n local flush_batch = function (batch, acc)\n local weights = redis.call('hmget', job_weights_key, unpack(batch))\n redis.call('hdel', job_weights_key, unpack(batch))\n local clients = redis.call('hmget', job_clients_key, unpack(batch))\n redis.call('hdel', job_clients_key, unpack(batch))\n\n -- Calculate sum of removed weights\n for i = 1, #weights do\n acc['total'] = acc['total'] + (tonumber(weights[i]) or 0)\n end\n\n -- Calculate sum of removed weights by client\n local client_weights = {}\n for i = 1, #clients do\n local removed = tonumber(weights[i]) or 0\n if removed > 0 then\n acc['client_weights'][clients[i]] = (acc['client_weights'][clients[i]] or 0) + removed\n end\n end\n end\n\n local acc = {\n ['total'] = 0,\n ['client_weights'] = {}\n }\n local batch_size = 1000\n\n -- Compute changes to Zsets and apply changes to Hashes\n for i = 1, #expired, batch_size do\n local batch = {}\n for j = i, math.min(i + batch_size - 1, #expired) do\n table.insert(batch, expired[j])\n end\n\n flush_batch(batch, acc)\n end\n\n -- Apply changes to Zsets\n if acc['total'] > 0 then\n redis.call('hincrby', settings_key, 'done', acc['total'])\n running = tonumber(redis.call('hincrby', settings_key, 'running', -acc['total']))\n end\n\n for client, weight in pairs(acc['client_weights']) do\n redis.call('zincrby', client_running_key, -weight, client)\n end\n end\n\n --\n -- Process 'reservoir' changes\n --\n local reservoirRefreshActive = reservoirRefreshInterval ~= nil and reservoirRefreshAmount ~= nil\n if reservoirRefreshActive and now >= lastReservoirRefresh + reservoirRefreshInterval then\n reservoir = reservoirRefreshAmount\n redis.call('hmset', settings_key,\n 'reservoir', reservoir,\n 'lastReservoirRefresh', now\n )\n end\n\n local reservoirIncreaseActive = reservoirIncreaseInterval ~= nil and reservoirIncreaseAmount ~= nil\n if reservoirIncreaseActive and now >= lastReservoirIncrease + reservoirIncreaseInterval then\n local num_intervals = math.floor((now - lastReservoirIncrease) / reservoirIncreaseInterval)\n local incr = reservoirIncreaseAmount * num_intervals\n if reservoirIncreaseMaximum ~= nil then\n incr = math.min(incr, reservoirIncreaseMaximum - (reservoir or 0))\n end\n if incr > 0 then\n reservoir = (reservoir or 0) + incr\n end\n redis.call('hmset', settings_key,\n 'reservoir', reservoir,\n 'lastReservoirIncrease', lastReservoirIncrease + (num_intervals * reservoirIncreaseInterval)\n )\n end\n\n --\n -- Clear unresponsive clients\n --\n local unresponsive = redis.call('zrangebyscore', client_last_seen_key, '-inf', (now - clientTimeout))\n local unresponsive_lookup = {}\n local terminated_clients = {}\n for i = 1, #unresponsive do\n unresponsive_lookup[unresponsive[i]] = true\n if tonumber(redis.call('zscore', client_running_key, unresponsive[i])) == 0 then\n 
table.insert(terminated_clients, unresponsive[i])\n end\n end\n if #terminated_clients > 0 then\n redis.call('zrem', client_running_key, unpack(terminated_clients))\n redis.call('hdel', client_num_queued_key, unpack(terminated_clients))\n redis.call('zrem', client_last_registered_key, unpack(terminated_clients))\n redis.call('zrem', client_last_seen_key, unpack(terminated_clients))\n end\n\n --\n -- Broadcast capacity changes\n --\n local final_capacity = compute_capacity(maxConcurrent, running, reservoir)\n\n if always_publish or (initial_capacity ~= nil and final_capacity == nil) then\n -- always_publish or was not unlimited, now unlimited\n redis.call('publish', 'b_'..id, 'capacity:'..(final_capacity or ''))\n\n elseif initial_capacity ~= nil and final_capacity ~= nil and final_capacity > initial_capacity then\n -- capacity was increased\n -- send the capacity message to the limiter having the lowest number of running jobs\n -- the tiebreaker is the limiter having not registered a job in the longest time\n\n local lowest_concurrency_value = nil\n local lowest_concurrency_clients = {}\n local lowest_concurrency_last_registered = {}\n local client_concurrencies = redis.call('zrange', client_running_key, 0, -1, 'withscores')\n\n for i = 1, #client_concurrencies, 2 do\n local client = client_concurrencies[i]\n local concurrency = tonumber(client_concurrencies[i+1])\n\n if (\n lowest_concurrency_value == nil or lowest_concurrency_value == concurrency\n ) and (\n not unresponsive_lookup[client]\n ) and (\n tonumber(redis.call('hget', client_num_queued_key, client)) > 0\n ) then\n lowest_concurrency_value = concurrency\n table.insert(lowest_concurrency_clients, client)\n local last_registered = tonumber(redis.call('zscore', client_last_registered_key, client))\n table.insert(lowest_concurrency_last_registered, last_registered)\n end\n end\n\n if #lowest_concurrency_clients > 0 then\n local position = 1\n local earliest = lowest_concurrency_last_registered[1]\n\n for i,v in ipairs(lowest_concurrency_last_registered) do\n if v < earliest then\n position = i\n earliest = v\n end\n end\n\n local next_client = lowest_concurrency_clients[position]\n redis.call('publish', 'b_'..id,\n 'capacity-priority:'..(final_capacity or '')..\n ':'..next_client..\n ':'..capacityPriorityCounter\n )\n redis.call('hincrby', settings_key, 'capacityPriorityCounter', '1')\n else\n redis.call('publish', 'b_'..id, 'capacity:'..(final_capacity or ''))\n end\n end\n\n return {\n ['capacity'] = final_capacity,\n ['running'] = running,\n ['reservoir'] = reservoir\n }\nend\n", + "queued.lua": "local clientTimeout = tonumber(redis.call('hget', settings_key, 'clientTimeout'))\nlocal valid_clients = redis.call('zrangebyscore', client_last_seen_key, (now - clientTimeout), 'inf')\nlocal client_queued = redis.call('hmget', client_num_queued_key, unpack(valid_clients))\n\nlocal sum = 0\nfor i = 1, #client_queued do\n sum = sum + tonumber(client_queued[i])\nend\n\nreturn sum\n", + "refresh_expiration.lua": "local refresh_expiration = function (now, nextRequest, groupTimeout)\n\n if groupTimeout ~= nil then\n local ttl = (nextRequest + groupTimeout) - now\n\n for i = 1, #KEYS do\n redis.call('pexpire', KEYS[i], ttl)\n end\n end\n\nend\n", + "refs.lua": "local settings_key = KEYS[1]\nlocal job_weights_key = KEYS[2]\nlocal job_expirations_key = KEYS[3]\nlocal job_clients_key = KEYS[4]\nlocal client_running_key = KEYS[5]\nlocal client_num_queued_key = KEYS[6]\nlocal client_last_registered_key = KEYS[7]\nlocal client_last_seen_key = 
KEYS[8]\n\nlocal now = tonumber(ARGV[1])\nlocal client = ARGV[2]\n\nlocal num_static_argv = 2\n", + "register.lua": "local index = ARGV[num_static_argv + 1]\nlocal weight = tonumber(ARGV[num_static_argv + 2])\nlocal expiration = tonumber(ARGV[num_static_argv + 3])\n\nlocal state = process_tick(now, false)\nlocal capacity = state['capacity']\nlocal reservoir = state['reservoir']\n\nlocal settings = redis.call('hmget', settings_key,\n 'nextRequest',\n 'minTime',\n 'groupTimeout'\n)\nlocal nextRequest = tonumber(settings[1])\nlocal minTime = tonumber(settings[2])\nlocal groupTimeout = tonumber(settings[3])\n\nif conditions_check(capacity, weight) then\n\n redis.call('hincrby', settings_key, 'running', weight)\n redis.call('hset', job_weights_key, index, weight)\n if expiration ~= nil then\n redis.call('zadd', job_expirations_key, now + expiration, index)\n end\n redis.call('hset', job_clients_key, index, client)\n redis.call('zincrby', client_running_key, weight, client)\n redis.call('hincrby', client_num_queued_key, client, -1)\n redis.call('zadd', client_last_registered_key, now, client)\n\n local wait = math.max(nextRequest - now, 0)\n local newNextRequest = now + wait + minTime\n\n if reservoir == nil then\n redis.call('hset', settings_key,\n 'nextRequest', newNextRequest\n )\n else\n reservoir = reservoir - weight\n redis.call('hmset', settings_key,\n 'reservoir', reservoir,\n 'nextRequest', newNextRequest\n )\n end\n\n refresh_expiration(now, newNextRequest, groupTimeout)\n\n return {true, wait, reservoir}\n\nelse\n return {false}\nend\n", + "register_client.lua": "local queued = tonumber(ARGV[num_static_argv + 1])\n\n-- Could have been re-registered concurrently\nif not redis.call('zscore', client_last_seen_key, client) then\n redis.call('zadd', client_running_key, 0, client)\n redis.call('hset', client_num_queued_key, client, queued)\n redis.call('zadd', client_last_registered_key, 0, client)\nend\n\nredis.call('zadd', client_last_seen_key, now, client)\n\nreturn {}\n", + "running.lua": "return process_tick(now, false)['running']\n", + "submit.lua": "local queueLength = tonumber(ARGV[num_static_argv + 1])\nlocal weight = tonumber(ARGV[num_static_argv + 2])\n\nlocal capacity = process_tick(now, false)['capacity']\n\nlocal settings = redis.call('hmget', settings_key,\n 'id',\n 'maxConcurrent',\n 'highWater',\n 'nextRequest',\n 'strategy',\n 'unblockTime',\n 'penalty',\n 'minTime',\n 'groupTimeout'\n)\nlocal id = settings[1]\nlocal maxConcurrent = tonumber(settings[2])\nlocal highWater = tonumber(settings[3])\nlocal nextRequest = tonumber(settings[4])\nlocal strategy = tonumber(settings[5])\nlocal unblockTime = tonumber(settings[6])\nlocal penalty = tonumber(settings[7])\nlocal minTime = tonumber(settings[8])\nlocal groupTimeout = tonumber(settings[9])\n\nif maxConcurrent ~= nil and weight > maxConcurrent then\n return redis.error_reply('OVERWEIGHT:'..weight..':'..maxConcurrent)\nend\n\nlocal reachedHWM = (highWater ~= nil and queueLength == highWater\n and not (\n conditions_check(capacity, weight)\n and nextRequest - now <= 0\n )\n)\n\nlocal blocked = strategy == 3 and (reachedHWM or unblockTime >= now)\n\nif blocked then\n local computedPenalty = penalty\n if computedPenalty == nil then\n if minTime == 0 then\n computedPenalty = 5000\n else\n computedPenalty = 15 * minTime\n end\n end\n\n local newNextRequest = now + computedPenalty + minTime\n\n redis.call('hmset', settings_key,\n 'unblockTime', now + computedPenalty,\n 'nextRequest', newNextRequest\n )\n\n local clients_queued_reset 
= redis.call('hkeys', client_num_queued_key)\n local queued_reset = {}\n for i = 1, #clients_queued_reset do\n table.insert(queued_reset, clients_queued_reset[i])\n table.insert(queued_reset, 0)\n end\n redis.call('hmset', client_num_queued_key, unpack(queued_reset))\n\n redis.call('publish', 'b_'..id, 'blocked:')\n\n refresh_expiration(now, newNextRequest, groupTimeout)\nend\n\nif not blocked and not reachedHWM then\n redis.call('hincrby', client_num_queued_key, client, 1)\nend\n\nreturn {reachedHWM, blocked, strategy}\n", + "update_settings.lua": "local args = {'hmset', settings_key}\n\nfor i = num_static_argv + 1, #ARGV do\n table.insert(args, ARGV[i])\nend\n\nredis.call(unpack(args))\n\nprocess_tick(now, true)\n\nlocal groupTimeout = tonumber(redis.call('hget', settings_key, 'groupTimeout'))\nrefresh_expiration(0, 0, groupTimeout)\n\nreturn {}\n", + "validate_client.lua": "if not redis.call('zscore', client_last_seen_key, client) then\n return redis.error_reply('UNKNOWN_CLIENT')\nend\n\nredis.call('zadd', client_last_seen_key, now, client)\n", + "validate_keys.lua": "if not (redis.call('exists', settings_key) == 1) then\n return redis.error_reply('SETTINGS_KEY_NOT_FOUND')\nend\n" +} diff --git a/node_modules/bottleneck/lib/parser.js b/node_modules/bottleneck/lib/parser.js new file mode 100644 index 000000000..8686191f0 --- /dev/null +++ b/node_modules/bottleneck/lib/parser.js @@ -0,0 +1,26 @@ +"use strict"; + +exports.load = function (received, defaults, onto = {}) { + var k, ref, v; + + for (k in defaults) { + v = defaults[k]; + onto[k] = (ref = received[k]) != null ? ref : v; + } + + return onto; +}; + +exports.overwrite = function (received, defaults, onto = {}) { + var k, v; + + for (k in received) { + v = received[k]; + + if (defaults[k] !== void 0) { + onto[k] = v; + } + } + + return onto; +}; \ No newline at end of file diff --git a/node_modules/bottleneck/lib/version.json b/node_modules/bottleneck/lib/version.json new file mode 100644 index 000000000..578a219cf --- /dev/null +++ b/node_modules/bottleneck/lib/version.json @@ -0,0 +1 @@ +{"version":"2.19.5"} diff --git a/node_modules/bottleneck/light.js b/node_modules/bottleneck/light.js new file mode 100644 index 000000000..c4aa26537 --- /dev/null +++ b/node_modules/bottleneck/light.js @@ -0,0 +1,1524 @@ +/** + * This file contains the Bottleneck library (MIT), compiled to ES2017, and without Clustering support. + * https://github.com/SGrondin/bottleneck + */ +(function (global, factory) { + typeof exports === 'object' && typeof module !== 'undefined' ? module.exports = factory() : + typeof define === 'function' && define.amd ? define(factory) : + (global.Bottleneck = factory()); +}(this, (function () { 'use strict'; + + var commonjsGlobal = typeof globalThis !== 'undefined' ? globalThis : typeof window !== 'undefined' ? window : typeof global !== 'undefined' ? global : typeof self !== 'undefined' ? self : {}; + + function getCjsExportFromNamespace (n) { + return n && n['default'] || n; + } + + var load = function(received, defaults, onto = {}) { + var k, ref, v; + for (k in defaults) { + v = defaults[k]; + onto[k] = (ref = received[k]) != null ? 
ref : v; + } + return onto; + }; + + var overwrite = function(received, defaults, onto = {}) { + var k, v; + for (k in received) { + v = received[k]; + if (defaults[k] !== void 0) { + onto[k] = v; + } + } + return onto; + }; + + var parser = { + load: load, + overwrite: overwrite + }; + + var DLList; + + DLList = class DLList { + constructor(incr, decr) { + this.incr = incr; + this.decr = decr; + this._first = null; + this._last = null; + this.length = 0; + } + + push(value) { + var node; + this.length++; + if (typeof this.incr === "function") { + this.incr(); + } + node = { + value, + prev: this._last, + next: null + }; + if (this._last != null) { + this._last.next = node; + this._last = node; + } else { + this._first = this._last = node; + } + return void 0; + } + + shift() { + var value; + if (this._first == null) { + return; + } else { + this.length--; + if (typeof this.decr === "function") { + this.decr(); + } + } + value = this._first.value; + if ((this._first = this._first.next) != null) { + this._first.prev = null; + } else { + this._last = null; + } + return value; + } + + first() { + if (this._first != null) { + return this._first.value; + } + } + + getArray() { + var node, ref, results; + node = this._first; + results = []; + while (node != null) { + results.push((ref = node, node = node.next, ref.value)); + } + return results; + } + + forEachShift(cb) { + var node; + node = this.shift(); + while (node != null) { + (cb(node), node = this.shift()); + } + return void 0; + } + + debug() { + var node, ref, ref1, ref2, results; + node = this._first; + results = []; + while (node != null) { + results.push((ref = node, node = node.next, { + value: ref.value, + prev: (ref1 = ref.prev) != null ? ref1.value : void 0, + next: (ref2 = ref.next) != null ? ref2.value : void 0 + })); + } + return results; + } + + }; + + var DLList_1 = DLList; + + var Events; + + Events = class Events { + constructor(instance) { + this.instance = instance; + this._events = {}; + if ((this.instance.on != null) || (this.instance.once != null) || (this.instance.removeAllListeners != null)) { + throw new Error("An Emitter already exists for this object"); + } + this.instance.on = (name, cb) => { + return this._addListener(name, "many", cb); + }; + this.instance.once = (name, cb) => { + return this._addListener(name, "once", cb); + }; + this.instance.removeAllListeners = (name = null) => { + if (name != null) { + return delete this._events[name]; + } else { + return this._events = {}; + } + }; + } + + _addListener(name, status, cb) { + var base; + if ((base = this._events)[name] == null) { + base[name] = []; + } + this._events[name].push({cb, status}); + return this.instance; + } + + listenerCount(name) { + if (this._events[name] != null) { + return this._events[name].length; + } else { + return 0; + } + } + + async trigger(name, ...args) { + var e, promises; + try { + if (name !== "debug") { + this.trigger("debug", `Event triggered: ${name}`, args); + } + if (this._events[name] == null) { + return; + } + this._events[name] = this._events[name].filter(function(listener) { + return listener.status !== "none"; + }); + promises = this._events[name].map(async(listener) => { + var e, returned; + if (listener.status === "none") { + return; + } + if (listener.status === "once") { + listener.status = "none"; + } + try { + returned = typeof listener.cb === "function" ? listener.cb(...args) : void 0; + if (typeof (returned != null ? 
returned.then : void 0) === "function") { + return (await returned); + } else { + return returned; + } + } catch (error) { + e = error; + { + this.trigger("error", e); + } + return null; + } + }); + return ((await Promise.all(promises))).find(function(x) { + return x != null; + }); + } catch (error) { + e = error; + { + this.trigger("error", e); + } + return null; + } + } + + }; + + var Events_1 = Events; + + var DLList$1, Events$1, Queues; + + DLList$1 = DLList_1; + + Events$1 = Events_1; + + Queues = class Queues { + constructor(num_priorities) { + var i; + this.Events = new Events$1(this); + this._length = 0; + this._lists = (function() { + var j, ref, results; + results = []; + for (i = j = 1, ref = num_priorities; (1 <= ref ? j <= ref : j >= ref); i = 1 <= ref ? ++j : --j) { + results.push(new DLList$1((() => { + return this.incr(); + }), (() => { + return this.decr(); + }))); + } + return results; + }).call(this); + } + + incr() { + if (this._length++ === 0) { + return this.Events.trigger("leftzero"); + } + } + + decr() { + if (--this._length === 0) { + return this.Events.trigger("zero"); + } + } + + push(job) { + return this._lists[job.options.priority].push(job); + } + + queued(priority) { + if (priority != null) { + return this._lists[priority].length; + } else { + return this._length; + } + } + + shiftAll(fn) { + return this._lists.forEach(function(list) { + return list.forEachShift(fn); + }); + } + + getFirst(arr = this._lists) { + var j, len, list; + for (j = 0, len = arr.length; j < len; j++) { + list = arr[j]; + if (list.length > 0) { + return list; + } + } + return []; + } + + shiftLastFrom(priority) { + return this.getFirst(this._lists.slice(priority).reverse()).shift(); + } + + }; + + var Queues_1 = Queues; + + var BottleneckError; + + BottleneckError = class BottleneckError extends Error {}; + + var BottleneckError_1 = BottleneckError; + + var BottleneckError$1, DEFAULT_PRIORITY, Job, NUM_PRIORITIES, parser$1; + + NUM_PRIORITIES = 10; + + DEFAULT_PRIORITY = 5; + + parser$1 = parser; + + BottleneckError$1 = BottleneckError_1; + + Job = class Job { + constructor(task, args, options, jobDefaults, rejectOnDrop, Events, _states, Promise) { + this.task = task; + this.args = args; + this.rejectOnDrop = rejectOnDrop; + this.Events = Events; + this._states = _states; + this.Promise = Promise; + this.options = parser$1.load(options, jobDefaults); + this.options.priority = this._sanitizePriority(this.options.priority); + if (this.options.id === jobDefaults.id) { + this.options.id = `${this.options.id}-${this._randomIndex()}`; + } + this.promise = new this.Promise((_resolve, _reject) => { + this._resolve = _resolve; + this._reject = _reject; + }); + this.retryCount = 0; + } + + _sanitizePriority(priority) { + var sProperty; + sProperty = ~~priority !== priority ? DEFAULT_PRIORITY : priority; + if (sProperty < 0) { + return 0; + } else if (sProperty > NUM_PRIORITIES - 1) { + return NUM_PRIORITIES - 1; + } else { + return sProperty; + } + } + + _randomIndex() { + return Math.random().toString(36).slice(2); + } + + doDrop({error, message = "This job has been dropped by Bottleneck"} = {}) { + if (this._states.remove(this.options.id)) { + if (this.rejectOnDrop) { + this._reject(error != null ? 
error : new BottleneckError$1(message)); + } + this.Events.trigger("dropped", {args: this.args, options: this.options, task: this.task, promise: this.promise}); + return true; + } else { + return false; + } + } + + _assertStatus(expected) { + var status; + status = this._states.jobStatus(this.options.id); + if (!(status === expected || (expected === "DONE" && status === null))) { + throw new BottleneckError$1(`Invalid job status ${status}, expected ${expected}. Please open an issue at https://github.com/SGrondin/bottleneck/issues`); + } + } + + doReceive() { + this._states.start(this.options.id); + return this.Events.trigger("received", {args: this.args, options: this.options}); + } + + doQueue(reachedHWM, blocked) { + this._assertStatus("RECEIVED"); + this._states.next(this.options.id); + return this.Events.trigger("queued", {args: this.args, options: this.options, reachedHWM, blocked}); + } + + doRun() { + if (this.retryCount === 0) { + this._assertStatus("QUEUED"); + this._states.next(this.options.id); + } else { + this._assertStatus("EXECUTING"); + } + return this.Events.trigger("scheduled", {args: this.args, options: this.options}); + } + + async doExecute(chained, clearGlobalState, run, free) { + var error, eventInfo, passed; + if (this.retryCount === 0) { + this._assertStatus("RUNNING"); + this._states.next(this.options.id); + } else { + this._assertStatus("EXECUTING"); + } + eventInfo = {args: this.args, options: this.options, retryCount: this.retryCount}; + this.Events.trigger("executing", eventInfo); + try { + passed = (await (chained != null ? chained.schedule(this.options, this.task, ...this.args) : this.task(...this.args))); + if (clearGlobalState()) { + this.doDone(eventInfo); + await free(this.options, eventInfo); + this._assertStatus("DONE"); + return this._resolve(passed); + } + } catch (error1) { + error = error1; + return this._onFailure(error, eventInfo, clearGlobalState, run, free); + } + } + + doExpire(clearGlobalState, run, free) { + var error, eventInfo; + if (this._states.jobStatus(this.options.id === "RUNNING")) { + this._states.next(this.options.id); + } + this._assertStatus("EXECUTING"); + eventInfo = {args: this.args, options: this.options, retryCount: this.retryCount}; + error = new BottleneckError$1(`This job timed out after ${this.options.expiration} ms.`); + return this._onFailure(error, eventInfo, clearGlobalState, run, free); + } + + async _onFailure(error, eventInfo, clearGlobalState, run, free) { + var retry, retryAfter; + if (clearGlobalState()) { + retry = (await this.Events.trigger("failed", error, eventInfo)); + if (retry != null) { + retryAfter = ~~retry; + this.Events.trigger("retry", `Retrying ${this.options.id} after ${retryAfter} ms`, eventInfo); + this.retryCount++; + return run(retryAfter); + } else { + this.doDone(eventInfo); + await free(this.options, eventInfo); + this._assertStatus("DONE"); + return this._reject(error); + } + } + } + + doDone(eventInfo) { + this._assertStatus("EXECUTING"); + this._states.next(this.options.id); + return this.Events.trigger("done", eventInfo); + } + + }; + + var Job_1 = Job; + + var BottleneckError$2, LocalDatastore, parser$2; + + parser$2 = parser; + + BottleneckError$2 = BottleneckError_1; + + LocalDatastore = class LocalDatastore { + constructor(instance, storeOptions, storeInstanceOptions) { + this.instance = instance; + this.storeOptions = storeOptions; + this.clientId = this.instance._randomIndex(); + parser$2.load(storeInstanceOptions, storeInstanceOptions, this); + this._nextRequest = 
this._lastReservoirRefresh = this._lastReservoirIncrease = Date.now(); + this._running = 0; + this._done = 0; + this._unblockTime = 0; + this.ready = this.Promise.resolve(); + this.clients = {}; + this._startHeartbeat(); + } + + _startHeartbeat() { + var base; + if ((this.heartbeat == null) && (((this.storeOptions.reservoirRefreshInterval != null) && (this.storeOptions.reservoirRefreshAmount != null)) || ((this.storeOptions.reservoirIncreaseInterval != null) && (this.storeOptions.reservoirIncreaseAmount != null)))) { + return typeof (base = (this.heartbeat = setInterval(() => { + var amount, incr, maximum, now, reservoir; + now = Date.now(); + if ((this.storeOptions.reservoirRefreshInterval != null) && now >= this._lastReservoirRefresh + this.storeOptions.reservoirRefreshInterval) { + this._lastReservoirRefresh = now; + this.storeOptions.reservoir = this.storeOptions.reservoirRefreshAmount; + this.instance._drainAll(this.computeCapacity()); + } + if ((this.storeOptions.reservoirIncreaseInterval != null) && now >= this._lastReservoirIncrease + this.storeOptions.reservoirIncreaseInterval) { + ({ + reservoirIncreaseAmount: amount, + reservoirIncreaseMaximum: maximum, + reservoir + } = this.storeOptions); + this._lastReservoirIncrease = now; + incr = maximum != null ? Math.min(amount, maximum - reservoir) : amount; + if (incr > 0) { + this.storeOptions.reservoir += incr; + return this.instance._drainAll(this.computeCapacity()); + } + } + }, this.heartbeatInterval))).unref === "function" ? base.unref() : void 0; + } else { + return clearInterval(this.heartbeat); + } + } + + async __publish__(message) { + await this.yieldLoop(); + return this.instance.Events.trigger("message", message.toString()); + } + + async __disconnect__(flush) { + await this.yieldLoop(); + clearInterval(this.heartbeat); + return this.Promise.resolve(); + } + + yieldLoop(t = 0) { + return new this.Promise(function(resolve, reject) { + return setTimeout(resolve, t); + }); + } + + computePenalty() { + var ref; + return (ref = this.storeOptions.penalty) != null ? 
ref : (15 * this.storeOptions.minTime) || 5000; + } + + async __updateSettings__(options) { + await this.yieldLoop(); + parser$2.overwrite(options, options, this.storeOptions); + this._startHeartbeat(); + this.instance._drainAll(this.computeCapacity()); + return true; + } + + async __running__() { + await this.yieldLoop(); + return this._running; + } + + async __queued__() { + await this.yieldLoop(); + return this.instance.queued(); + } + + async __done__() { + await this.yieldLoop(); + return this._done; + } + + async __groupCheck__(time) { + await this.yieldLoop(); + return (this._nextRequest + this.timeout) < time; + } + + computeCapacity() { + var maxConcurrent, reservoir; + ({maxConcurrent, reservoir} = this.storeOptions); + if ((maxConcurrent != null) && (reservoir != null)) { + return Math.min(maxConcurrent - this._running, reservoir); + } else if (maxConcurrent != null) { + return maxConcurrent - this._running; + } else if (reservoir != null) { + return reservoir; + } else { + return null; + } + } + + conditionsCheck(weight) { + var capacity; + capacity = this.computeCapacity(); + return (capacity == null) || weight <= capacity; + } + + async __incrementReservoir__(incr) { + var reservoir; + await this.yieldLoop(); + reservoir = this.storeOptions.reservoir += incr; + this.instance._drainAll(this.computeCapacity()); + return reservoir; + } + + async __currentReservoir__() { + await this.yieldLoop(); + return this.storeOptions.reservoir; + } + + isBlocked(now) { + return this._unblockTime >= now; + } + + check(weight, now) { + return this.conditionsCheck(weight) && (this._nextRequest - now) <= 0; + } + + async __check__(weight) { + var now; + await this.yieldLoop(); + now = Date.now(); + return this.check(weight, now); + } + + async __register__(index, weight, expiration) { + var now, wait; + await this.yieldLoop(); + now = Date.now(); + if (this.conditionsCheck(weight)) { + this._running += weight; + if (this.storeOptions.reservoir != null) { + this.storeOptions.reservoir -= weight; + } + wait = Math.max(this._nextRequest - now, 0); + this._nextRequest = now + wait + this.storeOptions.minTime; + return { + success: true, + wait, + reservoir: this.storeOptions.reservoir + }; + } else { + return { + success: false + }; + } + } + + strategyIsBlock() { + return this.storeOptions.strategy === 3; + } + + async __submit__(queueLength, weight) { + var blocked, now, reachedHWM; + await this.yieldLoop(); + if ((this.storeOptions.maxConcurrent != null) && weight > this.storeOptions.maxConcurrent) { + throw new BottleneckError$2(`Impossible to add a job having a weight of ${weight} to a limiter having a maxConcurrent setting of ${this.storeOptions.maxConcurrent}`); + } + now = Date.now(); + reachedHWM = (this.storeOptions.highWater != null) && queueLength === this.storeOptions.highWater && !this.check(weight, now); + blocked = this.strategyIsBlock() && (reachedHWM || this.isBlocked(now)); + if (blocked) { + this._unblockTime = now + this.computePenalty(); + this._nextRequest = this._unblockTime + this.storeOptions.minTime; + this.instance._dropAllQueued(); + } + return { + reachedHWM, + blocked, + strategy: this.storeOptions.strategy + }; + } + + async __free__(index, weight) { + await this.yieldLoop(); + this._running -= weight; + this._done += weight; + this.instance._drainAll(this.computeCapacity()); + return { + running: this._running + }; + } + + }; + + var LocalDatastore_1 = LocalDatastore; + + var BottleneckError$3, States; + + BottleneckError$3 = BottleneckError_1; + + States = class 
States { + constructor(status1) { + this.status = status1; + this._jobs = {}; + this.counts = this.status.map(function() { + return 0; + }); + } + + next(id) { + var current, next; + current = this._jobs[id]; + next = current + 1; + if ((current != null) && next < this.status.length) { + this.counts[current]--; + this.counts[next]++; + return this._jobs[id]++; + } else if (current != null) { + this.counts[current]--; + return delete this._jobs[id]; + } + } + + start(id) { + var initial; + initial = 0; + this._jobs[id] = initial; + return this.counts[initial]++; + } + + remove(id) { + var current; + current = this._jobs[id]; + if (current != null) { + this.counts[current]--; + delete this._jobs[id]; + } + return current != null; + } + + jobStatus(id) { + var ref; + return (ref = this.status[this._jobs[id]]) != null ? ref : null; + } + + statusJobs(status) { + var k, pos, ref, results, v; + if (status != null) { + pos = this.status.indexOf(status); + if (pos < 0) { + throw new BottleneckError$3(`status must be one of ${this.status.join(', ')}`); + } + ref = this._jobs; + results = []; + for (k in ref) { + v = ref[k]; + if (v === pos) { + results.push(k); + } + } + return results; + } else { + return Object.keys(this._jobs); + } + } + + statusCounts() { + return this.counts.reduce(((acc, v, i) => { + acc[this.status[i]] = v; + return acc; + }), {}); + } + + }; + + var States_1 = States; + + var DLList$2, Sync; + + DLList$2 = DLList_1; + + Sync = class Sync { + constructor(name, Promise) { + this.schedule = this.schedule.bind(this); + this.name = name; + this.Promise = Promise; + this._running = 0; + this._queue = new DLList$2(); + } + + isEmpty() { + return this._queue.length === 0; + } + + async _tryToRun() { + var args, cb, error, reject, resolve, returned, task; + if ((this._running < 1) && this._queue.length > 0) { + this._running++; + ({task, args, resolve, reject} = this._queue.shift()); + cb = (await (async function() { + try { + returned = (await task(...args)); + return function() { + return resolve(returned); + }; + } catch (error1) { + error = error1; + return function() { + return reject(error); + }; + } + })()); + this._running--; + this._tryToRun(); + return cb(); + } + } + + schedule(task, ...args) { + var promise, reject, resolve; + resolve = reject = null; + promise = new this.Promise(function(_resolve, _reject) { + resolve = _resolve; + return reject = _reject; + }); + this._queue.push({task, args, resolve, reject}); + this._tryToRun(); + return promise; + } + + }; + + var Sync_1 = Sync; + + var version = "2.19.5"; + var version$1 = { + version: version + }; + + var version$2 = /*#__PURE__*/Object.freeze({ + version: version, + default: version$1 + }); + + var require$$2 = () => console.log('You must import the full version of Bottleneck in order to use this feature.'); + + var require$$3 = () => console.log('You must import the full version of Bottleneck in order to use this feature.'); + + var require$$4 = () => console.log('You must import the full version of Bottleneck in order to use this feature.'); + + var Events$2, Group, IORedisConnection$1, RedisConnection$1, Scripts$1, parser$3; + + parser$3 = parser; + + Events$2 = Events_1; + + RedisConnection$1 = require$$2; + + IORedisConnection$1 = require$$3; + + Scripts$1 = require$$4; + + Group = (function() { + class Group { + constructor(limiterOptions = {}) { + this.deleteKey = this.deleteKey.bind(this); + this.limiterOptions = limiterOptions; + parser$3.load(this.limiterOptions, this.defaults, this); + this.Events = 
new Events$2(this); + this.instances = {}; + this.Bottleneck = Bottleneck_1; + this._startAutoCleanup(); + this.sharedConnection = this.connection != null; + if (this.connection == null) { + if (this.limiterOptions.datastore === "redis") { + this.connection = new RedisConnection$1(Object.assign({}, this.limiterOptions, {Events: this.Events})); + } else if (this.limiterOptions.datastore === "ioredis") { + this.connection = new IORedisConnection$1(Object.assign({}, this.limiterOptions, {Events: this.Events})); + } + } + } + + key(key = "") { + var ref; + return (ref = this.instances[key]) != null ? ref : (() => { + var limiter; + limiter = this.instances[key] = new this.Bottleneck(Object.assign(this.limiterOptions, { + id: `${this.id}-${key}`, + timeout: this.timeout, + connection: this.connection + })); + this.Events.trigger("created", limiter, key); + return limiter; + })(); + } + + async deleteKey(key = "") { + var deleted, instance; + instance = this.instances[key]; + if (this.connection) { + deleted = (await this.connection.__runCommand__(['del', ...Scripts$1.allKeys(`${this.id}-${key}`)])); + } + if (instance != null) { + delete this.instances[key]; + await instance.disconnect(); + } + return (instance != null) || deleted > 0; + } + + limiters() { + var k, ref, results, v; + ref = this.instances; + results = []; + for (k in ref) { + v = ref[k]; + results.push({ + key: k, + limiter: v + }); + } + return results; + } + + keys() { + return Object.keys(this.instances); + } + + async clusterKeys() { + var cursor, end, found, i, k, keys, len, next, start; + if (this.connection == null) { + return this.Promise.resolve(this.keys()); + } + keys = []; + cursor = null; + start = `b_${this.id}-`.length; + end = "_settings".length; + while (cursor !== 0) { + [next, found] = (await this.connection.__runCommand__(["scan", cursor != null ? cursor : 0, "match", `b_${this.id}-*_settings`, "count", 10000])); + cursor = ~~next; + for (i = 0, len = found.length; i < len; i++) { + k = found[i]; + keys.push(k.slice(start, -end)); + } + } + return keys; + } + + _startAutoCleanup() { + var base; + clearInterval(this.interval); + return typeof (base = (this.interval = setInterval(async() => { + var e, k, ref, results, time, v; + time = Date.now(); + ref = this.instances; + results = []; + for (k in ref) { + v = ref[k]; + try { + if ((await v._store.__groupCheck__(time))) { + results.push(this.deleteKey(k)); + } else { + results.push(void 0); + } + } catch (error) { + e = error; + results.push(v.Events.trigger("error", e)); + } + } + return results; + }, this.timeout / 2))).unref === "function" ? base.unref() : void 0; + } + + updateSettings(options = {}) { + parser$3.overwrite(options, this.defaults, this); + parser$3.overwrite(options, options, this.limiterOptions); + if (options.timeout != null) { + return this._startAutoCleanup(); + } + } + + disconnect(flush = true) { + var ref; + if (!this.sharedConnection) { + return (ref = this.connection) != null ? 
ref.disconnect(flush) : void 0; + } + } + + } + Group.prototype.defaults = { + timeout: 1000 * 60 * 5, + connection: null, + Promise: Promise, + id: "group-key" + }; + + return Group; + + }).call(commonjsGlobal); + + var Group_1 = Group; + + var Batcher, Events$3, parser$4; + + parser$4 = parser; + + Events$3 = Events_1; + + Batcher = (function() { + class Batcher { + constructor(options = {}) { + this.options = options; + parser$4.load(this.options, this.defaults, this); + this.Events = new Events$3(this); + this._arr = []; + this._resetPromise(); + this._lastFlush = Date.now(); + } + + _resetPromise() { + return this._promise = new this.Promise((res, rej) => { + return this._resolve = res; + }); + } + + _flush() { + clearTimeout(this._timeout); + this._lastFlush = Date.now(); + this._resolve(); + this.Events.trigger("batch", this._arr); + this._arr = []; + return this._resetPromise(); + } + + add(data) { + var ret; + this._arr.push(data); + ret = this._promise; + if (this._arr.length === this.maxSize) { + this._flush(); + } else if ((this.maxTime != null) && this._arr.length === 1) { + this._timeout = setTimeout(() => { + return this._flush(); + }, this.maxTime); + } + return ret; + } + + } + Batcher.prototype.defaults = { + maxTime: null, + maxSize: null, + Promise: Promise + }; + + return Batcher; + + }).call(commonjsGlobal); + + var Batcher_1 = Batcher; + + var require$$4$1 = () => console.log('You must import the full version of Bottleneck in order to use this feature.'); + + var require$$8 = getCjsExportFromNamespace(version$2); + + var Bottleneck, DEFAULT_PRIORITY$1, Events$4, Job$1, LocalDatastore$1, NUM_PRIORITIES$1, Queues$1, RedisDatastore$1, States$1, Sync$1, parser$5, + splice = [].splice; + + NUM_PRIORITIES$1 = 10; + + DEFAULT_PRIORITY$1 = 5; + + parser$5 = parser; + + Queues$1 = Queues_1; + + Job$1 = Job_1; + + LocalDatastore$1 = LocalDatastore_1; + + RedisDatastore$1 = require$$4$1; + + Events$4 = Events_1; + + States$1 = States_1; + + Sync$1 = Sync_1; + + Bottleneck = (function() { + class Bottleneck { + constructor(options = {}, ...invalid) { + var storeInstanceOptions, storeOptions; + this._addToQueue = this._addToQueue.bind(this); + this._validateOptions(options, invalid); + parser$5.load(options, this.instanceDefaults, this); + this._queues = new Queues$1(NUM_PRIORITIES$1); + this._scheduled = {}; + this._states = new States$1(["RECEIVED", "QUEUED", "RUNNING", "EXECUTING"].concat(this.trackDoneStatus ? ["DONE"] : [])); + this._limiter = null; + this.Events = new Events$4(this); + this._submitLock = new Sync$1("submit", this.Promise); + this._registerLock = new Sync$1("register", this.Promise); + storeOptions = parser$5.load(options, this.storeDefaults, {}); + this._store = (function() { + if (this.datastore === "redis" || this.datastore === "ioredis" || (this.connection != null)) { + storeInstanceOptions = parser$5.load(options, this.redisStoreDefaults, {}); + return new RedisDatastore$1(this, storeOptions, storeInstanceOptions); + } else if (this.datastore === "local") { + storeInstanceOptions = parser$5.load(options, this.localStoreDefaults, {}); + return new LocalDatastore$1(this, storeOptions, storeInstanceOptions); + } else { + throw new Bottleneck.prototype.BottleneckError(`Invalid datastore type: ${this.datastore}`); + } + }).call(this); + this._queues.on("leftzero", () => { + var ref; + return (ref = this._store.heartbeat) != null ? typeof ref.ref === "function" ? 
ref.ref() : void 0 : void 0; + }); + this._queues.on("zero", () => { + var ref; + return (ref = this._store.heartbeat) != null ? typeof ref.unref === "function" ? ref.unref() : void 0 : void 0; + }); + } + + _validateOptions(options, invalid) { + if (!((options != null) && typeof options === "object" && invalid.length === 0)) { + throw new Bottleneck.prototype.BottleneckError("Bottleneck v2 takes a single object argument. Refer to https://github.com/SGrondin/bottleneck#upgrading-to-v2 if you're upgrading from Bottleneck v1."); + } + } + + ready() { + return this._store.ready; + } + + clients() { + return this._store.clients; + } + + channel() { + return `b_${this.id}`; + } + + channel_client() { + return `b_${this.id}_${this._store.clientId}`; + } + + publish(message) { + return this._store.__publish__(message); + } + + disconnect(flush = true) { + return this._store.__disconnect__(flush); + } + + chain(_limiter) { + this._limiter = _limiter; + return this; + } + + queued(priority) { + return this._queues.queued(priority); + } + + clusterQueued() { + return this._store.__queued__(); + } + + empty() { + return this.queued() === 0 && this._submitLock.isEmpty(); + } + + running() { + return this._store.__running__(); + } + + done() { + return this._store.__done__(); + } + + jobStatus(id) { + return this._states.jobStatus(id); + } + + jobs(status) { + return this._states.statusJobs(status); + } + + counts() { + return this._states.statusCounts(); + } + + _randomIndex() { + return Math.random().toString(36).slice(2); + } + + check(weight = 1) { + return this._store.__check__(weight); + } + + _clearGlobalState(index) { + if (this._scheduled[index] != null) { + clearTimeout(this._scheduled[index].expiration); + delete this._scheduled[index]; + return true; + } else { + return false; + } + } + + async _free(index, job, options, eventInfo) { + var e, running; + try { + ({running} = (await this._store.__free__(index, options.weight))); + this.Events.trigger("debug", `Freed ${options.id}`, eventInfo); + if (running === 0 && this.empty()) { + return this.Events.trigger("idle"); + } + } catch (error1) { + e = error1; + return this.Events.trigger("error", e); + } + } + + _run(index, job, wait) { + var clearGlobalState, free, run; + job.doRun(); + clearGlobalState = this._clearGlobalState.bind(this, index); + run = this._run.bind(this, index, job); + free = this._free.bind(this, index, job); + return this._scheduled[index] = { + timeout: setTimeout(() => { + return job.doExecute(this._limiter, clearGlobalState, run, free); + }, wait), + expiration: job.options.expiration != null ? 
setTimeout(function() { + return job.doExpire(clearGlobalState, run, free); + }, wait + job.options.expiration) : void 0, + job: job + }; + } + + _drainOne(capacity) { + return this._registerLock.schedule(() => { + var args, index, next, options, queue; + if (this.queued() === 0) { + return this.Promise.resolve(null); + } + queue = this._queues.getFirst(); + ({options, args} = next = queue.first()); + if ((capacity != null) && options.weight > capacity) { + return this.Promise.resolve(null); + } + this.Events.trigger("debug", `Draining ${options.id}`, {args, options}); + index = this._randomIndex(); + return this._store.__register__(index, options.weight, options.expiration).then(({success, wait, reservoir}) => { + var empty; + this.Events.trigger("debug", `Drained ${options.id}`, {success, args, options}); + if (success) { + queue.shift(); + empty = this.empty(); + if (empty) { + this.Events.trigger("empty"); + } + if (reservoir === 0) { + this.Events.trigger("depleted", empty); + } + this._run(index, next, wait); + return this.Promise.resolve(options.weight); + } else { + return this.Promise.resolve(null); + } + }); + }); + } + + _drainAll(capacity, total = 0) { + return this._drainOne(capacity).then((drained) => { + var newCapacity; + if (drained != null) { + newCapacity = capacity != null ? capacity - drained : capacity; + return this._drainAll(newCapacity, total + drained); + } else { + return this.Promise.resolve(total); + } + }).catch((e) => { + return this.Events.trigger("error", e); + }); + } + + _dropAllQueued(message) { + return this._queues.shiftAll(function(job) { + return job.doDrop({message}); + }); + } + + stop(options = {}) { + var done, waitForExecuting; + options = parser$5.load(options, this.stopDefaults); + waitForExecuting = (at) => { + var finished; + finished = () => { + var counts; + counts = this._states.counts; + return (counts[0] + counts[1] + counts[2] + counts[3]) === at; + }; + return new this.Promise((resolve, reject) => { + if (finished()) { + return resolve(); + } else { + return this.on("done", () => { + if (finished()) { + this.removeAllListeners("done"); + return resolve(); + } + }); + } + }); + }; + done = options.dropWaitingJobs ? 
(this._run = function(index, next) { + return next.doDrop({ + message: options.dropErrorMessage + }); + }, this._drainOne = () => { + return this.Promise.resolve(null); + }, this._registerLock.schedule(() => { + return this._submitLock.schedule(() => { + var k, ref, v; + ref = this._scheduled; + for (k in ref) { + v = ref[k]; + if (this.jobStatus(v.job.options.id) === "RUNNING") { + clearTimeout(v.timeout); + clearTimeout(v.expiration); + v.job.doDrop({ + message: options.dropErrorMessage + }); + } + } + this._dropAllQueued(options.dropErrorMessage); + return waitForExecuting(0); + }); + })) : this.schedule({ + priority: NUM_PRIORITIES$1 - 1, + weight: 0 + }, () => { + return waitForExecuting(1); + }); + this._receive = function(job) { + return job._reject(new Bottleneck.prototype.BottleneckError(options.enqueueErrorMessage)); + }; + this.stop = () => { + return this.Promise.reject(new Bottleneck.prototype.BottleneckError("stop() has already been called")); + }; + return done; + } + + async _addToQueue(job) { + var args, blocked, error, options, reachedHWM, shifted, strategy; + ({args, options} = job); + try { + ({reachedHWM, blocked, strategy} = (await this._store.__submit__(this.queued(), options.weight))); + } catch (error1) { + error = error1; + this.Events.trigger("debug", `Could not queue ${options.id}`, {args, options, error}); + job.doDrop({error}); + return false; + } + if (blocked) { + job.doDrop(); + return true; + } else if (reachedHWM) { + shifted = strategy === Bottleneck.prototype.strategy.LEAK ? this._queues.shiftLastFrom(options.priority) : strategy === Bottleneck.prototype.strategy.OVERFLOW_PRIORITY ? this._queues.shiftLastFrom(options.priority + 1) : strategy === Bottleneck.prototype.strategy.OVERFLOW ? job : void 0; + if (shifted != null) { + shifted.doDrop(); + } + if ((shifted == null) || strategy === Bottleneck.prototype.strategy.OVERFLOW) { + if (shifted == null) { + job.doDrop(); + } + return reachedHWM; + } + } + job.doQueue(reachedHWM, blocked); + this._queues.push(job); + await this._drainAll(); + return reachedHWM; + } + + _receive(job) { + if (this._states.jobStatus(job.options.id) != null) { + job._reject(new Bottleneck.prototype.BottleneckError(`A job with the same id already exists (id=${job.options.id})`)); + return false; + } else { + job.doReceive(); + return this._submitLock.schedule(this._addToQueue, job); + } + } + + submit(...args) { + var cb, fn, job, options, ref, ref1, task; + if (typeof args[0] === "function") { + ref = args, [fn, ...args] = ref, [cb] = splice.call(args, -1); + options = parser$5.load({}, this.jobDefaults); + } else { + ref1 = args, [options, fn, ...args] = ref1, [cb] = splice.call(args, -1); + options = parser$5.load(options, this.jobDefaults); + } + task = (...args) => { + return new this.Promise(function(resolve, reject) { + return fn(...args, function(...args) { + return (args[0] != null ? reject : resolve)(args); + }); + }); + }; + job = new Job$1(task, args, options, this.jobDefaults, this.rejectOnDrop, this.Events, this._states, this.Promise); + job.promise.then(function(args) { + return typeof cb === "function" ? cb(...args) : void 0; + }).catch(function(args) { + if (Array.isArray(args)) { + return typeof cb === "function" ? cb(...args) : void 0; + } else { + return typeof cb === "function" ? 
cb(args) : void 0; + } + }); + return this._receive(job); + } + + schedule(...args) { + var job, options, task; + if (typeof args[0] === "function") { + [task, ...args] = args; + options = {}; + } else { + [options, task, ...args] = args; + } + job = new Job$1(task, args, options, this.jobDefaults, this.rejectOnDrop, this.Events, this._states, this.Promise); + this._receive(job); + return job.promise; + } + + wrap(fn) { + var schedule, wrapped; + schedule = this.schedule.bind(this); + wrapped = function(...args) { + return schedule(fn.bind(this), ...args); + }; + wrapped.withOptions = function(options, ...args) { + return schedule(options, fn, ...args); + }; + return wrapped; + } + + async updateSettings(options = {}) { + await this._store.__updateSettings__(parser$5.overwrite(options, this.storeDefaults)); + parser$5.overwrite(options, this.instanceDefaults, this); + return this; + } + + currentReservoir() { + return this._store.__currentReservoir__(); + } + + incrementReservoir(incr = 0) { + return this._store.__incrementReservoir__(incr); + } + + } + Bottleneck.default = Bottleneck; + + Bottleneck.Events = Events$4; + + Bottleneck.version = Bottleneck.prototype.version = require$$8.version; + + Bottleneck.strategy = Bottleneck.prototype.strategy = { + LEAK: 1, + OVERFLOW: 2, + OVERFLOW_PRIORITY: 4, + BLOCK: 3 + }; + + Bottleneck.BottleneckError = Bottleneck.prototype.BottleneckError = BottleneckError_1; + + Bottleneck.Group = Bottleneck.prototype.Group = Group_1; + + Bottleneck.RedisConnection = Bottleneck.prototype.RedisConnection = require$$2; + + Bottleneck.IORedisConnection = Bottleneck.prototype.IORedisConnection = require$$3; + + Bottleneck.Batcher = Bottleneck.prototype.Batcher = Batcher_1; + + Bottleneck.prototype.jobDefaults = { + priority: DEFAULT_PRIORITY$1, + weight: 1, + expiration: null, + id: "" + }; + + Bottleneck.prototype.storeDefaults = { + maxConcurrent: null, + minTime: 0, + highWater: null, + strategy: Bottleneck.prototype.strategy.LEAK, + penalty: null, + reservoir: null, + reservoirRefreshInterval: null, + reservoirRefreshAmount: null, + reservoirIncreaseInterval: null, + reservoirIncreaseAmount: null, + reservoirIncreaseMaximum: null + }; + + Bottleneck.prototype.localStoreDefaults = { + Promise: Promise, + timeout: null, + heartbeatInterval: 250 + }; + + Bottleneck.prototype.redisStoreDefaults = { + Promise: Promise, + timeout: null, + heartbeatInterval: 5000, + clientTimeout: 10000, + Redis: null, + clientOptions: {}, + clusterNodes: null, + clearDatastore: false, + connection: null + }; + + Bottleneck.prototype.instanceDefaults = { + datastore: "local", + connection: null, + id: "", + rejectOnDrop: true, + trackDoneStatus: false, + Promise: Promise + }; + + Bottleneck.prototype.stopDefaults = { + enqueueErrorMessage: "This limiter has been stopped and cannot accept new jobs.", + dropWaitingJobs: true, + dropErrorMessage: "This limiter has been stopped." 
+ }; + + return Bottleneck; + + }).call(commonjsGlobal); + + var Bottleneck_1 = Bottleneck; + + var lib = Bottleneck_1; + + return lib; + +}))); diff --git a/node_modules/bottleneck/package.json b/node_modules/bottleneck/package.json new file mode 100644 index 000000000..8abb77421 --- /dev/null +++ b/node_modules/bottleneck/package.json @@ -0,0 +1,56 @@ +{ + "name": "bottleneck", + "version": "2.19.5", + "description": "Distributed task scheduler and rate limiter", + "main": "lib/index.js", + "typings": "bottleneck.d.ts", + "scripts": { + "test": "mocha test", + "test-all": "./scripts/test_all.sh" + }, + "repository": { + "type": "git", + "url": "https://github.com/SGrondin/bottleneck" + }, + "keywords": [ + "async rate limiter", + "rate limiter", + "rate limiting", + "async", + "rate", + "limiting", + "limiter", + "throttle", + "throttling", + "throttler", + "load", + "clustering" + ], + "author": { + "name": "Simon Grondin" + }, + "license": "MIT", + "bugs": { + "url": "https://github.com/SGrondin/bottleneck/issues" + }, + "devDependencies": { + "@babel/core": "^7.5.0", + "@babel/preset-env": "^7.5.0", + "@types/es6-promise": "0.0.33", + "assert": "^1.5.0", + "coffeescript": "2.4.x", + "ejs-cli": "github:SGrondin/ejs-cli#master", + "ioredis": "^4.11.1", + "leakage": "^0.4.0", + "mocha": "^6.1.4", + "redis": "^2.8.0", + "regenerator-runtime": "^0.12.1", + "rollup": "^0.66.6", + "rollup-plugin-babel": "^4.3.3", + "rollup-plugin-commonjs": "^9.3.4", + "rollup-plugin-json": "^3.1.0", + "rollup-plugin-node-resolve": "^3.4.0", + "typescript": "^2.6.2" + }, + "dependencies": {} +} \ No newline at end of file diff --git a/node_modules/bottleneck/rollup.config.es5.js b/node_modules/bottleneck/rollup.config.es5.js new file mode 100644 index 000000000..8b0483e27 --- /dev/null +++ b/node_modules/bottleneck/rollup.config.es5.js @@ -0,0 +1,34 @@ +import json from 'rollup-plugin-json'; +import resolve from 'rollup-plugin-node-resolve'; +import commonjs from 'rollup-plugin-commonjs'; +import babel from 'rollup-plugin-babel'; + +const bannerLines = [ + 'This file contains the full Bottleneck library (MIT) compiled to ES5.', + 'https://github.com/SGrondin/bottleneck', + 'It also contains the regenerator-runtime (MIT), necessary for Babel-generated ES5 code to execute promise and async/await code.', + 'See the following link for Copyright and License information:', + 'https://github.com/facebook/regenerator/blob/master/packages/regenerator-runtime/runtime.js', +].map(x => ` * ${x}`).join('\n'); +const banner = `/**\n${bannerLines}\n */`; + +export default { + input: 'lib/es5.js', + output: { + name: 'Bottleneck', + file: 'es5.js', + sourcemap: false, + globals: {}, + format: 'umd', + banner + }, + external: [], + plugins: [ + json(), + resolve(), + commonjs(), + babel({ + exclude: 'node_modules/**' + }) + ] +}; diff --git a/node_modules/bottleneck/rollup.config.light.js b/node_modules/bottleneck/rollup.config.light.js new file mode 100644 index 000000000..6a72c709e --- /dev/null +++ b/node_modules/bottleneck/rollup.config.light.js @@ -0,0 +1,44 @@ +import commonjs from 'rollup-plugin-commonjs'; +import json from 'rollup-plugin-json'; +import resolve from 'rollup-plugin-node-resolve'; + +const bannerLines = [ + 'This file contains the Bottleneck library (MIT), compiled to ES2017, and without Clustering support.', + 'https://github.com/SGrondin/bottleneck', +].map(x => ` * ${x}`).join('\n'); +const banner = `/**\n${bannerLines}\n */`; + +const missing = `export default () => console.log('You must import the 
full version of Bottleneck in order to use this feature.');`; +const exclude = [ + 'RedisDatastore.js', + 'RedisConnection.js', + 'IORedisConnection.js', + 'Scripts.js' +]; + +export default { + input: 'lib/index.js', + output: { + name: 'Bottleneck', + file: 'light.js', + sourcemap: false, + globals: {}, + format: 'umd', + banner + }, + external: [], + plugins: [ + json(), + { + load: id => { + const chunks = id.split('/'); + const file = chunks[chunks.length - 1]; + if (exclude.indexOf(file) >= 0) { + return missing + } + } + }, + resolve(), + commonjs() + ] +}; diff --git a/node_modules/bottleneck/scripts/assemble_lua.js b/node_modules/bottleneck/scripts/assemble_lua.js new file mode 100644 index 000000000..eb7a93b79 --- /dev/null +++ b/node_modules/bottleneck/scripts/assemble_lua.js @@ -0,0 +1,25 @@ +var fs = require('fs') + +var input = __dirname + '/../src/redis' +var loaded = {} + +var promises = fs.readdirSync(input).map(function (file) { + return new Promise(function (resolve, reject) { + fs.readFile(input + '/' + file, function (err, data) { + if (err != null) { + return reject(err) + } + loaded[file] = data.toString('utf8') + return resolve() + }) + }) +}) + +Promise.all(promises) +.then(function () { + console.log(JSON.stringify(loaded, Object.keys(loaded).sort(), 2)) +}) +.catch(function (err) { + console.error(err) + process.exit(1) +}) diff --git a/node_modules/bottleneck/scripts/build.sh b/node_modules/bottleneck/scripts/build.sh new file mode 100755 index 000000000..4aadfc659 --- /dev/null +++ b/node_modules/bottleneck/scripts/build.sh @@ -0,0 +1,82 @@ +#!/usr/bin/env bash + +set -e + +if [ ! -d node_modules ]; then + echo "[B] Run 'npm install' first" + exit 1 +fi + + +clean() { + rm -f .babelrc + rm -rf lib/* + node scripts/version.js > lib/version.json + node scripts/assemble_lua.js > lib/lua.json +} + +makeLib10() { + echo '[B] Compiling Bottleneck to Node 10+...' + npx coffee --compile --bare --no-header src/*.coffee + mv src/*.js lib/ +} + +makeLib6() { + echo '[B] Compiling Bottleneck to Node 6+...' + ln -s .babelrc.lib .babelrc + npx coffee --compile --bare --no-header --transpile src/*.coffee + mv src/*.js lib/ +} + +makeES5() { + echo '[B] Compiling Bottleneck to ES5...' + ln -s .babelrc.es5 .babelrc + npx coffee --compile --bare --no-header src/*.coffee + mv src/*.js lib/ + + echo '[B] Assembling ES5 bundle...' + npx rollup -c rollup.config.es5.js +} + +makeLight() { + makeLib10 + + echo '[B] Assembling light bundle...' + npx rollup -c rollup.config.light.js +} + +makeTypings() { + echo '[B] Compiling and testing TS typings...' + npx ejs-cli bottleneck.d.ts.ejs > bottleneck.d.ts + npx tsc --noEmit --strict test.ts +} + +if [ "$1" = 'dev' ]; then + clean + makeLib10 +elif [ "$1" = 'bench' ]; then + clean + makeLib6 +elif [ "$1" = 'es5' ]; then + clean + makeES5 +elif [ "$1" = 'light' ]; then + clean + makeLight +elif [ "$1" = 'typings' ]; then + makeTypings +else + clean + makeES5 + + clean + makeLight + + clean + makeLib6 + makeTypings +fi + +rm -f .babelrc + +echo '[B] Done!' 
diff --git a/node_modules/bottleneck/scripts/test_all.sh b/node_modules/bottleneck/scripts/test_all.sh new file mode 100755 index 000000000..afc689292 --- /dev/null +++ b/node_modules/bottleneck/scripts/test_all.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash + +set -e + +source .env + +echo 'ioredis tests' +DATASTORE=ioredis npm test + +echo 'NodeRedis tests' +DATASTORE=redis npm test + +echo 'ES5 bundle tests' +BUILD=es5 npm test + +echo 'Light bundle tests' +BUILD=light npm test + +echo 'Local tests' +npm test diff --git a/node_modules/bottleneck/scripts/version.js b/node_modules/bottleneck/scripts/version.js new file mode 100644 index 000000000..75671dab0 --- /dev/null +++ b/node_modules/bottleneck/scripts/version.js @@ -0,0 +1,3 @@ +const packagejson = require('../package.json') + +console.log(JSON.stringify({version: packagejson.version})) diff --git a/node_modules/bottleneck/src/Batcher.coffee b/node_modules/bottleneck/src/Batcher.coffee new file mode 100644 index 000000000..5ddd66dcd --- /dev/null +++ b/node_modules/bottleneck/src/Batcher.coffee @@ -0,0 +1,39 @@ +parser = require "./parser" +Events = require "./Events" + +class Batcher + defaults: + maxTime: null + maxSize: null + Promise: Promise + + constructor: (@options={}) -> + parser.load @options, @defaults, @ + @Events = new Events @ + @_arr = [] + @_resetPromise() + @_lastFlush = Date.now() + + _resetPromise: -> + @_promise = new @Promise (res, rej) => @_resolve = res + + _flush: -> + clearTimeout @_timeout + @_lastFlush = Date.now() + @_resolve() + @Events.trigger "batch", @_arr + @_arr = [] + @_resetPromise() + + add: (data) -> + @_arr.push data + ret = @_promise + if @_arr.length == @maxSize + @_flush() + else if @maxTime? and @_arr.length == 1 + @_timeout = setTimeout => + @_flush() + , @maxTime + ret + +module.exports = Batcher diff --git a/node_modules/bottleneck/src/Bottleneck.coffee b/node_modules/bottleneck/src/Bottleneck.coffee new file mode 100644 index 000000000..37db2befc --- /dev/null +++ b/node_modules/bottleneck/src/Bottleneck.coffee @@ -0,0 +1,298 @@ +NUM_PRIORITIES = 10 +DEFAULT_PRIORITY = 5 + +parser = require "./parser" +Queues = require "./Queues" +Job = require "./Job" +LocalDatastore = require "./LocalDatastore" +RedisDatastore = require "./RedisDatastore" +Events = require "./Events" +States = require "./States" +Sync = require "./Sync" + +class Bottleneck + Bottleneck.default = Bottleneck + Bottleneck.Events = Events + Bottleneck.version = Bottleneck::version = require("./version.json").version + Bottleneck.strategy = Bottleneck::strategy = { LEAK:1, OVERFLOW:2, OVERFLOW_PRIORITY:4, BLOCK:3 } + Bottleneck.BottleneckError = Bottleneck::BottleneckError = require "./BottleneckError" + Bottleneck.Group = Bottleneck::Group = require "./Group" + Bottleneck.RedisConnection = Bottleneck::RedisConnection = require "./RedisConnection" + Bottleneck.IORedisConnection = Bottleneck::IORedisConnection = require "./IORedisConnection" + Bottleneck.Batcher = Bottleneck::Batcher = require "./Batcher" + jobDefaults: + priority: DEFAULT_PRIORITY + weight: 1 + expiration: null + id: "" + storeDefaults: + maxConcurrent: null + minTime: 0 + highWater: null + strategy: Bottleneck::strategy.LEAK + penalty: null + reservoir: null + reservoirRefreshInterval: null + reservoirRefreshAmount: null + reservoirIncreaseInterval: null + reservoirIncreaseAmount: null + reservoirIncreaseMaximum: null + localStoreDefaults: + Promise: Promise + timeout: null + heartbeatInterval: 250 + redisStoreDefaults: + Promise: Promise + timeout: null + 
heartbeatInterval: 5000 + clientTimeout: 10000 + Redis: null + clientOptions: {} + clusterNodes: null + clearDatastore: false + connection: null + instanceDefaults: + datastore: "local" + connection: null + id: "" + rejectOnDrop: true + trackDoneStatus: false + Promise: Promise + stopDefaults: + enqueueErrorMessage: "This limiter has been stopped and cannot accept new jobs." + dropWaitingJobs: true + dropErrorMessage: "This limiter has been stopped." + + constructor: (options={}, invalid...) -> + @_validateOptions options, invalid + parser.load options, @instanceDefaults, @ + @_queues = new Queues NUM_PRIORITIES + @_scheduled = {} + @_states = new States ["RECEIVED", "QUEUED", "RUNNING", "EXECUTING"].concat(if @trackDoneStatus then ["DONE"] else []) + @_limiter = null + @Events = new Events @ + @_submitLock = new Sync "submit", @Promise + @_registerLock = new Sync "register", @Promise + storeOptions = parser.load options, @storeDefaults, {} + + @_store = if @datastore == "redis" or @datastore == "ioredis" or @connection? + storeInstanceOptions = parser.load options, @redisStoreDefaults, {} + new RedisDatastore @, storeOptions, storeInstanceOptions + else if @datastore == "local" + storeInstanceOptions = parser.load options, @localStoreDefaults, {} + new LocalDatastore @, storeOptions, storeInstanceOptions + else + throw new Bottleneck::BottleneckError "Invalid datastore type: #{@datastore}" + + @_queues.on "leftzero", => @_store.heartbeat?.ref?() + @_queues.on "zero", => @_store.heartbeat?.unref?() + + _validateOptions: (options, invalid) -> + unless options? and typeof options == "object" and invalid.length == 0 + throw new Bottleneck::BottleneckError "Bottleneck v2 takes a single object argument. Refer to https://github.com/SGrondin/bottleneck#upgrading-to-v2 if you're upgrading from Bottleneck v1." + + ready: -> @_store.ready + + clients: -> @_store.clients + + channel: -> "b_#{@id}" + + channel_client: -> "b_#{@id}_#{@_store.clientId}" + + publish: (message) -> @_store.__publish__ message + + disconnect: (flush=true) -> @_store.__disconnect__ flush + + chain: (@_limiter) -> @ + + queued: (priority) -> @_queues.queued priority + + clusterQueued: -> @_store.__queued__() + + empty: -> @queued() == 0 and @_submitLock.isEmpty() + + running: -> @_store.__running__() + + done: -> @_store.__done__() + + jobStatus: (id) -> @_states.jobStatus id + + jobs: (status) -> @_states.statusJobs status + + counts: -> @_states.statusCounts() + + _randomIndex: -> Math.random().toString(36).slice(2) + + check: (weight=1) -> @_store.__check__ weight + + _clearGlobalState: (index) -> + if @_scheduled[index]? + clearTimeout @_scheduled[index].expiration + delete @_scheduled[index] + true + else false + + _free: (index, job, options, eventInfo) -> + try + { running } = await @_store.__free__ index, options.weight + @Events.trigger "debug", "Freed #{options.id}", eventInfo + if running == 0 and @empty() then @Events.trigger "idle" + catch e + @Events.trigger "error", e + + _run: (index, job, wait) -> + job.doRun() + clearGlobalState = @_clearGlobalState.bind @, index + run = @_run.bind @, index, job + free = @_free.bind @, index, job + + @_scheduled[index] = + timeout: setTimeout => + job.doExecute @_limiter, clearGlobalState, run, free + , wait + expiration: if job.options.expiration? 
then setTimeout -> + job.doExpire clearGlobalState, run, free + , wait + job.options.expiration + job: job + + _drainOne: (capacity) -> + @_registerLock.schedule => + if @queued() == 0 then return @Promise.resolve null + queue = @_queues.getFirst() + { options, args } = next = queue.first() + if capacity? and options.weight > capacity then return @Promise.resolve null + @Events.trigger "debug", "Draining #{options.id}", { args, options } + index = @_randomIndex() + @_store.__register__ index, options.weight, options.expiration + .then ({ success, wait, reservoir }) => + @Events.trigger "debug", "Drained #{options.id}", { success, args, options } + if success + queue.shift() + empty = @empty() + if empty then @Events.trigger "empty" + if reservoir == 0 then @Events.trigger "depleted", empty + @_run index, next, wait + @Promise.resolve options.weight + else + @Promise.resolve null + + _drainAll: (capacity, total=0) -> + @_drainOne(capacity) + .then (drained) => + if drained? + newCapacity = if capacity? then capacity - drained else capacity + @_drainAll(newCapacity, total + drained) + else @Promise.resolve total + .catch (e) => @Events.trigger "error", e + + _dropAllQueued: (message) -> @_queues.shiftAll (job) -> job.doDrop { message } + + stop: (options={}) -> + options = parser.load options, @stopDefaults + waitForExecuting = (at) => + finished = => + counts = @_states.counts + (counts[0] + counts[1] + counts[2] + counts[3]) == at + new @Promise (resolve, reject) => + if finished() then resolve() + else + @on "done", => + if finished() + @removeAllListeners "done" + resolve() + done = if options.dropWaitingJobs + @_run = (index, next) -> next.doDrop { message: options.dropErrorMessage } + @_drainOne = => @Promise.resolve null + @_registerLock.schedule => @_submitLock.schedule => + for k, v of @_scheduled + if @jobStatus(v.job.options.id) == "RUNNING" + clearTimeout v.timeout + clearTimeout v.expiration + v.job.doDrop { message: options.dropErrorMessage } + @_dropAllQueued options.dropErrorMessage + waitForExecuting(0) + else + @schedule { priority: NUM_PRIORITIES - 1, weight: 0 }, => waitForExecuting(1) + @_receive = (job) -> job._reject new Bottleneck::BottleneckError options.enqueueErrorMessage + @stop = => @Promise.reject new Bottleneck::BottleneckError "stop() has already been called" + done + + _addToQueue: (job) => + { args, options } = job + try + { reachedHWM, blocked, strategy } = await @_store.__submit__ @queued(), options.weight + catch error + @Events.trigger "debug", "Could not queue #{options.id}", { args, options, error } + job.doDrop { error } + return false + + if blocked + job.doDrop() + return true + else if reachedHWM + shifted = if strategy == Bottleneck::strategy.LEAK then @_queues.shiftLastFrom(options.priority) + else if strategy == Bottleneck::strategy.OVERFLOW_PRIORITY then @_queues.shiftLastFrom(options.priority + 1) + else if strategy == Bottleneck::strategy.OVERFLOW then job + if shifted? then shifted.doDrop() + if not shifted? or strategy == Bottleneck::strategy.OVERFLOW + if not shifted? then job.doDrop() + return reachedHWM + + job.doQueue reachedHWM, blocked + @_queues.push job + await @_drainAll() + reachedHWM + + _receive: (job) -> + if @_states.jobStatus(job.options.id)? + job._reject new Bottleneck::BottleneckError "A job with the same id already exists (id=#{job.options.id})" + false + else + job.doReceive() + @_submitLock.schedule @_addToQueue, job + + submit: (args...) 
-> + if typeof args[0] == "function" + [fn, args..., cb] = args + options = parser.load {}, @jobDefaults + else + [options, fn, args..., cb] = args + options = parser.load options, @jobDefaults + + task = (args...) => + new @Promise (resolve, reject) -> + fn args..., (args...) -> + (if args[0]? then reject else resolve) args + + job = new Job task, args, options, @jobDefaults, @rejectOnDrop, @Events, @_states, @Promise + job.promise + .then (args) -> cb? args... + .catch (args) -> if Array.isArray args then cb? args... else cb? args + @_receive job + + schedule: (args...) -> + if typeof args[0] == "function" + [task, args...] = args + options = {} + else + [options, task, args...] = args + job = new Job task, args, options, @jobDefaults, @rejectOnDrop, @Events, @_states, @Promise + @_receive job + job.promise + + wrap: (fn) -> + schedule = @schedule.bind @ + wrapped = (args...) -> schedule fn.bind(@), args... + wrapped.withOptions = (options, args...) -> schedule options, fn, args... + wrapped + + updateSettings: (options={}) -> + await @_store.__updateSettings__ parser.overwrite options, @storeDefaults + parser.overwrite options, @instanceDefaults, @ + @ + + currentReservoir: -> @_store.__currentReservoir__() + + incrementReservoir: (incr=0) -> @_store.__incrementReservoir__ incr + +module.exports = Bottleneck diff --git a/node_modules/bottleneck/src/BottleneckError.coffee b/node_modules/bottleneck/src/BottleneckError.coffee new file mode 100644 index 000000000..157b8ac6a --- /dev/null +++ b/node_modules/bottleneck/src/BottleneckError.coffee @@ -0,0 +1,3 @@ +class BottleneckError extends Error + +module.exports = BottleneckError diff --git a/node_modules/bottleneck/src/DLList.coffee b/node_modules/bottleneck/src/DLList.coffee new file mode 100644 index 000000000..9dded30cc --- /dev/null +++ b/node_modules/bottleneck/src/DLList.coffee @@ -0,0 +1,38 @@ +class DLList + constructor: (@incr, @decr) -> + @_first = null + @_last = null + @length = 0 + push: (value) -> + @length++ + @incr?() + node = { value, prev: @_last, next: null } + if @_last? + @_last.next = node + @_last = node + else @_first = @_last = node + undefined + shift: () -> + if not @_first? then return + else + @length-- + @decr?() + value = @_first.value + if (@_first = @_first.next)? + @_first.prev = null + else + @_last = null + value + first: () -> if @_first? then @_first.value + getArray: () -> + node = @_first + while node? then (ref = node; node = node.next; ref.value) + forEachShift: (cb) -> + node = @shift() + while node? then (cb node; node = @shift()) + undefined + debug: () -> + node = @_first + while node? then (ref = node; node = node.next; { value: ref.value, prev: ref.prev?.value, next: ref.next?.value }) + +module.exports = DLList diff --git a/node_modules/bottleneck/src/Events.coffee b/node_modules/bottleneck/src/Events.coffee new file mode 100644 index 000000000..c96b31a43 --- /dev/null +++ b/node_modules/bottleneck/src/Events.coffee @@ -0,0 +1,38 @@ +class Events + constructor: (@instance) -> + @_events = {} + if @instance.on? or @instance.once? or @instance.removeAllListeners? + throw new Error "An Emitter already exists for this object" + @instance.on = (name, cb) => @_addListener name, "many", cb + @instance.once = (name, cb) => @_addListener name, "once", cb + @instance.removeAllListeners = (name=null) => + if name? 
then delete @_events[name] else @_events = {} + _addListener: (name, status, cb) -> + @_events[name] ?= [] + @_events[name].push {cb, status} + @instance + listenerCount: (name) -> + if @_events[name]? then @_events[name].length else 0 + trigger: (name, args...) -> + try + if name != "debug" then @trigger "debug", "Event triggered: #{name}", args + return unless @_events[name]? + @_events[name] = @_events[name].filter (listener) -> listener.status != "none" + promises = @_events[name].map (listener) => + return if listener.status == "none" + if listener.status == "once" then listener.status = "none" + try + returned = listener.cb?(args...) + if typeof returned?.then == "function" + await returned + else + returned + catch e + if "name" != "error" then @trigger "error", e + null + (await Promise.all promises).find (x) -> x? + catch e + if "name" != "error" then @trigger "error", e + null + +module.exports = Events diff --git a/node_modules/bottleneck/src/Group.coffee b/node_modules/bottleneck/src/Group.coffee new file mode 100644 index 000000000..210b5024d --- /dev/null +++ b/node_modules/bottleneck/src/Group.coffee @@ -0,0 +1,80 @@ +parser = require "./parser" +Events = require "./Events" +RedisConnection = require "./RedisConnection" +IORedisConnection = require "./IORedisConnection" +Scripts = require "./Scripts" + +class Group + defaults: + timeout: 1000 * 60 * 5 + connection: null + Promise: Promise + id: "group-key" + + constructor: (@limiterOptions={}) -> + parser.load @limiterOptions, @defaults, @ + @Events = new Events @ + @instances = {} + @Bottleneck = require "./Bottleneck" + @_startAutoCleanup() + @sharedConnection = @connection? + + if !@connection? + if @limiterOptions.datastore == "redis" + @connection = new RedisConnection Object.assign {}, @limiterOptions, { @Events } + else if @limiterOptions.datastore == "ioredis" + @connection = new IORedisConnection Object.assign {}, @limiterOptions, { @Events } + + key: (key="") -> @instances[key] ? do => + limiter = @instances[key] = new @Bottleneck Object.assign @limiterOptions, { + id: "#{@id}-#{key}", + @timeout, + @connection + } + @Events.trigger "created", limiter, key + limiter + + deleteKey: (key="") => + instance = @instances[key] + if @connection + deleted = await @connection.__runCommand__ ['del', Scripts.allKeys("#{@id}-#{key}")...] + if instance? + delete @instances[key] + await instance.disconnect() + instance? or deleted > 0 + + limiters: -> { key: k, limiter: v } for k, v of @instances + + keys: -> Object.keys @instances + + clusterKeys: -> + if !@connection? then return @Promise.resolve @keys() + keys = [] + cursor = null + start = "b_#{@id}-".length + end = "_settings".length + until cursor == 0 + [next, found] = await @connection.__runCommand__ ["scan", (cursor ? 0), "match", "b_#{@id}-*_settings", "count", 10000] + cursor = ~~next + keys.push(k.slice(start, -end)) for k in found + keys + + _startAutoCleanup: -> + clearInterval @interval + (@interval = setInterval => + time = Date.now() + for k, v of @instances + try if await v._store.__groupCheck__(time) then @deleteKey k + catch e then v.Events.trigger "error", e + , (@timeout / 2)).unref?() + + updateSettings: (options={}) -> + parser.overwrite options, @defaults, @ + parser.overwrite options, options, @limiterOptions + @_startAutoCleanup() if options.timeout? 
+ + disconnect: (flush=true) -> + if !@sharedConnection + @connection?.disconnect flush + +module.exports = Group diff --git a/node_modules/bottleneck/src/IORedisConnection.coffee b/node_modules/bottleneck/src/IORedisConnection.coffee new file mode 100644 index 000000000..211b12456 --- /dev/null +++ b/node_modules/bottleneck/src/IORedisConnection.coffee @@ -0,0 +1,84 @@ +parser = require "./parser" +Events = require "./Events" +Scripts = require "./Scripts" + +class IORedisConnection + datastore: "ioredis" + defaults: + Redis: null + clientOptions: {} + clusterNodes: null + client: null + Promise: Promise + Events: null + + constructor: (options={}) -> + parser.load options, @defaults, @ + @Redis ?= eval("require")("ioredis") # Obfuscated or else Webpack/Angular will try to inline the optional ioredis module. To override this behavior: pass the ioredis module to Bottleneck as the 'Redis' option. + @Events ?= new Events @ + @terminated = false + + if @clusterNodes? + @client = new @Redis.Cluster @clusterNodes, @clientOptions + @subscriber = new @Redis.Cluster @clusterNodes, @clientOptions + else if @client? and !@client.duplicate? + @subscriber = new @Redis.Cluster @client.startupNodes, @client.options + else + @client ?= new @Redis @clientOptions + @subscriber = @client.duplicate() + @limiters = {} + + @ready = @Promise.all [@_setup(@client, false), @_setup(@subscriber, true)] + .then => + @_loadScripts() + { @client, @subscriber } + + _setup: (client, sub) -> + client.setMaxListeners 0 + new @Promise (resolve, reject) => + client.on "error", (e) => @Events.trigger "error", e + if sub + client.on "message", (channel, message) => + @limiters[channel]?._store.onMessage channel, message + if client.status == "ready" then resolve() + else client.once "ready", resolve + + _loadScripts: -> Scripts.names.forEach (name) => @client.defineCommand name, { lua: Scripts.payload(name) } + + __runCommand__: (cmd) -> + await @ready + [[_, deleted]] = await @client.pipeline([cmd]).exec() + deleted + + __addLimiter__: (instance) -> + @Promise.all [instance.channel(), instance.channel_client()].map (channel) => + new @Promise (resolve, reject) => + @subscriber.subscribe channel, => + @limiters[channel] = instance + resolve() + + __removeLimiter__: (instance) -> + [instance.channel(), instance.channel_client()].forEach (channel) => + await @subscriber.unsubscribe channel unless @terminated + delete @limiters[channel] + + __scriptArgs__: (name, id, args, cb) -> + keys = Scripts.keys name, id + [keys.length].concat keys, args, cb + + __scriptFn__: (name) -> + @client[name].bind(@client) + + disconnect: (flush=true) -> + clearInterval(@limiters[k]._store.heartbeat) for k in Object.keys @limiters + @limiters = {} + @terminated = true + + if flush + @Promise.all [@client.quit(), @subscriber.quit()] + else + @client.disconnect() + @subscriber.disconnect() + @Promise.resolve() + +module.exports = IORedisConnection diff --git a/node_modules/bottleneck/src/Job.coffee b/node_modules/bottleneck/src/Job.coffee new file mode 100644 index 000000000..32cf1bccb --- /dev/null +++ b/node_modules/bottleneck/src/Job.coffee @@ -0,0 +1,98 @@ +NUM_PRIORITIES = 10 +DEFAULT_PRIORITY = 5 + +parser = require "./parser" +BottleneckError = require "./BottleneckError" + +class Job + constructor: (@task, @args, options, jobDefaults, @rejectOnDrop, @Events, @_states, @Promise) -> + @options = parser.load options, jobDefaults + @options.priority = @_sanitizePriority @options.priority + if @options.id == jobDefaults.id then @options.id = 
"#{@options.id}-#{@_randomIndex()}" + @promise = new @Promise (@_resolve, @_reject) => + @retryCount = 0 + + _sanitizePriority: (priority) -> + sProperty = if ~~priority != priority then DEFAULT_PRIORITY else priority + if sProperty < 0 then 0 else if sProperty > NUM_PRIORITIES-1 then NUM_PRIORITIES-1 else sProperty + + _randomIndex: -> Math.random().toString(36).slice(2) + + doDrop: ({ error, message="This job has been dropped by Bottleneck" } = {}) -> + if @_states.remove @options.id + if @rejectOnDrop then @_reject (error ? new BottleneckError message) + @Events.trigger "dropped", { @args, @options, @task, @promise } + true + else + false + + _assertStatus: (expected) -> + status = @_states.jobStatus @options.id + if not (status == expected or (expected == "DONE" and status == null)) + throw new BottleneckError "Invalid job status #{status}, expected #{expected}. Please open an issue at https://github.com/SGrondin/bottleneck/issues" + + doReceive: () -> + @_states.start @options.id + @Events.trigger "received", { @args, @options } + + doQueue: (reachedHWM, blocked) -> + @_assertStatus "RECEIVED" + @_states.next @options.id + @Events.trigger "queued", { @args, @options, reachedHWM, blocked } + + doRun: () -> + if @retryCount == 0 + @_assertStatus "QUEUED" + @_states.next @options.id + else @_assertStatus "EXECUTING" + @Events.trigger "scheduled", { @args, @options } + + doExecute: (chained, clearGlobalState, run, free) -> + if @retryCount == 0 + @_assertStatus "RUNNING" + @_states.next @options.id + else @_assertStatus "EXECUTING" + eventInfo = { @args, @options, @retryCount } + @Events.trigger "executing", eventInfo + + try + passed = await if chained? + chained.schedule @options, @task, @args... + else @task @args... + + if clearGlobalState() + @doDone eventInfo + await free @options, eventInfo + @_assertStatus "DONE" + @_resolve passed + catch error + @_onFailure error, eventInfo, clearGlobalState, run, free + + doExpire: (clearGlobalState, run, free) -> + if @_states.jobStatus @options.id == "RUNNING" + @_states.next @options.id + @_assertStatus "EXECUTING" + eventInfo = { @args, @options, @retryCount } + error = new BottleneckError "This job timed out after #{@options.expiration} ms." + @_onFailure error, eventInfo, clearGlobalState, run, free + + _onFailure: (error, eventInfo, clearGlobalState, run, free) -> + if clearGlobalState() + retry = await @Events.trigger "failed", error, eventInfo + if retry? 
+ retryAfter = ~~retry + @Events.trigger "retry", "Retrying #{@options.id} after #{retryAfter} ms", eventInfo + @retryCount++ + run retryAfter + else + @doDone eventInfo + await free @options, eventInfo + @_assertStatus "DONE" + @_reject error + + doDone: (eventInfo) -> + @_assertStatus "EXECUTING" + @_states.next @options.id + @Events.trigger "done", eventInfo + +module.exports = Job diff --git a/node_modules/bottleneck/src/LocalDatastore.coffee b/node_modules/bottleneck/src/LocalDatastore.coffee new file mode 100644 index 000000000..690aa34fc --- /dev/null +++ b/node_modules/bottleneck/src/LocalDatastore.coffee @@ -0,0 +1,140 @@ +parser = require "./parser" +BottleneckError = require "./BottleneckError" + +class LocalDatastore + constructor: (@instance, @storeOptions, storeInstanceOptions) -> + @clientId = @instance._randomIndex() + parser.load storeInstanceOptions, storeInstanceOptions, @ + @_nextRequest = @_lastReservoirRefresh = @_lastReservoirIncrease = Date.now() + @_running = 0 + @_done = 0 + @_unblockTime = 0 + @ready = @Promise.resolve() + @clients = {} + @_startHeartbeat() + + _startHeartbeat: -> + if !@heartbeat? and (( + @storeOptions.reservoirRefreshInterval? and @storeOptions.reservoirRefreshAmount? + ) or ( + @storeOptions.reservoirIncreaseInterval? and @storeOptions.reservoirIncreaseAmount? + )) + (@heartbeat = setInterval => + now = Date.now() + + if @storeOptions.reservoirRefreshInterval? and now >= @_lastReservoirRefresh + @storeOptions.reservoirRefreshInterval + @_lastReservoirRefresh = now + @storeOptions.reservoir = @storeOptions.reservoirRefreshAmount + @instance._drainAll @computeCapacity() + + if @storeOptions.reservoirIncreaseInterval? and now >= @_lastReservoirIncrease + @storeOptions.reservoirIncreaseInterval + { reservoirIncreaseAmount: amount, reservoirIncreaseMaximum: maximum, reservoir } = @storeOptions + @_lastReservoirIncrease = now + incr = if maximum? then Math.min amount, maximum - reservoir else amount + if incr > 0 + @storeOptions.reservoir += incr + @instance._drainAll @computeCapacity() + + , @heartbeatInterval).unref?() + else clearInterval @heartbeat + + __publish__: (message) -> + await @yieldLoop() + @instance.Events.trigger "message", message.toString() + + __disconnect__: (flush) -> + await @yieldLoop() + clearInterval @heartbeat + @Promise.resolve() + + yieldLoop: (t=0) -> new @Promise (resolve, reject) -> setTimeout resolve, t + + computePenalty: -> @storeOptions.penalty ? ((15 * @storeOptions.minTime) or 5000) + + __updateSettings__: (options) -> + await @yieldLoop() + parser.overwrite options, options, @storeOptions + @_startHeartbeat() + @instance._drainAll @computeCapacity() + true + + __running__: -> + await @yieldLoop() + @_running + + __queued__: -> + await @yieldLoop() + @instance.queued() + + __done__: -> + await @yieldLoop() + @_done + + __groupCheck__: (time) -> + await @yieldLoop() + (@_nextRequest + @timeout) < time + + computeCapacity: -> + { maxConcurrent, reservoir } = @storeOptions + if maxConcurrent? and reservoir? then Math.min((maxConcurrent - @_running), reservoir) + else if maxConcurrent? then maxConcurrent - @_running + else if reservoir? then reservoir + else null + + conditionsCheck: (weight) -> + capacity = @computeCapacity() + not capacity? 
or weight <= capacity + + __incrementReservoir__: (incr) -> + await @yieldLoop() + reservoir = @storeOptions.reservoir += incr + @instance._drainAll @computeCapacity() + reservoir + + __currentReservoir__: -> + await @yieldLoop() + @storeOptions.reservoir + + isBlocked: (now) -> @_unblockTime >= now + + check: (weight, now) -> @conditionsCheck(weight) and (@_nextRequest - now) <= 0 + + __check__: (weight) -> + await @yieldLoop() + now = Date.now() + @check weight, now + + __register__: (index, weight, expiration) -> + await @yieldLoop() + now = Date.now() + if @conditionsCheck weight + @_running += weight + if @storeOptions.reservoir? then @storeOptions.reservoir -= weight + wait = Math.max @_nextRequest - now, 0 + @_nextRequest = now + wait + @storeOptions.minTime + { success: true, wait, reservoir: @storeOptions.reservoir } + else { success: false } + + strategyIsBlock: -> @storeOptions.strategy == 3 + + __submit__: (queueLength, weight) -> + await @yieldLoop() + if @storeOptions.maxConcurrent? and weight > @storeOptions.maxConcurrent + throw new BottleneckError("Impossible to add a job having a weight of #{weight} to a limiter having a maxConcurrent setting of #{@storeOptions.maxConcurrent}") + now = Date.now() + reachedHWM = @storeOptions.highWater? and queueLength == @storeOptions.highWater and not @check(weight, now) + blocked = @strategyIsBlock() and (reachedHWM or @isBlocked now) + if blocked + @_unblockTime = now + @computePenalty() + @_nextRequest = @_unblockTime + @storeOptions.minTime + @instance._dropAllQueued() + { reachedHWM, blocked, strategy: @storeOptions.strategy } + + __free__: (index, weight) -> + await @yieldLoop() + @_running -= weight + @_done += weight + @instance._drainAll @computeCapacity() + { running: @_running } + +module.exports = LocalDatastore diff --git a/node_modules/bottleneck/src/Queues.coffee b/node_modules/bottleneck/src/Queues.coffee new file mode 100644 index 000000000..b563ae361 --- /dev/null +++ b/node_modules/bottleneck/src/Queues.coffee @@ -0,0 +1,28 @@ +DLList = require "./DLList" +Events = require "./Events" + +class Queues + + constructor: (num_priorities) -> + @Events = new Events @ + @_length = 0 + @_lists = for i in [1..num_priorities] then new DLList (=> @incr()), (=> @decr()) + + incr: -> if @_length++ == 0 then @Events.trigger "leftzero" + + decr: -> if --@_length == 0 then @Events.trigger "zero" + + push: (job) -> @_lists[job.options.priority].push job + + queued: (priority) -> if priority? then @_lists[priority].length else @_length + + shiftAll: (fn) -> @_lists.forEach (list) -> list.forEachShift fn + + getFirst: (arr=@_lists) -> + for list in arr + return list if list.length > 0 + [] + + shiftLastFrom: (priority) -> @getFirst(@_lists[priority..].reverse()).shift() + +module.exports = Queues diff --git a/node_modules/bottleneck/src/RedisConnection.coffee b/node_modules/bottleneck/src/RedisConnection.coffee new file mode 100644 index 000000000..15379ef68 --- /dev/null +++ b/node_modules/bottleneck/src/RedisConnection.coffee @@ -0,0 +1,91 @@ +parser = require "./parser" +Events = require "./Events" +Scripts = require "./Scripts" + +class RedisConnection + datastore: "redis" + defaults: + Redis: null + clientOptions: {} + client: null + Promise: Promise + Events: null + + constructor: (options={}) -> + parser.load options, @defaults, @ + @Redis ?= eval("require")("redis") # Obfuscated or else Webpack/Angular will try to inline the optional redis module. 
To override this behavior: pass the redis module to Bottleneck as the 'Redis' option. + @Events ?= new Events @ + @terminated = false + + @client ?= @Redis.createClient @clientOptions + @subscriber = @client.duplicate() + @limiters = {} + @shas = {} + + @ready = @Promise.all [@_setup(@client, false), @_setup(@subscriber, true)] + .then => @_loadScripts() + .then => { @client, @subscriber } + + _setup: (client, sub) -> + client.setMaxListeners 0 + new @Promise (resolve, reject) => + client.on "error", (e) => @Events.trigger "error", e + if sub + client.on "message", (channel, message) => + @limiters[channel]?._store.onMessage channel, message + if client.ready then resolve() + else client.once "ready", resolve + + _loadScript: (name) -> + new @Promise (resolve, reject) => + payload = Scripts.payload name + @client.multi([["script", "load", payload]]).exec (err, replies) => + if err? then return reject err + @shas[name] = replies[0] + resolve replies[0] + + _loadScripts: -> @Promise.all(Scripts.names.map (k) => @_loadScript k) + + __runCommand__: (cmd) -> + await @ready + new @Promise (resolve, reject) => + @client.multi([cmd]).exec_atomic (err, replies) -> + if err? then reject(err) else resolve(replies[0]) + + __addLimiter__: (instance) -> + @Promise.all [instance.channel(), instance.channel_client()].map (channel) => + new @Promise (resolve, reject) => + handler = (chan) => + if chan == channel + @subscriber.removeListener "subscribe", handler + @limiters[channel] = instance + resolve() + @subscriber.on "subscribe", handler + @subscriber.subscribe channel + + __removeLimiter__: (instance) -> + @Promise.all [instance.channel(), instance.channel_client()].map (channel) => + unless @terminated + await new @Promise (resolve, reject) => + @subscriber.unsubscribe channel, (err, chan) -> + if err? then return reject err + if chan == channel then return resolve() + delete @limiters[channel] + + __scriptArgs__: (name, id, args, cb) -> + keys = Scripts.keys name, id + [@shas[name], keys.length].concat keys, args, cb + + __scriptFn__: (name) -> + @client.evalsha.bind(@client) + + disconnect: (flush=true) -> + clearInterval(@limiters[k]._store.heartbeat) for k in Object.keys @limiters + @limiters = {} + @terminated = true + + @client.end flush + @subscriber.end flush + @Promise.resolve() + +module.exports = RedisConnection diff --git a/node_modules/bottleneck/src/RedisDatastore.coffee b/node_modules/bottleneck/src/RedisDatastore.coffee new file mode 100644 index 000000000..4a2154f21 --- /dev/null +++ b/node_modules/bottleneck/src/RedisDatastore.coffee @@ -0,0 +1,158 @@ +parser = require "./parser" +BottleneckError = require "./BottleneckError" +RedisConnection = require "./RedisConnection" +IORedisConnection = require "./IORedisConnection" + +class RedisDatastore + constructor: (@instance, @storeOptions, storeInstanceOptions) -> + @originalId = @instance.id + @clientId = @instance._randomIndex() + parser.load storeInstanceOptions, storeInstanceOptions, @ + @clients = {} + @capacityPriorityCounters = {} + @sharedConnection = @connection? 
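+    # A connection passed in through the options is flagged as shared: on
+    # __disconnect__ this limiter only detaches from it rather than closing it,
+    # so one Redis connection can serve several limiters. A minimal sketch
+    # (mirroring test.ts below; 'conn', 'a' and 'b' are hypothetical names):
+    #   conn = new Bottleneck.RedisConnection()
+    #   a = new Bottleneck { id: "limiter-a", connection: conn }
+    #   b = new Bottleneck { id: "limiter-b", connection: conn }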
+ + @connection ?= if @instance.datastore == "redis" then new RedisConnection { @Redis, @clientOptions, @Promise, Events: @instance.Events } + else if @instance.datastore == "ioredis" then new IORedisConnection { @Redis, @clientOptions, @clusterNodes, @Promise, Events: @instance.Events } + + @instance.connection = @connection + @instance.datastore = @connection.datastore + + @ready = @connection.ready + .then (@clients) => @runScript "init", @prepareInitSettings @clearDatastore + .then => @connection.__addLimiter__ @instance + .then => @runScript "register_client", [@instance.queued()] + .then => + (@heartbeat = setInterval => + @runScript "heartbeat", [] + .catch (e) => @instance.Events.trigger "error", e + , @heartbeatInterval).unref?() + @clients + + __publish__: (message) -> + { client } = await @ready + client.publish(@instance.channel(), "message:#{message.toString()}") + + onMessage: (channel, message) -> + try + pos = message.indexOf(":") + [type, data] = [message.slice(0, pos), message.slice(pos+1)] + if type == "capacity" + await @instance._drainAll(if data.length > 0 then ~~data) + else if type == "capacity-priority" + [rawCapacity, priorityClient, counter] = data.split(":") + capacity = if rawCapacity.length > 0 then ~~rawCapacity + if priorityClient == @clientId + drained = await @instance._drainAll(capacity) + newCapacity = if capacity? then capacity - (drained or 0) else "" + await @clients.client.publish(@instance.channel(), "capacity-priority:#{newCapacity}::#{counter}") + else if priorityClient == "" + clearTimeout @capacityPriorityCounters[counter] + delete @capacityPriorityCounters[counter] + @instance._drainAll(capacity) + else + @capacityPriorityCounters[counter] = setTimeout => + try + delete @capacityPriorityCounters[counter] + await @runScript "blacklist_client", [priorityClient] + await @instance._drainAll(capacity) + catch e then @instance.Events.trigger "error", e + , 1000 + else if type == "message" + @instance.Events.trigger "message", data + else if type == "blocked" + await @instance._dropAllQueued() + catch e then @instance.Events.trigger "error", e + + __disconnect__: (flush) -> + clearInterval @heartbeat + if @sharedConnection + @connection.__removeLimiter__ @instance + else + @connection.disconnect flush + + runScript: (name, args) -> + await @ready unless name == "init" or name == "register_client" + new @Promise (resolve, reject) => + all_args = [Date.now(), @clientId].concat args + @instance.Events.trigger "debug", "Calling Redis script: #{name}.lua", all_args + arr = @connection.__scriptArgs__ name, @originalId, all_args, (err, replies) -> + if err? then return reject err + return resolve replies + @connection.__scriptFn__(name) arr... + .catch (e) => + if e.message == "SETTINGS_KEY_NOT_FOUND" + if name == "heartbeat" then @Promise.resolve() + else + @runScript("init", @prepareInitSettings(false)) + .then => @runScript(name, args) + else if e.message == "UNKNOWN_CLIENT" + @runScript("register_client", [@instance.queued()]) + .then => @runScript(name, args) + else @Promise.reject e + + prepareArray: (arr) -> (if x? then x.toString() else "") for x in arr + + prepareObject: (obj) -> + arr = [] + for k, v of obj then arr.push k, (if v? 
then v.toString() else "") + arr + + prepareInitSettings: (clear) -> + args = @prepareObject Object.assign({}, @storeOptions, { + id: @originalId + version: @instance.version + groupTimeout: @timeout + @clientTimeout + }) + args.unshift (if clear then 1 else 0), @instance.version + args + + convertBool: (b) -> !!b + + __updateSettings__: (options) -> + await @runScript "update_settings", @prepareObject options + parser.overwrite options, options, @storeOptions + + __running__: -> @runScript "running", [] + + __queued__: -> @runScript "queued", [] + + __done__: -> @runScript "done", [] + + __groupCheck__: -> @convertBool await @runScript "group_check", [] + + __incrementReservoir__: (incr) -> @runScript "increment_reservoir", [incr] + + __currentReservoir__: -> @runScript "current_reservoir", [] + + __check__: (weight) -> @convertBool await @runScript "check", @prepareArray [weight] + + __register__: (index, weight, expiration) -> + [success, wait, reservoir] = await @runScript "register", @prepareArray [index, weight, expiration] + return { + success: @convertBool(success), + wait, + reservoir + } + + __submit__: (queueLength, weight) -> + try + [reachedHWM, blocked, strategy] = await @runScript "submit", @prepareArray [queueLength, weight] + return { + reachedHWM: @convertBool(reachedHWM), + blocked: @convertBool(blocked), + strategy + } + catch e + if e.message.indexOf("OVERWEIGHT") == 0 + [overweight, weight, maxConcurrent] = e.message.split ":" + throw new BottleneckError("Impossible to add a job having a weight of #{weight} to a limiter having a maxConcurrent setting of #{maxConcurrent}") + else + throw e + + __free__: (index, weight) -> + running = await @runScript "free", @prepareArray [index] + return { running } + +module.exports = RedisDatastore diff --git a/node_modules/bottleneck/src/Scripts.coffee b/node_modules/bottleneck/src/Scripts.coffee new file mode 100644 index 000000000..d614abf05 --- /dev/null +++ b/node_modules/bottleneck/src/Scripts.coffee @@ -0,0 +1,151 @@ +lua = require "./lua.json" + +headers = + refs: lua["refs.lua"] + validate_keys: lua["validate_keys.lua"] + validate_client: lua["validate_client.lua"] + refresh_expiration: lua["refresh_expiration.lua"] + process_tick: lua["process_tick.lua"] + conditions_check: lua["conditions_check.lua"] + get_time: lua["get_time.lua"] + +exports.allKeys = (id) -> [ + ### + HASH + ### + "b_#{id}_settings" + + ### + HASH + job index -> weight + ### + "b_#{id}_job_weights" + + ### + ZSET + job index -> expiration + ### + "b_#{id}_job_expirations" + + ### + HASH + job index -> client + ### + "b_#{id}_job_clients" + + ### + ZSET + client -> sum running + ### + "b_#{id}_client_running" + + ### + HASH + client -> num queued + ### + "b_#{id}_client_num_queued" + + ### + ZSET + client -> last job registered + ### + "b_#{id}_client_last_registered" + + ### + ZSET + client -> last seen + ### + "b_#{id}_client_last_seen" +] + +templates = + init: + keys: exports.allKeys + headers: ["process_tick"] + refresh_expiration: true + code: lua["init.lua"] + group_check: + keys: exports.allKeys + headers: [] + refresh_expiration: false + code: lua["group_check.lua"] + register_client: + keys: exports.allKeys + headers: ["validate_keys"] + refresh_expiration: false + code: lua["register_client.lua"] + blacklist_client: + keys: exports.allKeys + headers: ["validate_keys", "validate_client"] + refresh_expiration: false + code: lua["blacklist_client.lua"] + heartbeat: + keys: exports.allKeys + headers: ["validate_keys", "validate_client", 
"process_tick"] + refresh_expiration: false + code: lua["heartbeat.lua"] + update_settings: + keys: exports.allKeys + headers: ["validate_keys", "validate_client", "process_tick"] + refresh_expiration: true + code: lua["update_settings.lua"] + running: + keys: exports.allKeys + headers: ["validate_keys", "validate_client", "process_tick"] + refresh_expiration: false + code: lua["running.lua"] + queued: + keys: exports.allKeys + headers: ["validate_keys", "validate_client"] + refresh_expiration: false + code: lua["queued.lua"] + done: + keys: exports.allKeys + headers: ["validate_keys", "validate_client", "process_tick"] + refresh_expiration: false + code: lua["done.lua"] + check: + keys: exports.allKeys + headers: ["validate_keys", "validate_client", "process_tick", "conditions_check"] + refresh_expiration: false + code: lua["check.lua"] + submit: + keys: exports.allKeys + headers: ["validate_keys", "validate_client", "process_tick", "conditions_check"] + refresh_expiration: true + code: lua["submit.lua"] + register: + keys: exports.allKeys + headers: ["validate_keys", "validate_client", "process_tick", "conditions_check"] + refresh_expiration: true + code: lua["register.lua"] + free: + keys: exports.allKeys + headers: ["validate_keys", "validate_client", "process_tick"] + refresh_expiration: true + code: lua["free.lua"] + current_reservoir: + keys: exports.allKeys + headers: ["validate_keys", "validate_client", "process_tick"] + refresh_expiration: false + code: lua["current_reservoir.lua"] + increment_reservoir: + keys: exports.allKeys + headers: ["validate_keys", "validate_client", "process_tick"] + refresh_expiration: true + code: lua["increment_reservoir.lua"] + +exports.names = Object.keys templates + +exports.keys = (name, id) -> + templates[name].keys id + +exports.payload = (name) -> + template = templates[name] + Array::concat( + headers.refs, + template.headers.map((h) -> headers[h]), + (if template.refresh_expiration then headers.refresh_expiration else ""), + template.code + ) + .join("\n") diff --git a/node_modules/bottleneck/src/States.coffee b/node_modules/bottleneck/src/States.coffee new file mode 100644 index 000000000..c382c3dd8 --- /dev/null +++ b/node_modules/bottleneck/src/States.coffee @@ -0,0 +1,43 @@ +BottleneckError = require "./BottleneckError" +class States + constructor: (@status) -> + @_jobs = {} + @counts = @status.map(-> 0) + + next: (id) -> + current = @_jobs[id] + next = current + 1 + if current? and next < @status.length + @counts[current]-- + @counts[next]++ + @_jobs[id]++ + else if current? + @counts[current]-- + delete @_jobs[id] + + start: (id) -> + initial = 0 + @_jobs[id] = initial + @counts[initial]++ + + remove: (id) -> + current = @_jobs[id] + if current? + @counts[current]-- + delete @_jobs[id] + current? + + jobStatus: (id) -> @status[@_jobs[id]] ? null + + statusJobs: (status) -> + if status? 
+ pos = @status.indexOf status + if pos < 0 + throw new BottleneckError "status must be one of #{@status.join ', '}" + k for k,v of @_jobs when v == pos + else + Object.keys @_jobs + + statusCounts: -> @counts.reduce(((acc, v, i) => acc[@status[i]] = v; acc), {}) + +module.exports = States diff --git a/node_modules/bottleneck/src/Sync.coffee b/node_modules/bottleneck/src/Sync.coffee new file mode 100644 index 000000000..9df45135f --- /dev/null +++ b/node_modules/bottleneck/src/Sync.coffee @@ -0,0 +1,28 @@ +DLList = require "./DLList" +class Sync + constructor: (@name, @Promise) -> + @_running = 0 + @_queue = new DLList() + isEmpty: -> @_queue.length == 0 + _tryToRun: -> + if (@_running < 1) and @_queue.length > 0 + @_running++ + { task, args, resolve, reject } = @_queue.shift() + cb = try + returned = await task args... + () -> resolve returned + catch error + () -> reject error + @_running-- + @_tryToRun() + cb() + schedule: (task, args...) => + resolve = reject = null + promise = new @Promise (_resolve, _reject) -> + resolve = _resolve + reject = _reject + @_queue.push { task, args, resolve, reject } + @_tryToRun() + promise + +module.exports = Sync diff --git a/node_modules/bottleneck/src/es5.coffee b/node_modules/bottleneck/src/es5.coffee new file mode 100644 index 000000000..12761a733 --- /dev/null +++ b/node_modules/bottleneck/src/es5.coffee @@ -0,0 +1,3 @@ +require("regenerator-runtime/runtime") + +module.exports = require "./Bottleneck" diff --git a/node_modules/bottleneck/src/index.coffee b/node_modules/bottleneck/src/index.coffee new file mode 100644 index 000000000..7a7fcb207 --- /dev/null +++ b/node_modules/bottleneck/src/index.coffee @@ -0,0 +1 @@ +module.exports = require "./Bottleneck" diff --git a/node_modules/bottleneck/src/parser.coffee b/node_modules/bottleneck/src/parser.coffee new file mode 100644 index 000000000..b662fb1e2 --- /dev/null +++ b/node_modules/bottleneck/src/parser.coffee @@ -0,0 +1,10 @@ +exports.load = (received, defaults, onto={}) -> + for k, v of defaults + onto[k] = received[k] ? 
v + onto + +exports.overwrite = (received, defaults, onto={}) -> + for k, v of received + if defaults[k] != undefined + onto[k] = v + onto diff --git a/node_modules/bottleneck/src/redis/blacklist_client.lua b/node_modules/bottleneck/src/redis/blacklist_client.lua new file mode 100644 index 000000000..953ae54ba --- /dev/null +++ b/node_modules/bottleneck/src/redis/blacklist_client.lua @@ -0,0 +1,8 @@ +local blacklist = ARGV[num_static_argv + 1] + +if redis.call('zscore', client_last_seen_key, blacklist) then + redis.call('zadd', client_last_seen_key, 0, blacklist) +end + + +return {} diff --git a/node_modules/bottleneck/src/redis/check.lua b/node_modules/bottleneck/src/redis/check.lua new file mode 100644 index 000000000..556e36548 --- /dev/null +++ b/node_modules/bottleneck/src/redis/check.lua @@ -0,0 +1,6 @@ +local weight = tonumber(ARGV[num_static_argv + 1]) + +local capacity = process_tick(now, false)['capacity'] +local nextRequest = tonumber(redis.call('hget', settings_key, 'nextRequest')) + +return conditions_check(capacity, weight) and nextRequest - now <= 0 diff --git a/node_modules/bottleneck/src/redis/conditions_check.lua b/node_modules/bottleneck/src/redis/conditions_check.lua new file mode 100644 index 000000000..c46fff56e --- /dev/null +++ b/node_modules/bottleneck/src/redis/conditions_check.lua @@ -0,0 +1,3 @@ +local conditions_check = function (capacity, weight) + return capacity == nil or weight <= capacity +end diff --git a/node_modules/bottleneck/src/redis/current_reservoir.lua b/node_modules/bottleneck/src/redis/current_reservoir.lua new file mode 100644 index 000000000..cdfca4452 --- /dev/null +++ b/node_modules/bottleneck/src/redis/current_reservoir.lua @@ -0,0 +1 @@ +return process_tick(now, false)['reservoir'] diff --git a/node_modules/bottleneck/src/redis/done.lua b/node_modules/bottleneck/src/redis/done.lua new file mode 100644 index 000000000..99b725023 --- /dev/null +++ b/node_modules/bottleneck/src/redis/done.lua @@ -0,0 +1,3 @@ +process_tick(now, false) + +return tonumber(redis.call('hget', settings_key, 'done')) diff --git a/node_modules/bottleneck/src/redis/free.lua b/node_modules/bottleneck/src/redis/free.lua new file mode 100644 index 000000000..33df5588d --- /dev/null +++ b/node_modules/bottleneck/src/redis/free.lua @@ -0,0 +1,5 @@ +local index = ARGV[num_static_argv + 1] + +redis.call('zadd', job_expirations_key, 0, index) + +return process_tick(now, false)['running'] diff --git a/node_modules/bottleneck/src/redis/get_time.lua b/node_modules/bottleneck/src/redis/get_time.lua new file mode 100644 index 000000000..26ba3560c --- /dev/null +++ b/node_modules/bottleneck/src/redis/get_time.lua @@ -0,0 +1,7 @@ +redis.replicate_commands() + +local get_time = function () + local time = redis.call('time') + + return tonumber(time[1]..string.sub(time[2], 1, 3)) +end diff --git a/node_modules/bottleneck/src/redis/group_check.lua b/node_modules/bottleneck/src/redis/group_check.lua new file mode 100644 index 000000000..0fd4f9027 --- /dev/null +++ b/node_modules/bottleneck/src/redis/group_check.lua @@ -0,0 +1 @@ +return not (redis.call('exists', settings_key) == 1) diff --git a/node_modules/bottleneck/src/redis/heartbeat.lua b/node_modules/bottleneck/src/redis/heartbeat.lua new file mode 100644 index 000000000..38aa599ba --- /dev/null +++ b/node_modules/bottleneck/src/redis/heartbeat.lua @@ -0,0 +1 @@ +process_tick(now, true) diff --git a/node_modules/bottleneck/src/redis/increment_reservoir.lua b/node_modules/bottleneck/src/redis/increment_reservoir.lua new file mode 
100644 index 000000000..495ddc746 --- /dev/null +++ b/node_modules/bottleneck/src/redis/increment_reservoir.lua @@ -0,0 +1,10 @@ +local incr = tonumber(ARGV[num_static_argv + 1]) + +redis.call('hincrby', settings_key, 'reservoir', incr) + +local reservoir = process_tick(now, true)['reservoir'] + +local groupTimeout = tonumber(redis.call('hget', settings_key, 'groupTimeout')) +refresh_expiration(0, 0, groupTimeout) + +return reservoir diff --git a/node_modules/bottleneck/src/redis/init.lua b/node_modules/bottleneck/src/redis/init.lua new file mode 100644 index 000000000..c8546706c --- /dev/null +++ b/node_modules/bottleneck/src/redis/init.lua @@ -0,0 +1,105 @@ +local clear = tonumber(ARGV[num_static_argv + 1]) +local limiter_version = ARGV[num_static_argv + 2] +local num_local_argv = num_static_argv + 2 + +if clear == 1 then + redis.call('del', unpack(KEYS)) +end + +if redis.call('exists', settings_key) == 0 then + -- Create + local args = {'hmset', settings_key} + + for i = num_local_argv + 1, #ARGV do + table.insert(args, ARGV[i]) + end + + redis.call(unpack(args)) + redis.call('hmset', settings_key, + 'nextRequest', now, + 'lastReservoirRefresh', now, + 'lastReservoirIncrease', now, + 'running', 0, + 'done', 0, + 'unblockTime', 0, + 'capacityPriorityCounter', 0 + ) + +else + -- Apply migrations + local settings = redis.call('hmget', settings_key, + 'id', + 'version' + ) + local id = settings[1] + local current_version = settings[2] + + if current_version ~= limiter_version then + local version_digits = {} + for k, v in string.gmatch(current_version, "([^.]+)") do + table.insert(version_digits, tonumber(k)) + end + + -- 2.10.0 + if version_digits[2] < 10 then + redis.call('hsetnx', settings_key, 'reservoirRefreshInterval', '') + redis.call('hsetnx', settings_key, 'reservoirRefreshAmount', '') + redis.call('hsetnx', settings_key, 'lastReservoirRefresh', '') + redis.call('hsetnx', settings_key, 'done', 0) + redis.call('hset', settings_key, 'version', '2.10.0') + end + + -- 2.11.1 + if version_digits[2] < 11 or (version_digits[2] == 11 and version_digits[3] < 1) then + if redis.call('hstrlen', settings_key, 'lastReservoirRefresh') == 0 then + redis.call('hmset', settings_key, + 'lastReservoirRefresh', now, + 'version', '2.11.1' + ) + end + end + + -- 2.14.0 + if version_digits[2] < 14 then + local old_running_key = 'b_'..id..'_running' + local old_executing_key = 'b_'..id..'_executing' + + if redis.call('exists', old_running_key) == 1 then + redis.call('rename', old_running_key, job_weights_key) + end + if redis.call('exists', old_executing_key) == 1 then + redis.call('rename', old_executing_key, job_expirations_key) + end + redis.call('hset', settings_key, 'version', '2.14.0') + end + + -- 2.15.2 + if version_digits[2] < 15 or (version_digits[2] == 15 and version_digits[3] < 2) then + redis.call('hsetnx', settings_key, 'capacityPriorityCounter', 0) + redis.call('hset', settings_key, 'version', '2.15.2') + end + + -- 2.17.0 + if version_digits[2] < 17 then + redis.call('hsetnx', settings_key, 'clientTimeout', 10000) + redis.call('hset', settings_key, 'version', '2.17.0') + end + + -- 2.18.0 + if version_digits[2] < 18 then + redis.call('hsetnx', settings_key, 'reservoirIncreaseInterval', '') + redis.call('hsetnx', settings_key, 'reservoirIncreaseAmount', '') + redis.call('hsetnx', settings_key, 'reservoirIncreaseMaximum', '') + redis.call('hsetnx', settings_key, 'lastReservoirIncrease', now) + redis.call('hset', settings_key, 'version', '2.18.0') + end + + end + + process_tick(now, false) 
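+  -- At this point any datastore created by an older limiter has been walked
+  -- forward one release at a time (2.10.0 -> 2.11.1 -> 2.14.0 -> 2.15.2 ->
+  -- 2.17.0 -> 2.18.0), with new settings backfilled via 'hsetnx' so existing
+  -- values are preserved, and process_tick has settled the running/reservoir
+  -- state before the limiter starts issuing jobs.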
+end + +local groupTimeout = tonumber(redis.call('hget', settings_key, 'groupTimeout')) +refresh_expiration(0, 0, groupTimeout) + +return {} diff --git a/node_modules/bottleneck/src/redis/process_tick.lua b/node_modules/bottleneck/src/redis/process_tick.lua new file mode 100644 index 000000000..ba7b6da6e --- /dev/null +++ b/node_modules/bottleneck/src/redis/process_tick.lua @@ -0,0 +1,214 @@ +local process_tick = function (now, always_publish) + + local compute_capacity = function (maxConcurrent, running, reservoir) + if maxConcurrent ~= nil and reservoir ~= nil then + return math.min((maxConcurrent - running), reservoir) + elseif maxConcurrent ~= nil then + return maxConcurrent - running + elseif reservoir ~= nil then + return reservoir + else + return nil + end + end + + local settings = redis.call('hmget', settings_key, + 'id', + 'maxConcurrent', + 'running', + 'reservoir', + 'reservoirRefreshInterval', + 'reservoirRefreshAmount', + 'lastReservoirRefresh', + 'reservoirIncreaseInterval', + 'reservoirIncreaseAmount', + 'reservoirIncreaseMaximum', + 'lastReservoirIncrease', + 'capacityPriorityCounter', + 'clientTimeout' + ) + local id = settings[1] + local maxConcurrent = tonumber(settings[2]) + local running = tonumber(settings[3]) + local reservoir = tonumber(settings[4]) + local reservoirRefreshInterval = tonumber(settings[5]) + local reservoirRefreshAmount = tonumber(settings[6]) + local lastReservoirRefresh = tonumber(settings[7]) + local reservoirIncreaseInterval = tonumber(settings[8]) + local reservoirIncreaseAmount = tonumber(settings[9]) + local reservoirIncreaseMaximum = tonumber(settings[10]) + local lastReservoirIncrease = tonumber(settings[11]) + local capacityPriorityCounter = tonumber(settings[12]) + local clientTimeout = tonumber(settings[13]) + + local initial_capacity = compute_capacity(maxConcurrent, running, reservoir) + + -- + -- Process 'running' changes + -- + local expired = redis.call('zrangebyscore', job_expirations_key, '-inf', '('..now) + + if #expired > 0 then + redis.call('zremrangebyscore', job_expirations_key, '-inf', '('..now) + + local flush_batch = function (batch, acc) + local weights = redis.call('hmget', job_weights_key, unpack(batch)) + redis.call('hdel', job_weights_key, unpack(batch)) + local clients = redis.call('hmget', job_clients_key, unpack(batch)) + redis.call('hdel', job_clients_key, unpack(batch)) + + -- Calculate sum of removed weights + for i = 1, #weights do + acc['total'] = acc['total'] + (tonumber(weights[i]) or 0) + end + + -- Calculate sum of removed weights by client + local client_weights = {} + for i = 1, #clients do + local removed = tonumber(weights[i]) or 0 + if removed > 0 then + acc['client_weights'][clients[i]] = (acc['client_weights'][clients[i]] or 0) + removed + end + end + end + + local acc = { + ['total'] = 0, + ['client_weights'] = {} + } + local batch_size = 1000 + + -- Compute changes to Zsets and apply changes to Hashes + for i = 1, #expired, batch_size do + local batch = {} + for j = i, math.min(i + batch_size - 1, #expired) do + table.insert(batch, expired[j]) + end + + flush_batch(batch, acc) + end + + -- Apply changes to Zsets + if acc['total'] > 0 then + redis.call('hincrby', settings_key, 'done', acc['total']) + running = tonumber(redis.call('hincrby', settings_key, 'running', -acc['total'])) + end + + for client, weight in pairs(acc['client_weights']) do + redis.call('zincrby', client_running_key, -weight, client) + end + end + + -- + -- Process 'reservoir' changes + -- + local reservoirRefreshActive = 
reservoirRefreshInterval ~= nil and reservoirRefreshAmount ~= nil + if reservoirRefreshActive and now >= lastReservoirRefresh + reservoirRefreshInterval then + reservoir = reservoirRefreshAmount + redis.call('hmset', settings_key, + 'reservoir', reservoir, + 'lastReservoirRefresh', now + ) + end + + local reservoirIncreaseActive = reservoirIncreaseInterval ~= nil and reservoirIncreaseAmount ~= nil + if reservoirIncreaseActive and now >= lastReservoirIncrease + reservoirIncreaseInterval then + local num_intervals = math.floor((now - lastReservoirIncrease) / reservoirIncreaseInterval) + local incr = reservoirIncreaseAmount * num_intervals + if reservoirIncreaseMaximum ~= nil then + incr = math.min(incr, reservoirIncreaseMaximum - (reservoir or 0)) + end + if incr > 0 then + reservoir = (reservoir or 0) + incr + end + redis.call('hmset', settings_key, + 'reservoir', reservoir, + 'lastReservoirIncrease', lastReservoirIncrease + (num_intervals * reservoirIncreaseInterval) + ) + end + + -- + -- Clear unresponsive clients + -- + local unresponsive = redis.call('zrangebyscore', client_last_seen_key, '-inf', (now - clientTimeout)) + local unresponsive_lookup = {} + local terminated_clients = {} + for i = 1, #unresponsive do + unresponsive_lookup[unresponsive[i]] = true + if tonumber(redis.call('zscore', client_running_key, unresponsive[i])) == 0 then + table.insert(terminated_clients, unresponsive[i]) + end + end + if #terminated_clients > 0 then + redis.call('zrem', client_running_key, unpack(terminated_clients)) + redis.call('hdel', client_num_queued_key, unpack(terminated_clients)) + redis.call('zrem', client_last_registered_key, unpack(terminated_clients)) + redis.call('zrem', client_last_seen_key, unpack(terminated_clients)) + end + + -- + -- Broadcast capacity changes + -- + local final_capacity = compute_capacity(maxConcurrent, running, reservoir) + + if always_publish or (initial_capacity ~= nil and final_capacity == nil) then + -- always_publish or was not unlimited, now unlimited + redis.call('publish', 'b_'..id, 'capacity:'..(final_capacity or '')) + + elseif initial_capacity ~= nil and final_capacity ~= nil and final_capacity > initial_capacity then + -- capacity was increased + -- send the capacity message to the limiter having the lowest number of running jobs + -- the tiebreaker is the limiter having not registered a job in the longest time + + local lowest_concurrency_value = nil + local lowest_concurrency_clients = {} + local lowest_concurrency_last_registered = {} + local client_concurrencies = redis.call('zrange', client_running_key, 0, -1, 'withscores') + + for i = 1, #client_concurrencies, 2 do + local client = client_concurrencies[i] + local concurrency = tonumber(client_concurrencies[i+1]) + + if ( + lowest_concurrency_value == nil or lowest_concurrency_value == concurrency + ) and ( + not unresponsive_lookup[client] + ) and ( + tonumber(redis.call('hget', client_num_queued_key, client)) > 0 + ) then + lowest_concurrency_value = concurrency + table.insert(lowest_concurrency_clients, client) + local last_registered = tonumber(redis.call('zscore', client_last_registered_key, client)) + table.insert(lowest_concurrency_last_registered, last_registered) + end + end + + if #lowest_concurrency_clients > 0 then + local position = 1 + local earliest = lowest_concurrency_last_registered[1] + + for i,v in ipairs(lowest_concurrency_last_registered) do + if v < earliest then + position = i + earliest = v + end + end + + local next_client = lowest_concurrency_clients[position] + 
redis.call('publish', 'b_'..id, + 'capacity-priority:'..(final_capacity or '').. + ':'..next_client.. + ':'..capacityPriorityCounter + ) + redis.call('hincrby', settings_key, 'capacityPriorityCounter', '1') + else + redis.call('publish', 'b_'..id, 'capacity:'..(final_capacity or '')) + end + end + + return { + ['capacity'] = final_capacity, + ['running'] = running, + ['reservoir'] = reservoir + } +end diff --git a/node_modules/bottleneck/src/redis/queued.lua b/node_modules/bottleneck/src/redis/queued.lua new file mode 100644 index 000000000..45ae5245a --- /dev/null +++ b/node_modules/bottleneck/src/redis/queued.lua @@ -0,0 +1,10 @@ +local clientTimeout = tonumber(redis.call('hget', settings_key, 'clientTimeout')) +local valid_clients = redis.call('zrangebyscore', client_last_seen_key, (now - clientTimeout), 'inf') +local client_queued = redis.call('hmget', client_num_queued_key, unpack(valid_clients)) + +local sum = 0 +for i = 1, #client_queued do + sum = sum + tonumber(client_queued[i]) +end + +return sum diff --git a/node_modules/bottleneck/src/redis/refresh_expiration.lua b/node_modules/bottleneck/src/redis/refresh_expiration.lua new file mode 100644 index 000000000..79b88945c --- /dev/null +++ b/node_modules/bottleneck/src/redis/refresh_expiration.lua @@ -0,0 +1,11 @@ +local refresh_expiration = function (now, nextRequest, groupTimeout) + + if groupTimeout ~= nil then + local ttl = (nextRequest + groupTimeout) - now + + for i = 1, #KEYS do + redis.call('pexpire', KEYS[i], ttl) + end + end + +end diff --git a/node_modules/bottleneck/src/redis/refs.lua b/node_modules/bottleneck/src/redis/refs.lua new file mode 100644 index 000000000..daf89fe4c --- /dev/null +++ b/node_modules/bottleneck/src/redis/refs.lua @@ -0,0 +1,13 @@ +local settings_key = KEYS[1] +local job_weights_key = KEYS[2] +local job_expirations_key = KEYS[3] +local job_clients_key = KEYS[4] +local client_running_key = KEYS[5] +local client_num_queued_key = KEYS[6] +local client_last_registered_key = KEYS[7] +local client_last_seen_key = KEYS[8] + +local now = tonumber(ARGV[1]) +local client = ARGV[2] + +local num_static_argv = 2 diff --git a/node_modules/bottleneck/src/redis/register.lua b/node_modules/bottleneck/src/redis/register.lua new file mode 100644 index 000000000..37b225414 --- /dev/null +++ b/node_modules/bottleneck/src/redis/register.lua @@ -0,0 +1,51 @@ +local index = ARGV[num_static_argv + 1] +local weight = tonumber(ARGV[num_static_argv + 2]) +local expiration = tonumber(ARGV[num_static_argv + 3]) + +local state = process_tick(now, false) +local capacity = state['capacity'] +local reservoir = state['reservoir'] + +local settings = redis.call('hmget', settings_key, + 'nextRequest', + 'minTime', + 'groupTimeout' +) +local nextRequest = tonumber(settings[1]) +local minTime = tonumber(settings[2]) +local groupTimeout = tonumber(settings[3]) + +if conditions_check(capacity, weight) then + + redis.call('hincrby', settings_key, 'running', weight) + redis.call('hset', job_weights_key, index, weight) + if expiration ~= nil then + redis.call('zadd', job_expirations_key, now + expiration, index) + end + redis.call('hset', job_clients_key, index, client) + redis.call('zincrby', client_running_key, weight, client) + redis.call('hincrby', client_num_queued_key, client, -1) + redis.call('zadd', client_last_registered_key, now, client) + + local wait = math.max(nextRequest - now, 0) + local newNextRequest = now + wait + minTime + + if reservoir == nil then + redis.call('hset', settings_key, + 'nextRequest', newNextRequest + ) + 
else + reservoir = reservoir - weight + redis.call('hmset', settings_key, + 'reservoir', reservoir, + 'nextRequest', newNextRequest + ) + end + + refresh_expiration(now, newNextRequest, groupTimeout) + + return {true, wait, reservoir} + +else + return {false} +end diff --git a/node_modules/bottleneck/src/redis/register_client.lua b/node_modules/bottleneck/src/redis/register_client.lua new file mode 100644 index 000000000..20bae4253 --- /dev/null +++ b/node_modules/bottleneck/src/redis/register_client.lua @@ -0,0 +1,12 @@ +local queued = tonumber(ARGV[num_static_argv + 1]) + +-- Could have been re-registered concurrently +if not redis.call('zscore', client_last_seen_key, client) then + redis.call('zadd', client_running_key, 0, client) + redis.call('hset', client_num_queued_key, client, queued) + redis.call('zadd', client_last_registered_key, 0, client) +end + +redis.call('zadd', client_last_seen_key, now, client) + +return {} diff --git a/node_modules/bottleneck/src/redis/running.lua b/node_modules/bottleneck/src/redis/running.lua new file mode 100644 index 000000000..4d4794ab5 --- /dev/null +++ b/node_modules/bottleneck/src/redis/running.lua @@ -0,0 +1 @@ +return process_tick(now, false)['running'] diff --git a/node_modules/bottleneck/src/redis/submit.lua b/node_modules/bottleneck/src/redis/submit.lua new file mode 100644 index 000000000..9efeebe7c --- /dev/null +++ b/node_modules/bottleneck/src/redis/submit.lua @@ -0,0 +1,74 @@ +local queueLength = tonumber(ARGV[num_static_argv + 1]) +local weight = tonumber(ARGV[num_static_argv + 2]) + +local capacity = process_tick(now, false)['capacity'] + +local settings = redis.call('hmget', settings_key, + 'id', + 'maxConcurrent', + 'highWater', + 'nextRequest', + 'strategy', + 'unblockTime', + 'penalty', + 'minTime', + 'groupTimeout' +) +local id = settings[1] +local maxConcurrent = tonumber(settings[2]) +local highWater = tonumber(settings[3]) +local nextRequest = tonumber(settings[4]) +local strategy = tonumber(settings[5]) +local unblockTime = tonumber(settings[6]) +local penalty = tonumber(settings[7]) +local minTime = tonumber(settings[8]) +local groupTimeout = tonumber(settings[9]) + +if maxConcurrent ~= nil and weight > maxConcurrent then + return redis.error_reply('OVERWEIGHT:'..weight..':'..maxConcurrent) +end + +local reachedHWM = (highWater ~= nil and queueLength == highWater + and not ( + conditions_check(capacity, weight) + and nextRequest - now <= 0 + ) +) + +local blocked = strategy == 3 and (reachedHWM or unblockTime >= now) + +if blocked then + local computedPenalty = penalty + if computedPenalty == nil then + if minTime == 0 then + computedPenalty = 5000 + else + computedPenalty = 15 * minTime + end + end + + local newNextRequest = now + computedPenalty + minTime + + redis.call('hmset', settings_key, + 'unblockTime', now + computedPenalty, + 'nextRequest', newNextRequest + ) + + local clients_queued_reset = redis.call('hkeys', client_num_queued_key) + local queued_reset = {} + for i = 1, #clients_queued_reset do + table.insert(queued_reset, clients_queued_reset[i]) + table.insert(queued_reset, 0) + end + redis.call('hmset', client_num_queued_key, unpack(queued_reset)) + + redis.call('publish', 'b_'..id, 'blocked:') + + refresh_expiration(now, newNextRequest, groupTimeout) +end + +if not blocked and not reachedHWM then + redis.call('hincrby', client_num_queued_key, client, 1) +end + +return {reachedHWM, blocked, strategy} diff --git a/node_modules/bottleneck/src/redis/update_settings.lua 
b/node_modules/bottleneck/src/redis/update_settings.lua new file mode 100644 index 000000000..f0e8fcd51 --- /dev/null +++ b/node_modules/bottleneck/src/redis/update_settings.lua @@ -0,0 +1,14 @@ +local args = {'hmset', settings_key} + +for i = num_static_argv + 1, #ARGV do + table.insert(args, ARGV[i]) +end + +redis.call(unpack(args)) + +process_tick(now, true) + +local groupTimeout = tonumber(redis.call('hget', settings_key, 'groupTimeout')) +refresh_expiration(0, 0, groupTimeout) + +return {} diff --git a/node_modules/bottleneck/src/redis/validate_client.lua b/node_modules/bottleneck/src/redis/validate_client.lua new file mode 100644 index 000000000..4f025e9ee --- /dev/null +++ b/node_modules/bottleneck/src/redis/validate_client.lua @@ -0,0 +1,5 @@ +if not redis.call('zscore', client_last_seen_key, client) then + return redis.error_reply('UNKNOWN_CLIENT') +end + +redis.call('zadd', client_last_seen_key, now, client) diff --git a/node_modules/bottleneck/src/redis/validate_keys.lua b/node_modules/bottleneck/src/redis/validate_keys.lua new file mode 100644 index 000000000..f53401715 --- /dev/null +++ b/node_modules/bottleneck/src/redis/validate_keys.lua @@ -0,0 +1,3 @@ +if not (redis.call('exists', settings_key) == 1) then + return redis.error_reply('SETTINGS_KEY_NOT_FOUND') +end diff --git a/node_modules/bottleneck/test.ts b/node_modules/bottleneck/test.ts new file mode 100644 index 000000000..bf064a714 --- /dev/null +++ b/node_modules/bottleneck/test.ts @@ -0,0 +1,335 @@ +/// <reference path="bottleneck.d.ts" /> + +import Bottleneck from "bottleneck"; +// import * as assert from "assert"; +function assert(b: boolean): void { } + +/* +This file is run by scripts/build.sh. +It is used to validate the typings in bottleneck.d.ts. +The command is: tsc --noEmit --strictNullChecks test.ts +This file cannot be run directly. +In order to do that, you must comment out the first line, +and change "bottleneck" to "." on the third line.
+*/ +function withCb(foo: number, bar: () => void, cb: (err: any, result: string) => void) { + let s: string = `cb ${foo}`; + cb(null, s); +} + +console.log(Bottleneck); + +let limiter = new Bottleneck({ + maxConcurrent: 5, + minTime: 1000, + highWater: 20, + strategy: Bottleneck.strategy.LEAK, + reservoirRefreshInterval: 1000 * 60, + reservoirRefreshAmount: 10, + reservoirIncreaseInterval: 1000 * 60, + reservoirIncreaseAmount: 2, + reservoirIncreaseMaximum: 15 +}); + +limiter.ready().then(() => { console.log('Ready') }); +limiter.clients().client; +limiter.disconnect(); + +limiter.currentReservoir().then(function (x) { + if (x != null) { + let i: number = x; + } +}); + +limiter.incrementReservoir(5).then(function (x) { + if (x != null) { + let i: number = x; + } +}); + +limiter.running().then(function (x) { + let i: number = x; +}); + +limiter.clusterQueued().then(function (x) { + let i: number = x; +}); + +limiter.done().then(function (x) { + let i: number = x; +}); + +limiter.submit(withCb, 1, () => {}, (err, result) => { + let s: string = result; + console.log(s); + assert(s == "cb 1"); +}); + +function withPromise(foo: number, bar: () => void): PromiseLike<string> { + let s: string = `promise ${foo}`; + return Promise.resolve(s); +} + +let foo: Promise<string> = limiter.schedule(withPromise, 1, () => {}); +foo.then(function (result: string) { + let s: string = result; + console.log(s); + assert(s == "promise 1"); +}); + +limiter.on("message", (msg) => console.log(msg)); + +limiter.publish(JSON.stringify({ a: "abc", b: { c: 123 }})); + +function checkEventInfo(info: Bottleneck.EventInfo) { + const numArgs: number = info.args.length; + const id: string = info.options.id; +} + +limiter.on('dropped', (info) => { + checkEventInfo(info) + const task: Function = info.task; + const promise: Promise<any> = info.promise; +}) + +limiter.on('received', (info) => { + checkEventInfo(info) +}) + +limiter.on('queued', (info) => { + checkEventInfo(info) + const blocked: boolean = info.blocked; + const reachedHWM: boolean = info.reachedHWM; +}) + +limiter.on('scheduled', (info) => { + checkEventInfo(info) +}) + +limiter.on('executing', (info) => { + checkEventInfo(info) + const count: number = info.retryCount; +}) + +limiter.on('failed', (error, info) => { + checkEventInfo(info) + const message: string = error.message; + const count: number = info.retryCount; + return Promise.resolve(10) +}) + +limiter.on('failed', (error, info) => { + checkEventInfo(info) + const message: string = error.message; + const count: number = info.retryCount; + return Promise.resolve(null) +}) + +limiter.on('failed', (error, info) => { + checkEventInfo(info) + const message: string = error.message; + const count: number = info.retryCount; + return Promise.resolve() +}) + +limiter.on('failed', (error, info) => { + checkEventInfo(info) + const message: string = error.message; + const count: number = info.retryCount; + return 10 +}) + +limiter.on('failed', (error, info) => { + checkEventInfo(info) + const message: string = error.message; + const count: number = info.retryCount; + return null +}) + +limiter.on('failed', (error, info) => { + checkEventInfo(info) + const message: string = error.message; + const count: number = info.retryCount; +}) + +limiter.on('retry', (message: string, info) => { + checkEventInfo(info) + const count: number = info.retryCount; +}) + +limiter.on('done', (info) => { + checkEventInfo(info) + const count: number = info.retryCount; +}) + +let group = new Bottleneck.Group({ + maxConcurrent: 5, + minTime: 1000, +
highWater: 10, + strategy: Bottleneck.strategy.LEAK, + datastore: "ioredis", + clearDatastore: true, + clientOptions: {}, + clusterNodes: [] +}); + +group.on('created', (limiter, key) => { + assert(limiter.empty()) + assert(key.length > 0) +}) + +group.key("foo").submit(withCb, 2, () => {}, (err, result) => { + let s: string = `${result} foo`; + console.log(s); + assert(s == "cb 2 foo"); +}); + +group.key("bar").submit({ priority: 4 }, withCb, 3, () => {}, (err, result) => { + let s: string = `${result} bar`; + console.log(s); + assert(s == "cb 3 bar"); +}); + +let f1: Promise<string> = group.key("pizza").schedule(withPromise, 2, () => {}); +f1.then(function (result: string) { + let s: string = result; + console.log(s); + assert(s == "promise 2"); +}); + +let f2: Promise<string> = group.key("pie").schedule({ priority: 4 }, withPromise, 3, () => {}); +f2.then(function (result: string) { + let s: string = result; + console.log(s); + assert(s == "promise 3"); +}); + +let wrapped = limiter.wrap((a: number, b: number) => { + let s: string = `Total: ${a + b}`; + return Promise.resolve(s); +}); + +wrapped(1, 2).then((x) => { + let s: string = x; + console.log(s); + assert(s == "Total: 3"); +}); + +wrapped.withOptions({ priority: 1, id: 'some-id' }, 9, 9).then((x) => { + let s: string = x; + console.log(s); + assert(s == "Total: 18"); +}) + +let counts = limiter.counts(); +console.log(`${counts.EXECUTING + 2}`); +console.log(limiter.jobStatus('some-id')) +console.log(limiter.jobs()); +console.log(limiter.jobs(Bottleneck.Status.RUNNING)); + + +group.deleteKey("pizza") +.then(function (deleted: boolean) { + console.log(deleted) +}); +group.updateSettings({ timeout: 5, maxConcurrent: null, reservoir: null }); + +let keys: string[] = group.keys(); +assert(keys.length == 3); + +group.clusterKeys() +.then(function (allKeys: string[]) { + let count = allKeys.length; +}) + +let queued: number = limiter.chain(group.key("pizza")).queued(); + +limiter.stop({ + dropWaitingJobs: true, + dropErrorMessage: "Begone!", + enqueueErrorMessage: "Denied!"
+}).then(() => { + console.log('All stopped.') +}) + +wrapped(4, 5).catch((e) => { + assert(e.message === "Denied!") +}) + +const id: string = limiter.id; +const datastore: string = limiter.datastore; +const channel: string = limiter.channel(); + +const redisConnection = new Bottleneck.RedisConnection({ + client: "NodeRedis client object", + clientOptions: {} +}) + +redisConnection.ready() +.then(function (redisConnectionClients) { + const client = redisConnectionClients.client; + const subscriber = redisConnectionClients.subscriber; +}) + +redisConnection.on("error", (err) => { + console.log(err.message) +}) + +const limiterWithConn = new Bottleneck({ + connection: redisConnection +}) + +const ioredisConnection = new Bottleneck.IORedisConnection({ + client: "ioredis client object", + clientOptions: {}, + clusterNodes: [] +}) + +ioredisConnection.ready() +.then(function (ioredisConnectionClients) { + const client = ioredisConnectionClients.client; + const subscriber = ioredisConnectionClients.subscriber; +}) + +ioredisConnection.on("error", (err: Bottleneck.BottleneckError) => { + console.log(err.message) +}) + +const groupWithConn = new Bottleneck.Group({ + connection: ioredisConnection +}) + +const limiterWithConnFromGroup = new Bottleneck({ + connection: groupWithConn.connection +}) + +const groupWithConnFromLimiter = new Bottleneck.Group({ + connection: limiterWithConn.connection +}) + + +const batcher = new Bottleneck.Batcher({ + maxTime: 1000, + maxSize: 10 +}) + +batcher.on("batch", (batch) => { + const len: number = batch.length + console.log("Number of elements:", len) +}) + +batcher.on("error", (err: Bottleneck.BottleneckError) => { + console.log(err.message) +}) + +batcher.add("abc") +batcher.add({ xyz: 5 }) +.then(() => console.log("Flushed!")) + +const object = {} +const emitter = new Bottleneck.Events(object) +const listenerCount: number = emitter.listenerCount('info') +emitter.trigger('info', 'hello', 'world', 123).then(function (result) { + console.log(result) +}) diff --git a/node_modules/bottleneck/test/DLList.js b/node_modules/bottleneck/test/DLList.js new file mode 100644 index 000000000..505bdcd9f --- /dev/null +++ b/node_modules/bottleneck/test/DLList.js @@ -0,0 +1,148 @@ +var DLList = require('../lib/DLList') +var assert = require('assert') +var c = require('./context')({datastore: 'local'}) + +var fakeQueues = function () { + this._length = 0 + this.incr = () => this._length++ + this.decr = () => this._length-- + this.fns = [this.incr, this.decr] +} + +describe('DLList', function () { + + it('Should be created and be empty', function () { + var queues = new fakeQueues() + var list = new DLList() + c.mustEqual(list.getArray().length, 0) + }) + + it('Should be possible to append once', function () { + var queues = new fakeQueues() + var list = new DLList(...queues.fns) + list.push(5) + var arr = list.getArray() + c.mustEqual(arr.length, 1) + c.mustEqual(list.length, 1) + c.mustEqual(queues._length, 1) + c.mustEqual(arr[0], 5) + }) + + it('Should be possible to append multiple times', function () { + var queues = new fakeQueues() + var list = new DLList(...queues.fns) + list.push(5) + list.push(6) + var arr = list.getArray() + c.mustEqual(arr.length, 2) + c.mustEqual(list.length, 2) + c.mustEqual(queues._length, 2) + c.mustEqual(arr[0], 5) + c.mustEqual(arr[1], 6) + + list.push(10) + + arr = list.getArray() + c.mustEqual(arr.length, 3) + c.mustEqual(list.length, 3) + c.mustEqual(arr[0], 5) + c.mustEqual(arr[1], 6) + c.mustEqual(arr[2], 10) + }) + + it('Should be 
possible to shift an empty list', function () { + var queues = new fakeQueues() + var list = new DLList(...queues.fns) + c.mustEqual(list.length, 0) + assert(list.shift() === undefined) + var arr = list.getArray() + c.mustEqual(arr.length, 0) + c.mustEqual(list.length, 0) + assert(list.shift() === undefined) + arr = list.getArray() + c.mustEqual(arr.length, 0) + c.mustEqual(list.length, 0) + c.mustEqual(queues._length, 0) + }) + + it('Should be possible to append then shift once', function () { + var queues = new fakeQueues() + var list = new DLList(...queues.fns) + list.push(5) + c.mustEqual(list.length, 1) + c.mustEqual(list.shift(), 5) + var arr = list.getArray() + c.mustEqual(arr.length, 0) + c.mustEqual(list.length, 0) + c.mustEqual(queues._length, 0) + }) + + it('Should be possible to append then shift multiple times', function () { + var queues = new fakeQueues() + var list = new DLList(...queues.fns) + list.push(5) + c.mustEqual(list.length, 1) + c.mustEqual(list.shift(), 5) + c.mustEqual(list.length, 0) + + list.push(6) + c.mustEqual(list.length, 1) + c.mustEqual(list.shift(), 6) + c.mustEqual(list.length, 0) + c.mustEqual(queues._length, 0) + }) + + it('Should pass a full test', function () { + var queues = new fakeQueues() + var list = new DLList(...queues.fns) + list.push(10) + c.mustEqual(list.length, 1) + list.push("11") + c.mustEqual(list.length, 2) + list.push(12) + c.mustEqual(list.length, 3) + c.mustEqual(queues._length, 3) + + c.mustEqual(list.shift(), 10) + c.mustEqual(list.length, 2) + c.mustEqual(list.shift(),"11") + c.mustEqual(list.length, 1) + + list.push(true) + c.mustEqual(list.length, 2) + + var arr = list.getArray() + c.mustEqual(arr[0], 12) + c.mustEqual(arr[1], true) + c.mustEqual(arr.length, 2) + c.mustEqual(queues._length, 2) + }) + + it('Should return the first value without shifting', function () { + var queues = new fakeQueues() + var list = new DLList(...queues.fns) + assert(list.first() === undefined) + assert(list.first() === undefined) + + list.push(1) + c.mustEqual(list.first(), 1) + c.mustEqual(list.first(), 1) + + list.push(2) + c.mustEqual(list.first(), 1) + c.mustEqual(list.first(), 1) + + c.mustEqual(list.shift(), 1) + c.mustEqual(list.first(), 2) + c.mustEqual(list.first(), 2) + + c.mustEqual(list.shift(), 2) + assert(list.first() === undefined) + assert(list.first() === undefined) + + assert(list.first() === undefined) + assert(list.shift() === undefined) + assert(list.first() === undefined) + }) + +}) diff --git a/node_modules/bottleneck/test/batcher.js b/node_modules/bottleneck/test/batcher.js new file mode 100644 index 000000000..c195367f4 --- /dev/null +++ b/node_modules/bottleneck/test/batcher.js @@ -0,0 +1,209 @@ +var makeTest = require('./context') +var Bottleneck = require('./bottleneck') +var assert = require('assert') + +describe('Batcher', function () { + var c + + afterEach(function () { + return c.limiter.disconnect(false) + }) + + it('Should batch by time and size', function () { + c = makeTest() + var batcher = new Bottleneck.Batcher({ + maxTime: 50, + maxSize: 3 + }) + var t0 = Date.now() + var batches = [] + + batcher.on('batch', function (batcher) { + batches.push(batcher) + }) + + return Promise.all([ + batcher.add(1).then((x) => c.limiter.schedule(c.promise, null, Date.now() - t0, 1)), + batcher.add(2).then((x) => c.limiter.schedule(c.promise, null, Date.now() - t0, 2)), + batcher.add(3).then((x) => c.limiter.schedule(c.promise, null, Date.now() - t0, 3)), + batcher.add(4).then((x) => c.limiter.schedule(c.promise, null, 
Date.now() - t0, 4)), + batcher.add(5).then((x) => c.limiter.schedule(c.promise, null, Date.now() - t0, 5)) + ]) + .then(function (data) { + c.mustEqual( + data.map((([t, x]) => [Math.floor(t / 50), x])), + [[0, 1], [0, 2], [0, 3], [1, 4], [1, 5]] + ) + + return c.last() + }) + .then(function (results) { + c.checkDuration(50, 20) + c.mustEqual(batches, [[1, 2, 3], [4, 5]]) + }) + }) + + it('Should batch by time', function () { + c = makeTest() + var batcher = new Bottleneck.Batcher({ + maxTime: 50 + }) + var t0 = Date.now() + var batches = [] + + batcher.on('batch', function (batcher) { + batches.push(batcher) + }) + + return Promise.all([ + batcher.add(1).then((x) => c.limiter.schedule(c.promise, null, Date.now() - t0, 1)), + batcher.add(2).then((x) => c.limiter.schedule(c.promise, null, Date.now() - t0, 2)) + ]) + .then(function (data) { + c.mustEqual( + data.map((([t, x]) => [Math.floor(t / 50), x])), + [[1, 1], [1, 2]] + ) + + return Promise.all([ + batcher.add(3).then((x) => c.limiter.schedule(c.promise, null, Date.now() - t0, 3)), + batcher.add(4).then((x) => c.limiter.schedule(c.promise, null, Date.now() - t0, 4)) + ]) + }) + .then(function (data) { + c.mustEqual( + data.map((([t, x]) => [Math.floor(t / 50), x])), + [[2, 3], [2, 4]] + ) + + return c.last() + }) + .then(function (results) { + c.checkDuration(100) + c.mustEqual(batches, [[1, 2], [3, 4]]) + }) + }) + + it('Should batch by size', function () { + c = makeTest() + var batcher = new Bottleneck.Batcher({ + maxSize: 2 + }) + var batches = [] + + batcher.on('batch', function (batcher) { + batches.push(batcher) + }) + + return Promise.all([ + batcher.add(1).then((x) => c.limiter.schedule(c.promise, null, 1)), + batcher.add(2).then((x) => c.limiter.schedule(c.promise, null, 2)) + ]) + .then(function () { + return Promise.all([ + batcher.add(3).then((x) => c.limiter.schedule(c.promise, null, 3)), + batcher.add(4).then((x) => c.limiter.schedule(c.promise, null, 4)) + ]) + }) + .then(c.last) + .then(function (results) { + c.checkDuration(0) + c.mustEqual(batches, [[1, 2], [3, 4]]) + }) + }) + + it('Should stagger flushes', function () { + c = makeTest() + var batcher = new Bottleneck.Batcher({ + maxTime: 50, + maxSize: 3 + }) + var t0 = Date.now() + var batches = [] + + batcher.on('batch', function (batcher) { + batches.push(batcher) + }) + + return Promise.all([ + batcher.add(1).then((x) => c.limiter.schedule(c.promise, null, Date.now() - t0, 1)), + batcher.add(2).then((x) => c.limiter.schedule(c.promise, null, Date.now() - t0, 2)) + ]) + .then(function (data) { + c.mustEqual( + data.map((([t, x]) => [Math.floor(t / 50), x])), + [[1, 1], [1, 2]] + ) + + var promises = [] + promises.push(batcher.add(3).then((x) => c.limiter.schedule(c.promise, null, Date.now() - t0, 3))) + + return c.wait(10) + .then(function () { + promises.push(batcher.add(4).then((x) => c.limiter.schedule(c.promise, null, Date.now() - t0, 4))) + + return Promise.all(promises) + }) + }) + .then(function (data) { + c.mustEqual( + data.map((([t, x]) => [Math.floor(t / 50), x])), + [[2, 3], [2, 4]] + ) + + return c.last() + }) + .then(function (results) { + c.checkDuration(120, 20) + c.mustEqual(batches, [[1, 2], [3, 4]]) + }) + }) + + it('Should force then stagger flushes', function () { + c = makeTest() + var batcher = new Bottleneck.Batcher({ + maxTime: 50, + maxSize: 3 + }) + var t0 = Date.now() + var batches = [] + + batcher.on('batch', function (batcher) { + batches.push(batcher) + }) + + var promises = [] + promises.push(batcher.add(1).then((x) => 
c.limiter.schedule(c.promise, null, Date.now() - t0, 1))) + promises.push(batcher.add(2).then((x) => c.limiter.schedule(c.promise, null, Date.now() - t0, 2))) + + return c.wait(10) + .then(function () { + promises.push(batcher.add(3).then((x) => c.limiter.schedule(c.promise, null, Date.now() - t0, 3))) + + return Promise.all(promises) + }) + .then(function (data) { + c.mustEqual( + data.map((([t, x]) => [Math.floor(t / 50), x])), + [[0, 1], [0, 2], [0, 3]] + ) + + return Promise.all([ + batcher.add(4).then((x) => c.limiter.schedule(c.promise, null, Date.now() - t0, 4)), + batcher.add(5).then((x) => c.limiter.schedule(c.promise, null, Date.now() - t0, 5)), + ]) + }) + .then(function (data) { + c.mustEqual( + data.map((([t, x]) => [Math.floor(t / 50), x])), + [[1, 4], [1, 5]] + ) + + return c.last() + }) + .then(function (results) { + c.checkDuration(85, 25) + c.mustEqual(batches, [[1, 2, 3], [4, 5]]) + }) + }) +}) diff --git a/node_modules/bottleneck/test/bottleneck.js b/node_modules/bottleneck/test/bottleneck.js new file mode 100644 index 000000000..a3bc0c88e --- /dev/null +++ b/node_modules/bottleneck/test/bottleneck.js @@ -0,0 +1,7 @@ +if (process.env.BUILD === 'es5') { + module.exports = require('../es5.js') +} else if (process.env.BUILD === 'light') { + module.exports = require('../light.js') +} else { + module.exports = require('../lib/index.js') +} diff --git a/node_modules/bottleneck/test/cluster.js b/node_modules/bottleneck/test/cluster.js new file mode 100644 index 000000000..5b28404f7 --- /dev/null +++ b/node_modules/bottleneck/test/cluster.js @@ -0,0 +1,1549 @@ +var makeTest = require('./context') +var Bottleneck = require('./bottleneck') +var Scripts = require('../lib/Scripts.js') +var assert = require('assert') +var packagejson = require('../package.json') + +if (process.env.DATASTORE === 'redis' || process.env.DATASTORE === 'ioredis') { + + var limiterKeys = function (limiter) { + return Scripts.allKeys(limiter._store.originalId) + } + var countKeys = function (limiter) { + return runCommand(limiter, 'exists', limiterKeys(limiter)) + } + var deleteKeys = function (limiter) { + return runCommand(limiter, 'del', limiterKeys(limiter)) + } + var runCommand = function (limiter, command, args) { + return new Promise(function (resolve, reject) { + limiter._store.clients.client[command](...args, function (err, data) { + if (err != null) return reject(err) + return resolve(data) + }) + }) + } + + describe('Cluster-only', function () { + var c + + afterEach(function () { + return c.limiter.disconnect(false) + }) + + it('Should return a promise for ready()', function () { + c = makeTest({ maxConcurrent: 2 }) + + return c.limiter.ready() + }) + + it('Should return clients', function () { + c = makeTest({ maxConcurrent: 2 }) + + return c.limiter.ready() + .then(function (clients) { + c.mustEqual(Object.keys(clients), ['client', 'subscriber']) + c.mustEqual(Object.keys(c.limiter.clients()), ['client', 'subscriber']) + }) + }) + + it('Should return a promise when disconnecting', function () { + c = makeTest({ maxConcurrent: 2 }) + + return c.limiter.disconnect() + .then(function () { + // do nothing + }) + }) + + it('Should allow passing a limiter\'s connection to a new limiter', function () { + c = makeTest() + c.limiter.connection.id = 'some-id' + var limiter = new Bottleneck({ + minTime: 50, + connection: c.limiter.connection + }) + + return Promise.all([c.limiter.ready(), limiter.ready()]) + .then(function () { + c.mustEqual(limiter.connection.id, 'some-id') + 
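+        // Because the second limiter reuses the first one's connection object,
+        // it reports the same connection id and the connection's datastore type
+        // (asserted next); only the limiter-level settings are independent.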
c.mustEqual(limiter.datastore, process.env.DATASTORE) + + return Promise.all([ + c.pNoErrVal(c.limiter.schedule(c.promise, null, 1), 1), + c.pNoErrVal(limiter.schedule(c.promise, null, 2), 2) + ]) + }) + .then(c.last) + .then(function (results) { + c.checkResultsOrder([[1], [2]]) + c.checkDuration(0) + }) + }) + + it('Should allow passing a limiter\'s connection to a new Group', function () { + c = makeTest() + c.limiter.connection.id = 'some-id' + var group = new Bottleneck.Group({ + minTime: 50, + connection: c.limiter.connection + }) + var limiter1 = group.key('A') + var limiter2 = group.key('B') + + return Promise.all([c.limiter.ready(), limiter1.ready(), limiter2.ready()]) + .then(function () { + c.mustEqual(limiter1.connection.id, 'some-id') + c.mustEqual(limiter2.connection.id, 'some-id') + c.mustEqual(limiter1.datastore, process.env.DATASTORE) + c.mustEqual(limiter2.datastore, process.env.DATASTORE) + + return Promise.all([ + c.pNoErrVal(c.limiter.schedule(c.promise, null, 1), 1), + c.pNoErrVal(limiter1.schedule(c.promise, null, 2), 2), + c.pNoErrVal(limiter2.schedule(c.promise, null, 3), 3) + ]) + }) + .then(c.last) + .then(function (results) { + c.checkResultsOrder([[1], [2], [3]]) + c.checkDuration(0) + }) + }) + + it('Should allow passing a Group\'s connection to a new limiter', function () { + c = makeTest() + var group = new Bottleneck.Group({ + minTime: 50, + datastore: process.env.DATASTORE, + clearDatastore: true + }) + group.connection.id = 'some-id' + + var limiter1 = group.key('A') + var limiter2 = new Bottleneck({ + minTime: 50, + connection: group.connection + }) + + return Promise.all([limiter1.ready(), limiter2.ready()]) + .then(function () { + c.mustEqual(limiter1.connection.id, 'some-id') + c.mustEqual(limiter2.connection.id, 'some-id') + c.mustEqual(limiter1.datastore, process.env.DATASTORE) + c.mustEqual(limiter2.datastore, process.env.DATASTORE) + + return Promise.all([ + c.pNoErrVal(limiter1.schedule(c.promise, null, 1), 1), + c.pNoErrVal(limiter2.schedule(c.promise, null, 2), 2) + ]) + }) + .then(c.last) + .then(function (results) { + c.checkResultsOrder([[1], [2]]) + c.checkDuration(0) + return group.disconnect() + }) + }) + + it('Should allow passing a Group\'s connection to a new Group', function () { + c = makeTest() + var group1 = new Bottleneck.Group({ + minTime: 50, + datastore: process.env.DATASTORE, + clearDatastore: true + }) + group1.connection.id = 'some-id' + + var group2 = new Bottleneck.Group({ + minTime: 50, + connection: group1.connection, + clearDatastore: true + }) + + var limiter1 = group1.key('AAA') + var limiter2 = group1.key('BBB') + var limiter3 = group1.key('CCC') + var limiter4 = group1.key('DDD') + + return Promise.all([ + limiter1.ready(), + limiter2.ready(), + limiter3.ready(), + limiter4.ready() + ]) + .then(function () { + c.mustEqual(group1.connection.id, 'some-id') + c.mustEqual(group2.connection.id, 'some-id') + c.mustEqual(limiter1.connection.id, 'some-id') + c.mustEqual(limiter2.connection.id, 'some-id') + c.mustEqual(limiter3.connection.id, 'some-id') + c.mustEqual(limiter4.connection.id, 'some-id') + c.mustEqual(limiter1.datastore, process.env.DATASTORE) + c.mustEqual(limiter2.datastore, process.env.DATASTORE) + c.mustEqual(limiter3.datastore, process.env.DATASTORE) + c.mustEqual(limiter4.datastore, process.env.DATASTORE) + + return Promise.all([ + c.pNoErrVal(limiter1.schedule(c.promise, null, 1), 1), + c.pNoErrVal(limiter2.schedule(c.promise, null, 2), 2), + c.pNoErrVal(limiter3.schedule(c.promise, null, 3), 3), + 
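[Annotation, not part of the patch.] These connection-sharing tests exercise a pattern Bottleneck documents: instantiate one Connection object and hand it to several limiters or Groups so they all reuse a single pair of Redis clients. A hedged sketch of that setup, assuming the README's API; the clientOptions values are placeholders, and Bottleneck.IORedisConnection is the ioredis equivalent:

const Bottleneck = require('bottleneck')

// One shared connection; the caller owns its lifecycle.
const connection = new Bottleneck.RedisConnection({
  clientOptions: { host: '127.0.0.1', port: 6379 }
})

const limiter = new Bottleneck({ minTime: 50, connection })
const group = new Bottleneck.Group({ minTime: 50, connection })

// Limiters sharing a connection are not torn down automatically;
// the tests above call disconnect(false) on each limiter when done.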
c.pNoErrVal(limiter4.schedule(c.promise, null, 4), 4) + ]) + }) + .then(c.last) + .then(function (results) { + c.checkResultsOrder([[1], [2], [3], [4]]) + c.checkDuration(0) + return group1.disconnect() + }) + }) + + it('Should not have a key TTL by default for standalone limiters', function () { + c = makeTest() + + return c.limiter.ready() + .then(function () { + var settings_key = limiterKeys(c.limiter)[0] + return runCommand(c.limiter, 'ttl', [settings_key]) + }) + .then(function (ttl) { + assert(ttl < 0) + }) + }) + + it('Should allow timeout setting for standalone limiters', function () { + c = makeTest({ timeout: 5 * 60 * 1000 }) + + return c.limiter.ready() + .then(function () { + var settings_key = limiterKeys(c.limiter)[0] + return runCommand(c.limiter, 'ttl', [settings_key]) + }) + .then(function (ttl) { + assert(ttl >= 290 && ttl <= 305) + }) + }) + + it('Should compute reservoir increased based on number of missed intervals', async function () { + const settings = { + id: 'missed-intervals', + clearDatastore: false, + reservoir: 2, + reservoirIncreaseInterval: 100, + reservoirIncreaseAmount: 2, + timeout: 2000 + } + c = makeTest({ ...settings }) + await c.limiter.ready() + + c.mustEqual(await c.limiter.currentReservoir(), 2) + + const settings_key = limiterKeys(c.limiter)[0] + await runCommand(c.limiter, 'hincrby', [settings_key, 'lastReservoirIncrease', -3000]) + + const limiter2 = new Bottleneck({ ...settings, datastore: process.env.DATASTORE }) + await limiter2.ready() + + c.mustEqual(await c.limiter.currentReservoir(), 62) // 2 + ((3000 / 100) * 2) === 62 + + await limiter2.disconnect() + }) + + it('Should migrate from 2.8.0', function () { + c = makeTest({ id: 'migrate' }) + var settings_key = limiterKeys(c.limiter)[0] + var limiter2 + + return c.limiter.ready() + .then(function () { + var settings_key = limiterKeys(c.limiter)[0] + return Promise.all([ + runCommand(c.limiter, 'hset', [settings_key, 'version', '2.8.0']), + runCommand(c.limiter, 'hdel', [settings_key, 'done', 'capacityPriorityCounter', 'clientTimeout']), + runCommand(c.limiter, 'hset', [settings_key, 'lastReservoirRefresh', '']) + ]) + }) + .then(function () { + limiter2 = new Bottleneck({ + id: 'migrate', + datastore: process.env.DATASTORE + }) + return limiter2.ready() + }) + .then(function () { + return runCommand(c.limiter, 'hmget', [ + settings_key, + 'version', + 'done', + 'reservoirRefreshInterval', + 'reservoirRefreshAmount', + 'capacityPriorityCounter', + 'clientTimeout', + 'reservoirIncreaseAmount', + 'reservoirIncreaseMaximum', + // Add new values here, before these 2 timestamps + 'lastReservoirRefresh', + 'lastReservoirIncrease' + ]) + }) + .then(function (values) { + var timestamps = values.slice(-2) + timestamps.forEach((t) => assert(parseInt(t) > Date.now() - 500)) + c.mustEqual(values.slice(0, -timestamps.length), [ + '2.18.0', + '0', + '', + '', + '0', + '10000', + '', + '' + ]) + }) + .then(function () { + return limiter2.disconnect(false) + }) + }) + + it('Should keep track of each client\'s queue length', async function () { + c = makeTest({ + id: 'queues', + maxConcurrent: 1, + trackDoneStatus: true + }) + var limiter2 = new Bottleneck({ + datastore: process.env.DATASTORE, + id: 'queues', + maxConcurrent: 1, + trackDoneStatus: true + }) + var client_num_queued_key = limiterKeys(c.limiter)[5] + var clientId1 = c.limiter._store.clientId + var clientId2 = limiter2._store.clientId + + await c.limiter.ready() + await limiter2.ready() + + var p0 = c.limiter.schedule({id: 0}, c.slowPromise, 
100, null, 0) + await c.limiter._submitLock.schedule(() => Promise.resolve()) + + var p1 = c.limiter.schedule({id: 1}, c.promise, null, 1) + var p2 = c.limiter.schedule({id: 2}, c.promise, null, 2) + var p3 = limiter2.schedule({id: 3}, c.promise, null, 3) + + await Promise.all([ + c.limiter._submitLock.schedule(() => Promise.resolve()), + limiter2._submitLock.schedule(() => Promise.resolve()) + ]) + + var queuedA = await runCommand(c.limiter, 'hgetall', [client_num_queued_key]) + c.mustEqual(c.limiter.counts().QUEUED, 2) + c.mustEqual(limiter2.counts().QUEUED, 1) + c.mustEqual(~~queuedA[clientId1], 2) + c.mustEqual(~~queuedA[clientId2], 1) + + c.mustEqual(await c.limiter.clusterQueued(), 3) + + await Promise.all([p0, p1, p2, p3]) + var queuedB = await runCommand(c.limiter, 'hgetall', [client_num_queued_key]) + c.mustEqual(c.limiter.counts().QUEUED, 0) + c.mustEqual(limiter2.counts().QUEUED, 0) + c.mustEqual(~~queuedB[clientId1], 0) + c.mustEqual(~~queuedB[clientId2], 0) + c.mustEqual(c.limiter.counts().DONE, 3) + c.mustEqual(limiter2.counts().DONE, 1) + + c.mustEqual(await c.limiter.clusterQueued(), 0) + + return limiter2.disconnect(false) + }) + + it('Should publish capacity increases', function () { + c = makeTest({ maxConcurrent: 2 }) + var limiter2 + var p3, p4 + + return c.limiter.ready() + .then(function () { + limiter2 = new Bottleneck({ datastore: process.env.DATASTORE }) + return limiter2.ready() + }) + .then(function () { + var p1 = c.limiter.schedule({id: 1}, c.slowPromise, 100, null, 1) + var p2 = c.limiter.schedule({id: 2}, c.slowPromise, 100, null, 2) + + return c.limiter.schedule({id: 0, weight: 0}, c.promise, null, 0) + }) + .then(function () { + return limiter2.schedule({id: 3}, c.slowPromise, 100, null, 3) + }) + .then(c.last) + .then(function (results) { + c.checkResultsOrder([[0], [1], [2], [3]]) + c.checkDuration(200) + + return limiter2.disconnect(false) + }) + }) + + it('Should publish capacity changes on reservoir changes', function () { + c = makeTest({ + maxConcurrent: 2, + reservoir: 2 + }) + var limiter2 + var p3, p4 + + return c.limiter.ready() + .then(function () { + limiter2 = new Bottleneck({ + datastore: process.env.DATASTORE, + }) + return limiter2.ready() + }) + .then(function () { + var p1 = c.limiter.schedule({id: 1}, c.slowPromise, 100, null, 1) + var p2 = c.limiter.schedule({id: 2}, c.slowPromise, 100, null, 2) + + return c.limiter.schedule({id: 0, weight: 0}, c.promise, null, 0) + }) + .then(function () { + p3 = limiter2.schedule({id: 3, weight: 2}, c.slowPromise, 100, null, 3) + return c.limiter.currentReservoir() + }) + .then(function (reservoir) { + c.mustEqual(reservoir, 0) + return c.limiter.updateSettings({ reservoir: 1 }) + }) + .then(function () { + return c.limiter.incrementReservoir(1) + }) + .then(function (reservoir) { + c.mustEqual(reservoir, 2) + return p3 + }) + .then(function (result) { + c.mustEqual(result, [3]) + return c.limiter.currentReservoir() + }) + .then(function (reservoir) { + c.mustEqual(reservoir, 0) + return c.last({ weight: 0 }) + }) + .then(function (results) { + c.checkResultsOrder([[0], [1], [2], [3]]) + c.checkDuration(210) + }) + .then(function (data) { + return limiter2.disconnect(false) + }) + }) + + it('Should remove track job data and remove lost jobs', function () { + c = makeTest({ + id: 'lost', + errorEventsExpected: true + }) + var clientId = c.limiter._store.clientId + var limiter1 = new Bottleneck({ datastore: process.env.DATASTORE }) + var limiter2 = new Bottleneck({ + id: 'lost', + datastore: 
process.env.DATASTORE, + heartbeatInterval: 150 + }) + var getData = function (limiter) { + c.mustEqual(limiterKeys(limiter).length, 8) // Asserting, to remember to edit this test when keys change + var [ + settings_key, + job_weights_key, + job_expirations_key, + job_clients_key, + client_running_key, + client_num_queued_key, + client_last_registered_key, + client_last_seen_key + ] = limiterKeys(limiter) + + return Promise.all([ + runCommand(limiter1, 'hmget', [settings_key, 'running', 'done']), + runCommand(limiter1, 'hgetall', [job_weights_key]), + runCommand(limiter1, 'zcard', [job_expirations_key]), + runCommand(limiter1, 'hvals', [job_clients_key]), + runCommand(limiter1, 'zrange', [client_running_key, '0', '-1', 'withscores']), + runCommand(limiter1, 'hvals', [client_num_queued_key]), + runCommand(limiter1, 'zrange', [client_last_registered_key, '0', '-1', 'withscores']), + runCommand(limiter1, 'zrange', [client_last_seen_key, '0', '-1', 'withscores']) + ]) + } + var sumWeights = function (weights) { + return Object.keys(weights).reduce((acc, x) => { + return acc + ~~weights[x] + }, 0) + } + var numExpirations = 0 + var errorHandler = function (err) { + if (err.message.indexOf('This job timed out') === 0) { + numExpirations++ + } + } + + return Promise.all([c.limiter.ready(), limiter1.ready(), limiter2.ready()]) + .then(function () { + // No expiration, it should not be removed + c.pNoErrVal(c.limiter.schedule({ weight: 1 }, c.slowPromise, 150, null, 1), 1), + + // Expiration present, these jobs should be removed automatically + c.limiter.schedule({ expiration: 50, weight: 2 }, c.slowPromise, 75, null, 2).catch(errorHandler) + c.limiter.schedule({ expiration: 50, weight: 3 }, c.slowPromise, 75, null, 3).catch(errorHandler) + c.limiter.schedule({ expiration: 50, weight: 4 }, c.slowPromise, 75, null, 4).catch(errorHandler) + c.limiter.schedule({ expiration: 50, weight: 5 }, c.slowPromise, 75, null, 5).catch(errorHandler) + + return c.limiter._submitLock.schedule(() => Promise.resolve(true)) + }) + .then(function () { + return c.limiter._drainAll() + }) + .then(function () { + return c.limiter.disconnect(false) + }) + .then(function () { + }) + .then(function () { + return getData(c.limiter) + }) + .then(function ([ + settings, + job_weights, + job_expirations, + job_clients, + client_running, + client_num_queued, + client_last_registered, + client_last_seen + ]) { + c.mustEqual(settings, ['15', '0']) + c.mustEqual(sumWeights(job_weights), 15) + c.mustEqual(job_expirations, 4) + c.mustEqual(job_clients.length, 5) + job_clients.forEach((id) => c.mustEqual(id, clientId)) + c.mustEqual(sumWeights(client_running), 15) + c.mustEqual(client_num_queued, ['0', '0']) + c.mustEqual(client_last_registered[1], '0') + assert(client_last_seen[1] > Date.now() - 1000) + var passed = Date.now() - parseFloat(client_last_registered[3]) + assert(passed > 0 && passed < 20) + + return c.wait(170) + }) + .then(function () { + return getData(c.limiter) + }) + .then(function ([ + settings, + job_weights, + job_expirations, + job_clients, + client_running, + client_num_queued, + client_last_registered, + client_last_seen + ]) { + c.mustEqual(settings, ['1', '14']) + c.mustEqual(sumWeights(job_weights), 1) + c.mustEqual(job_expirations, 0) + c.mustEqual(job_clients.length, 1) + job_clients.forEach((id) => c.mustEqual(id, clientId)) + c.mustEqual(sumWeights(client_running), 1) + c.mustEqual(client_num_queued, ['0', '0']) + c.mustEqual(client_last_registered[1], '0') + assert(client_last_seen[1] > Date.now() - 
1000) + var passed = Date.now() - parseFloat(client_last_registered[3]) + assert(passed > 170 && passed < 200) + + c.mustEqual(numExpirations, 4) + }) + .then(function () { + return Promise.all([ + limiter1.disconnect(false), + limiter2.disconnect(false) + ]) + }) + }) + + it('Should clear unresponsive clients', async function () { + c = makeTest({ + id: 'unresponsive', + maxConcurrent: 1, + timeout: 1000, + clientTimeout: 100, + heartbeat: 50 + }) + const limiter2 = new Bottleneck({ + id: 'unresponsive', + datastore: process.env.DATASTORE + }) + + await Promise.all([c.limiter.running(), limiter2.running()]) + + const client_running_key = limiterKeys(limiter2)[4] + const client_num_queued_key = limiterKeys(limiter2)[5] + const client_last_registered_key = limiterKeys(limiter2)[6] + const client_last_seen_key = limiterKeys(limiter2)[7] + const numClients = () => Promise.all([ + runCommand(c.limiter, 'zcard', [client_running_key]), + runCommand(c.limiter, 'hlen', [client_num_queued_key]), + runCommand(c.limiter, 'zcard', [client_last_registered_key]), + runCommand(c.limiter, 'zcard', [client_last_seen_key]) + ]) + + c.mustEqual(await numClients(), [2, 2, 2, 2]) + + await limiter2.disconnect(false) + await c.wait(150) + + await c.limiter.running() + + c.mustEqual(await numClients(), [1, 1, 1, 1]) + + }) + + + it('Should not clear unresponsive clients with unexpired running jobs', async function () { + c = makeTest({ + id: 'unresponsive-unexpired', + maxConcurrent: 1, + timeout: 1000, + clientTimeout: 200, + heartbeat: 2000 + }) + const limiter2 = new Bottleneck({ + id: 'unresponsive-unexpired', + datastore: process.env.DATASTORE + }) + + await c.limiter.ready() + await limiter2.ready() + + const client_running_key = limiterKeys(limiter2)[4] + const client_num_queued_key = limiterKeys(limiter2)[5] + const client_last_registered_key = limiterKeys(limiter2)[6] + const client_last_seen_key = limiterKeys(limiter2)[7] + const numClients = () => Promise.all([ + runCommand(limiter2, 'zcard', [client_running_key]), + runCommand(limiter2, 'hlen', [client_num_queued_key]), + runCommand(limiter2, 'zcard', [client_last_registered_key]), + runCommand(limiter2, 'zcard', [client_last_seen_key]) + ]) + + const job = c.limiter.schedule(c.slowPromise, 500, null, 1) + + await c.wait(300) + + // running() triggers process_tick and that will attempt to remove client 1 + // but it shouldn't do it because it has a running job + c.mustEqual(await limiter2.running(), 1) + + c.mustEqual(await numClients(), [2, 2, 2, 2]) + + await job + + c.mustEqual(await limiter2.running(), 0) + + await limiter2.disconnect(false) + }) + + it('Should clear unresponsive clients after last jobs are expired', async function () { + c = makeTest({ + id: 'unresponsive-expired', + maxConcurrent: 1, + timeout: 1000, + clientTimeout: 200, + heartbeat: 2000 + }) + const limiter2 = new Bottleneck({ + id: 'unresponsive-expired', + datastore: process.env.DATASTORE + }) + + await c.limiter.ready() + await limiter2.ready() + + const client_running_key = limiterKeys(limiter2)[4] + const client_num_queued_key = limiterKeys(limiter2)[5] + const client_last_registered_key = limiterKeys(limiter2)[6] + const client_last_seen_key = limiterKeys(limiter2)[7] + const numClients = () => Promise.all([ + runCommand(limiter2, 'zcard', [client_running_key]), + runCommand(limiter2, 'hlen', [client_num_queued_key]), + runCommand(limiter2, 'zcard', [client_last_registered_key]), + runCommand(limiter2, 'zcard', [client_last_seen_key]) + ]) + + const job = 
c.limiter.schedule({ expiration: 250 }, c.slowPromise, 300, null, 1) + await c.wait(100) // wait for it to register + + c.mustEqual(await c.limiter.running(), 1) + c.mustEqual(await numClients(), [2,2,2,2]) + + let dropped = false + try { + await job + } catch (e) { + if (e.message === 'This job timed out after 250 ms.') { + dropped = true + } else { + throw e + } + } + assert(dropped) + + await c.wait(200) + + c.mustEqual(await limiter2.running(), 0) + c.mustEqual(await numClients(), [1,1,1,1]) + + await limiter2.disconnect(false) + }) + + it('Should use shared settings', function () { + c = makeTest({ maxConcurrent: 2 }) + var limiter2 = new Bottleneck({ maxConcurrent: 1, datastore: process.env.DATASTORE }) + + return Promise.all([ + limiter2.schedule(c.slowPromise, 100, null, 1), + limiter2.schedule(c.slowPromise, 100, null, 2) + ]) + .then(function () { + return limiter2.disconnect(false) + }) + .then(function () { + return c.last() + }) + .then(function (results) { + c.checkResultsOrder([[1], [2]]) + c.checkDuration(100) + }) + }) + + it('Should clear previous settings', function () { + c = makeTest({ maxConcurrent: 2 }) + var limiter2 + + return c.limiter.ready() + .then(function () { + limiter2 = new Bottleneck({ maxConcurrent: 1, datastore: process.env.DATASTORE, clearDatastore: true }) + return limiter2.ready() + }) + .then(function () { + return Promise.all([ + c.limiter.schedule(c.slowPromise, 100, null, 1), + c.limiter.schedule(c.slowPromise, 100, null, 2) + ]) + }) + .then(function () { + return limiter2.disconnect(false) + }) + .then(function () { + return c.last() + }) + .then(function (results) { + c.checkResultsOrder([[1], [2]]) + c.checkDuration(200) + }) + }) + + it('Should safely handle connection failures', function () { + c = makeTest({ + clientOptions: { port: 1 }, + errorEventsExpected: true + }) + + return new Promise(function (resolve, reject) { + c.limiter.on('error', function (err) { + assert(err != null) + resolve() + }) + + c.limiter.ready() + .then(function () { + reject(new Error('Should not have connected')) + }) + .catch(function (err) { + reject(err) + }) + }) + }) + + it('Should chain local and distributed limiters (total concurrency)', function () { + c = makeTest({ id: 'limiter1', maxConcurrent: 3 }) + var limiter2 = new Bottleneck({ id: 'limiter2', maxConcurrent: 1 }) + var limiter3 = new Bottleneck({ id: 'limiter3', maxConcurrent: 2 }) + + limiter2.on('error', (err) => console.log(err)) + + limiter2.chain(c.limiter) + limiter3.chain(c.limiter) + + return Promise.all([ + limiter2.schedule(c.slowPromise, 100, null, 1), + limiter2.schedule(c.slowPromise, 100, null, 2), + limiter2.schedule(c.slowPromise, 100, null, 3), + limiter3.schedule(c.slowPromise, 100, null, 4), + limiter3.schedule(c.slowPromise, 100, null, 5), + limiter3.schedule(c.slowPromise, 100, null, 6) + ]) + .then(c.last) + .then(function (results) { + c.checkDuration(300) + c.checkResultsOrder([[1], [4], [5], [2], [6], [3]]) + + assert(results.calls[0].time >= 100 && results.calls[0].time < 200) + assert(results.calls[1].time >= 100 && results.calls[1].time < 200) + assert(results.calls[2].time >= 100 && results.calls[2].time < 200) + + assert(results.calls[3].time >= 200 && results.calls[3].time < 300) + assert(results.calls[4].time >= 200 && results.calls[4].time < 300) + + assert(results.calls[5].time >= 300 && results.calls[2].time < 400) + }) + }) + + it('Should chain local and distributed limiters (partial concurrency)', function () { + c = makeTest({ maxConcurrent: 2 }) + var 
limiter2 = new Bottleneck({ maxConcurrent: 1 }) + var limiter3 = new Bottleneck({ maxConcurrent: 2 }) + + limiter2.chain(c.limiter) + limiter3.chain(c.limiter) + + return Promise.all([ + limiter2.schedule(c.slowPromise, 100, null, 1), + limiter2.schedule(c.slowPromise, 100, null, 2), + limiter2.schedule(c.slowPromise, 100, null, 3), + limiter3.schedule(c.slowPromise, 100, null, 4), + limiter3.schedule(c.slowPromise, 100, null, 5), + limiter3.schedule(c.slowPromise, 100, null, 6) + ]) + .then(c.last) + .then(function (results) { + c.checkDuration(300) + c.checkResultsOrder([[1], [4], [5], [2], [6], [3]]) + + assert(results.calls[0].time >= 100 && results.calls[0].time < 200) + assert(results.calls[1].time >= 100 && results.calls[1].time < 200) + + assert(results.calls[2].time >= 200 && results.calls[2].time < 300) + assert(results.calls[3].time >= 200 && results.calls[3].time < 300) + + assert(results.calls[4].time >= 300 && results.calls[4].time < 400) + assert(results.calls[5].time >= 300 && results.calls[2].time < 400) + }) + }) + + it('Should use the limiter ID to build Redis keys', function () { + c = makeTest() + var randomId = c.limiter._randomIndex() + var limiter = new Bottleneck({ id: randomId, datastore: process.env.DATASTORE, clearDatastore: true }) + + return limiter.ready() + .then(function () { + var keys = limiterKeys(limiter) + keys.forEach((key) => assert(key.indexOf(randomId) > 0)) + return deleteKeys(limiter) + }) + .then(function (deleted) { + c.mustEqual(deleted, 5) + return limiter.disconnect(false) + }) + }) + + it('Should not fail when Redis data is missing', function () { + c = makeTest() + var limiter = new Bottleneck({ datastore: process.env.DATASTORE, clearDatastore: true }) + + return limiter.running() + .then(function (running) { + c.mustEqual(running, 0) + return deleteKeys(limiter) + }) + .then(function (deleted) { + c.mustEqual(deleted, 5) + return countKeys(limiter) + }) + .then(function (count) { + c.mustEqual(count, 0) + return limiter.running() + }) + .then(function (running) { + c.mustEqual(running, 0) + return countKeys(limiter) + }) + .then(function (count) { + assert(count > 0) + return limiter.disconnect(false) + }) + }) + + it('Should drop all jobs in the Cluster when entering blocked mode', function () { + c = makeTest() + var limiter1 = new Bottleneck({ + id: 'blocked', + trackDoneStatus: true, + datastore: process.env.DATASTORE, + clearDatastore: true, + + maxConcurrent: 1, + minTime: 50, + highWater: 2, + strategy: Bottleneck.strategy.BLOCK + }) + var limiter2 + var client_num_queued_key = limiterKeys(limiter1)[5] + + return limiter1.ready() + .then(function () { + limiter2 = new Bottleneck({ + id: 'blocked', + trackDoneStatus: true, + datastore: process.env.DATASTORE, + clearDatastore: false, + }) + return limiter2.ready() + }) + .then(function () { + return Promise.all([ + limiter1.submit(c.slowJob, 100, null, 1, c.noErrVal(1)), + limiter1.submit(c.slowJob, 100, null, 2, (err) => c.mustExist(err)) + ]) + }) + .then(function () { + return Promise.all([ + limiter2.submit(c.slowJob, 100, null, 3, (err) => c.mustExist(err)), + limiter2.submit(c.slowJob, 100, null, 4, (err) => c.mustExist(err)), + limiter2.submit(c.slowJob, 100, null, 5, (err) => c.mustExist(err)) + ]) + }) + .then(function () { + return runCommand(limiter1, 'hvals', [client_num_queued_key]) + }) + .then(function (queues) { + c.mustEqual(queues, ['0', '0']) + + return Promise.all([ + c.limiter.clusterQueued(), + limiter2.clusterQueued() + ]) + }) + .then(function (queues) { + 
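[Annotation, not part of the patch.] For orientation on the blocked-mode test above: Bottleneck.strategy.BLOCK is the documented strategy where, once the queue grows past highWater, the limiter drops all queued jobs and refuses new submissions for a penalty period; with a Redis datastore the test asserts the drop propagates cluster-wide to every limiter sharing the id. A minimal local-datastore sketch of the option shape; the penalty value is an assumption (the option has a minTime-based default):

const Bottleneck = require('bottleneck')

const blocker = new Bottleneck({
  maxConcurrent: 1,
  minTime: 50,
  highWater: 2,                        // queue length that triggers blocked mode
  strategy: Bottleneck.strategy.BLOCK, // drop queued jobs, refuse new ones
  penalty: 750                         // assumed: ms to stay in blocked mode
})

// Jobs submitted while blocked fail fast instead of queueing.
blocker.schedule(() => Promise.resolve()).catch((err) => console.error(err.message))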
c.mustEqual(queues, [0, 0]) + + return c.wait(100) + }) + .then(function () { + var counts1 = limiter1.counts() + c.mustEqual(counts1.RECEIVED, 0) + c.mustEqual(counts1.QUEUED, 0) + c.mustEqual(counts1.RUNNING, 0) + c.mustEqual(counts1.EXECUTING, 0) + c.mustEqual(counts1.DONE, 1) + + var counts2 = limiter2.counts() + c.mustEqual(counts2.RECEIVED, 0) + c.mustEqual(counts2.QUEUED, 0) + c.mustEqual(counts2.RUNNING, 0) + c.mustEqual(counts2.EXECUTING, 0) + c.mustEqual(counts2.DONE, 0) + + return c.last() + }) + .then(function (results) { + c.checkResultsOrder([[1]]) + c.checkDuration(100) + + return Promise.all([ + limiter1.disconnect(false), + limiter2.disconnect(false) + ]) + }) + }) + + it('Should pass messages to all limiters in Cluster', function (done) { + c = makeTest({ + maxConcurrent: 1, + minTime: 100, + id: 'super-duper' + }) + var limiter1 = new Bottleneck({ + maxConcurrent: 1, + minTime: 100, + id: 'super-duper', + datastore: process.env.DATASTORE + }) + var limiter2 = new Bottleneck({ + maxConcurrent: 1, + minTime: 100, + id: 'nope', + datastore: process.env.DATASTORE + }) + var received = [] + + c.limiter.on('message', (msg) => { + received.push(1, msg) + }) + limiter1.on('message', (msg) => { + received.push(2, msg) + }) + limiter2.on('message', (msg) => { + received.push(3, msg) + }) + + Promise.all([c.limiter.ready(), limiter2.ready()]) + .then(function () { + limiter1.publish(555) + }) + + setTimeout(function () { + limiter1.disconnect() + limiter2.disconnect() + c.mustEqual(received.sort(), [1, 2, '555', '555']) + done() + }, 150) + }) + + it('Should pass messages to correct limiter after Group re-instantiations', function () { + c = makeTest() + var group = new Bottleneck.Group({ + maxConcurrent: 1, + minTime: 100, + datastore: process.env.DATASTORE + }) + var received = [] + + return new Promise(function (resolve, reject) { + var limiter = group.key('A') + + limiter.on('message', function (msg) { + received.push('1', msg) + return resolve() + }) + limiter.publish('Bonjour!') + }) + .then(function () { + return new Promise(function (resolve, reject) { + var limiter = group.key('B') + + limiter.on('message', function (msg) { + received.push('2', msg) + return resolve() + }) + limiter.publish('Comment allez-vous?') + }) + }) + .then(function () { + return group.deleteKey('A') + }) + .then(function () { + return new Promise(function (resolve, reject) { + var limiter = group.key('A') + + limiter.on('message', function (msg) { + received.push('3', msg) + return resolve() + }) + limiter.publish('Au revoir!') + }) + }) + .then(function () { + c.mustEqual(received, ['1', 'Bonjour!', '2', 'Comment allez-vous?', '3', 'Au revoir!']) + group.disconnect() + }) + }) + + it('Should have a default key TTL when using Groups', function () { + c = makeTest() + var group = new Bottleneck.Group({ + datastore: process.env.DATASTORE + }) + + return group.key('one').ready() + .then(function () { + var limiter = group.key('one') + var settings_key = limiterKeys(limiter)[0] + return runCommand(limiter, 'ttl', [settings_key]) + }) + .then(function (ttl) { + assert(ttl >= 290 && ttl <= 305) + }) + .then(function () { + return group.disconnect(false) + }) + }) + + it('Should support Groups and expire Redis keys', function () { + c = makeTest() + var group = new Bottleneck.Group({ + datastore: process.env.DATASTORE, + clearDatastore: true, + minTime: 50, + timeout: 200 + }) + var limiter1 + var limiter2 + var limiter3 + + var t0 = Date.now() + var results = {} + var job = function (x) { + results[x] = 
Date.now() - t0 + return Promise.resolve() + } + + return c.limiter.ready() + .then(function () { + limiter1 = group.key('one') + limiter2 = group.key('two') + limiter3 = group.key('three') + + return Promise.all([limiter1.ready(), limiter2.ready(), limiter3.ready()]) + }) + .then(function () { + return Promise.all([countKeys(limiter1), countKeys(limiter2), countKeys(limiter3)]) + }) + .then(function (counts) { + c.mustEqual(counts, [5, 5, 5]) + return Promise.all([ + limiter1.schedule(job, 'a'), + limiter1.schedule(job, 'b'), + limiter1.schedule(job, 'c'), + limiter2.schedule(job, 'd'), + limiter2.schedule(job, 'e'), + limiter3.schedule(job, 'f') + ]) + }) + .then(function () { + c.mustEqual(Object.keys(results).length, 6) + assert(results.a < results.b) + assert(results.b < results.c) + assert(results.b - results.a >= 40) + assert(results.c - results.b >= 40) + + assert(results.d < results.e) + assert(results.e - results.d >= 40) + + assert(Math.abs(results.a - results.d) <= 10) + assert(Math.abs(results.d - results.f) <= 10) + assert(Math.abs(results.b - results.e) <= 10) + + return c.wait(400) + }) + .then(function () { + return Promise.all([countKeys(limiter1), countKeys(limiter2), countKeys(limiter3)]) + }) + .then(function (counts) { + c.mustEqual(counts, [0, 0, 0]) + c.mustEqual(group.keys().length, 0) + c.mustEqual(Object.keys(group.connection.limiters).length, 0) + return group.disconnect(false) + }) + + }) + + it('Should not recreate a key when running heartbeat', function () { + c = makeTest() + var group = new Bottleneck.Group({ + datastore: process.env.DATASTORE, + clearDatastore: true, + maxConcurrent: 50, + minTime: 50, + timeout: 300, + heartbeatInterval: 5 + }) + var key = 'heartbeat' + + var limiter = group.key(key) + return c.pNoErrVal(limiter.schedule(c.promise, null, 1), 1) + .then(function () { + return limiter.done() + }) + .then(function (done) { + c.mustEqual(done, 1) + return c.wait(400) + }) + .then(function () { + return countKeys(limiter) + }) + .then(function (count) { + c.mustEqual(count, 0) + return group.disconnect(false) + }) + }) + + it('Should delete Redis key when manually deleting a group key', function () { + c = makeTest() + var group1 = new Bottleneck.Group({ + datastore: process.env.DATASTORE, + clearDatastore: true, + maxConcurrent: 50, + minTime: 50, + timeout: 300 + }) + var group2 = new Bottleneck.Group({ + datastore: process.env.DATASTORE, + clearDatastore: true, + maxConcurrent: 50, + minTime: 50, + timeout: 300 + }) + var key = 'deleted' + var limiter = group1.key(key) // only for countKeys() use + + return c.pNoErrVal(group1.key(key).schedule(c.promise, null, 1), 1) + .then(function () { + return c.pNoErrVal(group2.key(key).schedule(c.promise, null, 2), 2) + }) + .then(function () { + c.mustEqual(group1.keys().length, 1) + c.mustEqual(group2.keys().length, 1) + return group1.deleteKey(key) + }) + .then(function (deleted) { + c.mustEqual(deleted, true) + return countKeys(limiter) + }) + .then(function (count) { + c.mustEqual(count, 0) + c.mustEqual(group1.keys().length, 0) + c.mustEqual(group2.keys().length, 1) + return c.wait(200) + }) + .then(function () { + c.mustEqual(group1.keys().length, 0) + c.mustEqual(group2.keys().length, 0) + return Promise.all([ + group1.disconnect(false), + group2.disconnect(false) + ]) + }) + }) + + it('Should delete Redis keys from a group even when the local limiter is not present', function () { + c = makeTest() + var group1 = new Bottleneck.Group({ + datastore: process.env.DATASTORE, + clearDatastore: 
true, + maxConcurrent: 50, + minTime: 50, + timeout: 300 + }) + var group2 = new Bottleneck.Group({ + datastore: process.env.DATASTORE, + clearDatastore: true, + maxConcurrent: 50, + minTime: 50, + timeout: 300 + }) + var key = 'deleted-cluster-wide' + var limiter = group1.key(key) // only for countKeys() use + + return c.pNoErrVal(group1.key(key).schedule(c.promise, null, 1), 1) + .then(function () { + c.mustEqual(group1.keys().length, 1) + c.mustEqual(group2.keys().length, 0) + return group2.deleteKey(key) + }) + .then(function (deleted) { + c.mustEqual(deleted, true) + return countKeys(limiter) + }) + .then(function (count) { + c.mustEqual(count, 0) + c.mustEqual(group1.keys().length, 1) + c.mustEqual(group2.keys().length, 0) + return c.wait(200) + }) + .then(function () { + c.mustEqual(group1.keys().length, 0) + c.mustEqual(group2.keys().length, 0) + return Promise.all([ + group1.disconnect(false), + group2.disconnect(false) + ]) + }) + }) + + it('Should returns all Group keys in the cluster', async function () { + c = makeTest() + var group1 = new Bottleneck.Group({ + datastore: process.env.DATASTORE, + clearDatastore: true, + id: 'same', + timeout: 3000 + }) + var group2 = new Bottleneck.Group({ + datastore: process.env.DATASTORE, + clearDatastore: true, + id: 'same', + timeout: 3000 + }) + var keys1 = ['lorem', 'ipsum', 'dolor', 'sit', 'amet', 'consectetur'] + var keys2 = ['adipiscing', 'elit'] + var both = keys1.concat(keys2) + + await Promise.all(keys1.map((k) => group1.key(k).ready())) + await Promise.all(keys2.map((k) => group2.key(k).ready())) + + c.mustEqual(group1.keys().sort(), keys1.sort()) + c.mustEqual(group2.keys().sort(), keys2.sort()) + c.mustEqual( + (await group1.clusterKeys()).sort(), + both.sort() + ) + c.mustEqual( + (await group1.clusterKeys()).sort(), + both.sort() + ) + + var group3 = new Bottleneck.Group({ datastore: 'local' }) + c.mustEqual(await group3.clusterKeys(), []) + + await group1.disconnect(false) + await group2.disconnect(false) + }) + + it('Should queue up the least busy limiter', async function () { + c = makeTest() + var limiter1 = new Bottleneck({ + datastore: process.env.DATASTORE, + clearDatastore: true, + id: 'busy', + timeout: 3000, + maxConcurrent: 3, + trackDoneStatus: true + }) + var limiter2 = new Bottleneck({ + datastore: process.env.DATASTORE, + clearDatastore: true, + id: 'busy', + timeout: 3000, + maxConcurrent: 3, + trackDoneStatus: true + }) + var limiter3 = new Bottleneck({ + datastore: process.env.DATASTORE, + clearDatastore: true, + id: 'busy', + timeout: 3000, + maxConcurrent: 3, + trackDoneStatus: true + }) + var limiter4 = new Bottleneck({ + datastore: process.env.DATASTORE, + clearDatastore: true, + id: 'busy', + timeout: 3000, + maxConcurrent: 3, + trackDoneStatus: true + }) + var runningOrExecuting = function (limiter) { + var counts = limiter.counts() + return counts.RUNNING + counts.EXECUTING + } + + var resolve1, resolve2, resolve3, resolve4, resolve5, resolve6, resolve7 + var p1 = new Promise(function (resolve, reject) { + resolve1 = function (err, n) { resolve(n) } + }) + var p2 = new Promise(function (resolve, reject) { + resolve2 = function (err, n) { resolve(n) } + }) + var p3 = new Promise(function (resolve, reject) { + resolve3 = function (err, n) { resolve(n) } + }) + var p4 = new Promise(function (resolve, reject) { + resolve4 = function (err, n) { resolve(n) } + }) + var p5 = new Promise(function (resolve, reject) { + resolve5 = function (err, n) { resolve(n) } + }) + var p6 = new Promise(function (resolve, 
reject) { + resolve6 = function (err, n) { resolve(n) } + }) + var p7 = new Promise(function (resolve, reject) { + resolve7 = function (err, n) { resolve(n) } + }) + + await limiter1.schedule({id: '1'}, c.promise, null, 'A') + await limiter2.schedule({id: '2'}, c.promise, null, 'B') + await limiter3.schedule({id: '3'}, c.promise, null, 'C') + await limiter4.schedule({id: '4'}, c.promise, null, 'D') + + await limiter1.submit({id: 'A'}, c.slowJob, 50, null, 1, resolve1) + await limiter1.submit({id: 'B'}, c.slowJob, 500, null, 2, resolve2) + await limiter2.submit({id: 'C'}, c.slowJob, 550, null, 3, resolve3) + + c.mustEqual(runningOrExecuting(limiter1), 2) + c.mustEqual(runningOrExecuting(limiter2), 1) + + await limiter3.submit({id: 'D'}, c.slowJob, 50, null, 4, resolve4) + await limiter4.submit({id: 'E'}, c.slowJob, 50, null, 5, resolve5) + await limiter3.submit({id: 'F'}, c.slowJob, 50, null, 6, resolve6) + await limiter4.submit({id: 'G'}, c.slowJob, 50, null, 7, resolve7) + + c.mustEqual(limiter3.counts().QUEUED, 2) + c.mustEqual(limiter4.counts().QUEUED, 2) + + await Promise.all([p1, p2, p3, p4, p5, p6, p7]) + + c.checkResultsOrder([['A'],['B'],['C'],['D'],[1],[4],[5],[6],[7],[2],[3]]) + + await limiter1.disconnect(false) + await limiter2.disconnect(false) + await limiter3.disconnect(false) + await limiter4.disconnect(false) + }) + + it('Should pass the remaining capacity to other limiters', async function () { + c = makeTest() + var limiter1 = new Bottleneck({ + datastore: process.env.DATASTORE, + clearDatastore: true, + id: 'busy', + timeout: 3000, + maxConcurrent: 3, + trackDoneStatus: true + }) + var limiter2 = new Bottleneck({ + datastore: process.env.DATASTORE, + clearDatastore: true, + id: 'busy', + timeout: 3000, + maxConcurrent: 3, + trackDoneStatus: true + }) + var limiter3 = new Bottleneck({ + datastore: process.env.DATASTORE, + clearDatastore: true, + id: 'busy', + timeout: 3000, + maxConcurrent: 3, + trackDoneStatus: true + }) + var limiter4 = new Bottleneck({ + datastore: process.env.DATASTORE, + clearDatastore: true, + id: 'busy', + timeout: 3000, + maxConcurrent: 3, + trackDoneStatus: true + }) + var runningOrExecuting = function (limiter) { + var counts = limiter.counts() + return counts.RUNNING + counts.EXECUTING + } + var t3, t4 + + var resolve1, resolve2, resolve3, resolve4, resolve5 + var p1 = new Promise(function (resolve, reject) { + resolve1 = function (err, n) { resolve(n) } + }) + var p2 = new Promise(function (resolve, reject) { + resolve2 = function (err, n) { resolve(n) } + }) + var p3 = new Promise(function (resolve, reject) { + resolve3 = function (err, n) { t3 = Date.now(); resolve(n) } + }) + var p4 = new Promise(function (resolve, reject) { + resolve4 = function (err, n) { t4 = Date.now(); resolve(n) } + }) + var p5 = new Promise(function (resolve, reject) { + resolve5 = function (err, n) { resolve(n) } + }) + + await limiter1.schedule({id: '1'}, c.promise, null, 'A') + await limiter2.schedule({id: '2'}, c.promise, null, 'B') + await limiter3.schedule({id: '3'}, c.promise, null, 'C') + await limiter4.schedule({id: '4'}, c.promise, null, 'D') + + await limiter1.submit({id: 'A', weight: 2}, c.slowJob, 50, null, 1, resolve1) + await limiter2.submit({id: 'C'}, c.slowJob, 550, null, 2, resolve2) + + c.mustEqual(runningOrExecuting(limiter1), 1) + c.mustEqual(runningOrExecuting(limiter2), 1) + + await limiter3.submit({id: 'D'}, c.slowJob, 50, null, 3, resolve3) + await limiter4.submit({id: 'E'}, c.slowJob, 50, null, 4, resolve4) + await limiter4.submit({id: 
'G'}, c.slowJob, 50, null, 5, resolve5) + + c.mustEqual(limiter3.counts().QUEUED, 1) + c.mustEqual(limiter4.counts().QUEUED, 2) + + await Promise.all([p1, p2, p3, p4, p5]) + + c.checkResultsOrder([['A'],['B'],['C'],['D'],[1],[3],[4],[5],[2]]) + + assert(Math.abs(t3 - t4) < 15) + + await limiter1.disconnect(false) + await limiter2.disconnect(false) + await limiter3.disconnect(false) + await limiter4.disconnect(false) + }) + + it('Should take the capacity and blacklist if the priority limiter is not responding', async function () { + c = makeTest() + var limiter1 = new Bottleneck({ + datastore: process.env.DATASTORE, + clearDatastore: true, + id: 'crash', + timeout: 3000, + maxConcurrent: 1, + trackDoneStatus: true + }) + var limiter2 = new Bottleneck({ + datastore: process.env.DATASTORE, + clearDatastore: true, + id: 'crash', + timeout: 3000, + maxConcurrent: 1, + trackDoneStatus: true + }) + var limiter3 = new Bottleneck({ + datastore: process.env.DATASTORE, + clearDatastore: true, + id: 'crash', + timeout: 3000, + maxConcurrent: 1, + trackDoneStatus: true + }) + + await limiter1.schedule({id: '1'}, c.promise, null, 'A') + await limiter2.schedule({id: '2'}, c.promise, null, 'B') + await limiter3.schedule({id: '3'}, c.promise, null, 'C') + + var resolve1, resolve2, resolve3 + var p1 = new Promise(function (resolve, reject) { + resolve1 = function (err, n) { resolve(n) } + }) + var p2 = new Promise(function (resolve, reject) { + resolve2 = function (err, n) { resolve(n) } + }) + var p3 = new Promise(function (resolve, reject) { + resolve3 = function (err, n) { resolve(n) } + }) + + await limiter1.submit({id: '4'}, c.slowJob, 100, null, 4, resolve1) + await limiter2.submit({id: '5'}, c.slowJob, 100, null, 5, resolve2) + await limiter3.submit({id: '6'}, c.slowJob, 100, null, 6, resolve3) + await limiter2.disconnect(false) + + await Promise.all([p1, p3]) + c.checkResultsOrder([['A'], ['B'], ['C'], [4], [6]]) + + await limiter1.disconnect(false) + await limiter2.disconnect(false) + await limiter3.disconnect(false) + }) + + }) +} diff --git a/node_modules/bottleneck/test/context.js b/node_modules/bottleneck/test/context.js new file mode 100644 index 000000000..8d498f3a2 --- /dev/null +++ b/node_modules/bottleneck/test/context.js @@ -0,0 +1,142 @@ +global.TEST = true +var Bottleneck = require('./bottleneck') +var assert = require('assert') + +module.exports = function (options={}) { + var mustEqual = function (a, b) { + var strA = JSON.stringify(a) + var strB = JSON.stringify(b) + if (strA !== strB) { + console.log(strA + ' !== ' + strB, (new Error('').stack)) + assert(strA === strB) + } + } + + var start + var calls = [] + + // set options.datastore + var setRedisClientOptions = function (options) { + options.clearDatastore = true + if (options.clientOptions == null) { + options.clientOptions = { + host: process.env.REDIS_HOST, + port: process.env.REDIS_PORT, + } + } + } + + if (options.datastore == null && process.env.DATASTORE === 'redis') { + options.datastore = 'redis' + setRedisClientOptions(options) + } else if (options.datastore == null && process.env.DATASTORE === 'ioredis') { + options.datastore = 'ioredis' + setRedisClientOptions(options) + } else { + options.datastore = 'local' + } + + var limiter = new Bottleneck(options) + // limiter.on("debug", function (str, args) { console.log(`${Date.now()-start} ${str} ${JSON.stringify(args)}`) }) + if (!options.errorEventsExpected) { + limiter.on("error", function (err) { + console.log('(CONTEXT) ERROR EVENT', err) + }) + } + 
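[Annotation, not part of the patch.] A quick orientation for readers of the suites that consume this harness: makeTest() returns a context whose job/promise helpers log each call with a timestamp relative to limiter readiness, and whose last()/checkResultsOrder()/checkDuration() helpers replay that log as assertions. A comment-only usage sketch, mirroring calls that appear throughout the suites above and below:

// var c = makeTest({ maxConcurrent: 1, minTime: 100 })
// c.pNoErrVal(c.limiter.schedule(c.promise, null, 1), 1) // expect result [1]
// c.last()                            // schedules a final job returning the call log
//   .then(function (results) {
//     c.checkResultsOrder([[1]])      // jobs completed in this order
//     c.checkDuration(0)              // total duration within tolerance of 0 ms
//   })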
limiter.ready().then(function (client) { + start = Date.now() + }) + var getResults = function () { + return { + elapsed: Date.now() - start, + callsDuration: calls.length > 0 ? calls[calls.length - 1].time : null, + calls: calls + } + } + + var context = { + job: function (err, ...result) { + var cb = result.pop() + calls.push({err: err, result: result, time: Date.now()-start}) + if (process.env.DEBUG) console.log(result, calls) + cb.apply({}, [err].concat(result)) + }, + slowJob: function (duration, err, ...result) { + setTimeout(function () { + var cb = result.pop() + calls.push({err: err, result: result, time: Date.now()-start}) + if (process.env.DEBUG) console.log(result, calls) + cb.apply({}, [err].concat(result)) + }, duration) + }, + promise: function (err, ...result) { + return new Promise(function (resolve, reject) { + if (process.env.DEBUG) console.log('In c.promise. Result: ', result) + calls.push({err: err, result: result, time: Date.now()-start}) + if (process.env.DEBUG) console.log(result, calls) + if (err === null) { + return resolve(result) + } else { + return reject(err) + } + }) + }, + slowPromise: function (duration, err, ...result) { + return new Promise(function (resolve, reject) { + setTimeout(function () { + if (process.env.DEBUG) console.log('In c.slowPromise. Result: ', result) + calls.push({err: err, result: result, time: Date.now()-start}) + if (process.env.DEBUG) console.log(result, calls) + if (err === null) { + return resolve(result) + } else { + return reject(err) + } + }, duration) + }) + }, + pNoErrVal: function (promise, ...expected) { + if (process.env.DEBUG) console.log('In c.pNoErrVal. Expected:', expected) + return promise.then(function (actual) { + mustEqual(actual, expected) + }) + }, + noErrVal: function (...expected) { + return function (err, ...actual) { + mustEqual(err, null) + mustEqual(actual, expected) + } + }, + last: function (options) { + var opt = options != null ? options : {} + return limiter.schedule(opt, function () { return Promise.resolve(getResults()) }) + .catch(function (err) { console.error("Error in context.last:", err)}) + }, + wait: function (wait) { + return new Promise(function (resolve, reject) { + setTimeout(resolve, wait) + }) + }, + limiter: limiter, + mustEqual: mustEqual, + mustExist: function (a) { assert(a != null) }, + results: getResults, + checkResultsOrder: function (order) { + mustEqual(order.length, calls.length) + for (var i = 0; i < Math.max(calls.length, order.length); i++) { + mustEqual(order[i], calls[i].result) + } + }, + checkDuration: function (shouldBe, minBound = 10) { + var results = getResults() + var min = shouldBe - minBound + var max = shouldBe + 50 + if (!(results.callsDuration > min && results.callsDuration < max)) { + console.error('Duration not around ' + shouldBe + '. 
Was ' + results.callsDuration) + } + assert(results.callsDuration > min && results.callsDuration < max) + } + } + + return context +} diff --git a/node_modules/bottleneck/test/general.js b/node_modules/bottleneck/test/general.js new file mode 100644 index 000000000..14aa63a51 --- /dev/null +++ b/node_modules/bottleneck/test/general.js @@ -0,0 +1,867 @@ +var makeTest = require('./context') +var Bottleneck = require('./bottleneck') +var assert = require('assert') +var child_process = require('child_process') + +describe('General', function () { + var c + + afterEach(function () { + return c.limiter.disconnect(false) + }) + + if ( + process.env.DATASTORE !== 'redis' && process.env.DATASTORE !== 'ioredis' && + process.env.BUILD !== 'es5' && process.env.BUILD !== 'light' + ) { + it('Should not leak memory on instantiation', async function () { + c = makeTest() + this.timeout(8000) + const { iterate } = require('leakage') + + const result = await iterate.async(async () => { + const limiter = new Bottleneck({ datastore: 'local' }) + await limiter.ready() + return limiter.disconnect(false) + }, { iterations: 25 }) + + }) + + it('Should not leak memory running jobs', async function () { + c = makeTest() + this.timeout(12000) + const { iterate } = require('leakage') + const limiter = new Bottleneck({ datastore: 'local', maxConcurrent: 1, minTime: 10 }) + await limiter.ready() + var ctr = 0 + var i = 0 + + const result = await iterate.async(async () => { + await limiter.schedule(function (zero, one) { + i = i + zero + one + }, 0, 1) + await limiter.schedule(function (zero, one) { + i = i + zero + one + }, 0, 1) + }, { iterations: 25 }) + c.mustEqual(i, 302) + }) + } + + it('Should prompt to upgrade', function () { + c = makeTest() + try { + var limiter = new Bottleneck(1, 250) + } catch (err) { + c.mustEqual(err.message, 'Bottleneck v2 takes a single object argument. 
Refer to https://github.com/SGrondin/bottleneck#upgrading-to-v2 if you\'re upgrading from Bottleneck v1.') + } + }) + + it('Should allow null capacity', function () { + c = makeTest({ id: 'null', minTime: 0 }) + return c.limiter.updateSettings({ minTime: 10 }) + }) + + it('Should keep scope', async function () { + c = makeTest({ maxConcurrent: 1 }) + + class Job { + constructor() { + this.value = 5 + } + action(x) { + return this.value + x + } + } + var job = new Job() + + c.mustEqual(6, await c.limiter.schedule(() => job.action.bind(job)(1))) + c.mustEqual(7, await c.limiter.wrap(job.action.bind(job))(2)) + }) + + it('Should pass multiple arguments back even on errors when using submit()', function (done) { + c = makeTest({ maxConcurrent: 1 }) + + c.limiter.submit(c.job, new Error('welp'), 1, 2, function (err, x, y) { + c.mustEqual(err.message, 'welp') + c.mustEqual(x, 1) + c.mustEqual(y, 2) + done() + }) + }) + + it('Should expose the Events library', function (cb) { + c = makeTest() + + class Hello { + constructor() { + this.emitter = new Bottleneck.Events(this) + } + + doSomething() { + this.emitter.trigger('info', 'hello', 'world', 123) + return 5 + } + } + + const myObject = new Hello(); + myObject.on('info', (...args) => { + c.mustEqual(args, ['hello', 'world', 123]) + cb() + }) + myObject.doSomething() + c.mustEqual(myObject.emitter.listenerCount('info'), 1) + c.mustEqual(myObject.emitter.listenerCount('nothing'), 0) + + myObject.on('blah', '') + myObject.on('blah', null) + myObject.on('blah') + return myObject.emitter.trigger('blah') + }) + + describe('Counts and statuses', function () { + it('Should check() and return the queued count with and without a priority value', async function () { + c = makeTest({maxConcurrent: 1, minTime: 100}) + + c.mustEqual(await c.limiter.check(), true) + + c.mustEqual(c.limiter.queued(), 0) + c.mustEqual(await c.limiter.clusterQueued(), 0) + + await c.limiter.submit({id: 1}, c.slowJob, 50, null, 1, c.noErrVal(1)) + c.mustEqual(c.limiter.queued(), 0) // It's already running + + c.mustEqual(await c.limiter.check(), false) + + await c.limiter.submit({id: 2}, c.slowJob, 50, null, 2, c.noErrVal(2)) + c.mustEqual(c.limiter.queued(), 1) + c.mustEqual(await c.limiter.clusterQueued(), 1) + c.mustEqual(c.limiter.queued(1), 0) + c.mustEqual(c.limiter.queued(5), 1) + + await c.limiter.submit({id: 3}, c.slowJob, 50, null, 3, c.noErrVal(3)) + c.mustEqual(c.limiter.queued(), 2) + c.mustEqual(await c.limiter.clusterQueued(), 2) + c.mustEqual(c.limiter.queued(1), 0) + c.mustEqual(c.limiter.queued(5), 2) + + await c.limiter.submit({id: 4}, c.slowJob, 50, null, 4, c.noErrVal(4)) + c.mustEqual(c.limiter.queued(), 3) + c.mustEqual(await c.limiter.clusterQueued(), 3) + c.mustEqual(c.limiter.queued(1), 0) + c.mustEqual(c.limiter.queued(5), 3) + + await c.limiter.submit({priority: 1, id: 5}, c.job, null, 5, c.noErrVal(5)) + c.mustEqual(c.limiter.queued(), 4) + c.mustEqual(await c.limiter.clusterQueued(), 4) + c.mustEqual(c.limiter.queued(1), 1) + c.mustEqual(c.limiter.queued(5), 3) + + var results = await c.last() + c.mustEqual(c.limiter.queued(), 0) + c.mustEqual(await c.limiter.clusterQueued(), 0) + c.checkResultsOrder([[1], [5], [2], [3], [4]]) + c.checkDuration(450) + }) + + it('Should return the running and done counts', function () { + c = makeTest({maxConcurrent: 5, minTime: 0}) + + return Promise.all([c.limiter.running(), c.limiter.done()]) + .then(function ([running, done]) { + c.mustEqual(running, 0) + c.mustEqual(done, 0) + c.pNoErrVal(c.limiter.schedule({ 
weight: 1, id: 1 }, c.slowPromise, 100, null, 1), 1) + c.pNoErrVal(c.limiter.schedule({ weight: 3, id: 2 }, c.slowPromise, 200, null, 2), 2) + c.pNoErrVal(c.limiter.schedule({ weight: 1, id: 3 }, c.slowPromise, 100, null, 3), 3) + + return c.limiter.schedule({ weight: 0, id: 4 }, c.promise, null) + }) + .then(function () { + return Promise.all([c.limiter.running(), c.limiter.done()]) + }) + .then(function ([running, done]) { + c.mustEqual(running, 5) + c.mustEqual(done, 0) + return c.wait(125) + }) + .then(function () { + return Promise.all([c.limiter.running(), c.limiter.done()]) + }) + .then(function ([running, done]) { + c.mustEqual(running, 3) + c.mustEqual(done, 2) + return c.wait(100) + }) + .then(function () { + return Promise.all([c.limiter.running(), c.limiter.done()]) + }) + .then(function ([running, done]) { + c.mustEqual(running, 0) + c.mustEqual(done, 5) + return c.last() + }) + .then(function (results) { + c.checkDuration(200) + c.checkResultsOrder([[], [1], [3], [2]]) + }) + }) + + it('Should refuse duplicate Job IDs', async function () { + c = makeTest({maxConcurrent: 2, minTime: 100, trackDoneStatus: true}) + + try { + await c.limiter.schedule({ id: 'a' }, c.promise, null, 1) + await c.limiter.schedule({ id: 'b' }, c.promise, null, 2) + await c.limiter.schedule({ id: 'a' }, c.promise, null, 3) + } catch (e) { + c.mustEqual(e.message, 'A job with the same id already exists (id=a)') + } + }) + + it('Should return job statuses', function () { + c = makeTest({maxConcurrent: 2, minTime: 100}) + + c.mustEqual(c.limiter.counts(), { RECEIVED: 0, QUEUED: 0, RUNNING: 0, EXECUTING: 0 }) + + c.pNoErrVal(c.limiter.schedule({ weight: 1, id: 1 }, c.slowPromise, 100, null, 1), 1) + c.pNoErrVal(c.limiter.schedule({ weight: 1, id: 2 }, c.slowPromise, 200, null, 2), 2) + c.pNoErrVal(c.limiter.schedule({ weight: 2, id: 3 }, c.slowPromise, 100, null, 3), 3) + c.mustEqual(c.limiter.counts(), { RECEIVED: 3, QUEUED: 0, RUNNING: 0, EXECUTING: 0 }) + + return c.wait(50) + .then(function () { + c.mustEqual(c.limiter.counts(), { RECEIVED: 0, QUEUED: 1, RUNNING: 1, EXECUTING: 1 }) + c.mustEqual(c.limiter.jobStatus(1), 'EXECUTING') + c.mustEqual(c.limiter.jobStatus(2), 'RUNNING') + c.mustEqual(c.limiter.jobStatus(3), 'QUEUED') + + return c.last() + }) + .then(function (results) { + c.checkDuration(400) + c.checkResultsOrder([[1], [2], [3]]) + }) + }) + + it('Should return job statuses, including DONE', function () { + c = makeTest({maxConcurrent: 2, minTime: 100, trackDoneStatus: true}) + + c.mustEqual(c.limiter.counts(), { RECEIVED: 0, QUEUED: 0, RUNNING: 0, EXECUTING: 0, DONE: 0 }) + + c.pNoErrVal(c.limiter.schedule({ weight: 1, id: 1 }, c.slowPromise, 100, null, 1), 1) + c.pNoErrVal(c.limiter.schedule({ weight: 1, id: 2 }, c.slowPromise, 200, null, 2), 2) + c.pNoErrVal(c.limiter.schedule({ weight: 2, id: 3 }, c.slowPromise, 100, null, 3), 3) + c.mustEqual(c.limiter.counts(), { RECEIVED: 3, QUEUED: 0, RUNNING: 0, EXECUTING: 0, DONE: 0 }) + + return c.wait(50) + .then(function () { + c.mustEqual(c.limiter.counts(), { RECEIVED: 0, QUEUED: 1, RUNNING: 1, EXECUTING: 1, DONE: 0 }) + c.mustEqual(c.limiter.jobStatus(1), 'EXECUTING') + c.mustEqual(c.limiter.jobStatus(2), 'RUNNING') + c.mustEqual(c.limiter.jobStatus(3), 'QUEUED') + + return c.wait(100) + }) + .then(function () { + c.mustEqual(c.limiter.counts(), { RECEIVED: 0, QUEUED: 1, RUNNING: 0, EXECUTING: 1, DONE: 1 }) + c.mustEqual(c.limiter.jobStatus(1), 'DONE') + c.mustEqual(c.limiter.jobStatus(2), 'EXECUTING') + c.mustEqual(c.limiter.jobStatus(3), 
'QUEUED') + + return c.last() + }) + .then(function (results) { + c.mustEqual(c.limiter.counts(), { RECEIVED: 0, QUEUED: 0, RUNNING: 0, EXECUTING: 0, DONE: 4 }) + c.checkDuration(400) + c.checkResultsOrder([[1], [2], [3]]) + }) + }) + + it('Should return jobs for a status', function () { + c = makeTest({maxConcurrent: 2, minTime: 100, trackDoneStatus: true}) + + c.mustEqual(c.limiter.counts(), { RECEIVED: 0, QUEUED: 0, RUNNING: 0, EXECUTING: 0, DONE: 0 }) + + c.pNoErrVal(c.limiter.schedule({ weight: 1, id: 1 }, c.slowPromise, 100, null, 1), 1) + c.pNoErrVal(c.limiter.schedule({ weight: 1, id: 2 }, c.slowPromise, 200, null, 2), 2) + c.pNoErrVal(c.limiter.schedule({ weight: 2, id: 3 }, c.slowPromise, 100, null, 3), 3) + c.mustEqual(c.limiter.counts(), { RECEIVED: 3, QUEUED: 0, RUNNING: 0, EXECUTING: 0, DONE: 0 }) + + c.mustEqual(c.limiter.jobs(), ['1', '2', '3']) + c.mustEqual(c.limiter.jobs('RECEIVED'), ['1', '2', '3']) + + return c.wait(50) + .then(function () { + c.mustEqual(c.limiter.counts(), { RECEIVED: 0, QUEUED: 1, RUNNING: 1, EXECUTING: 1, DONE: 0 }) + c.mustEqual(c.limiter.jobs('EXECUTING'), ['1']) + c.mustEqual(c.limiter.jobs('RUNNING'), ['2']) + c.mustEqual(c.limiter.jobs('QUEUED'), ['3']) + + return c.wait(100) + }) + .then(function () { + c.mustEqual(c.limiter.counts(), { RECEIVED: 0, QUEUED: 1, RUNNING: 0, EXECUTING: 1, DONE: 1 }) + c.mustEqual(c.limiter.jobs('DONE'), ['1']) + c.mustEqual(c.limiter.jobs('EXECUTING'), ['2']) + c.mustEqual(c.limiter.jobs('QUEUED'), ['3']) + + return c.last() + }) + .then(function (results) { + c.mustEqual(c.limiter.counts(), { RECEIVED: 0, QUEUED: 0, RUNNING: 0, EXECUTING: 0, DONE: 4 }) + c.checkDuration(400) + c.checkResultsOrder([[1], [2], [3]]) + }) + }) + + it('Should trigger events on status changes', function () { + c = makeTest({maxConcurrent: 2, minTime: 100, trackDoneStatus: true}) + var onReceived = 0 + var onQueued = 0 + var onScheduled = 0 + var onExecuting = 0 + var onDone = 0 + c.limiter.on('received', (info) => { + c.mustEqual(Object.keys(info).sort(), ['args', 'options']) + onReceived++ + }) + c.limiter.on('queued', (info) => { + c.mustEqual(Object.keys(info).sort(), ['args', 'blocked', 'options', 'reachedHWM']) + onQueued++ + }) + c.limiter.on('scheduled', (info) => { + c.mustEqual(Object.keys(info).sort(), ['args', 'options']) + onScheduled++ + }) + c.limiter.on('executing', (info) => { + c.mustEqual(Object.keys(info).sort(), ['args', 'options', 'retryCount']) + onExecuting++ + }) + c.limiter.on('done', (info) => { + c.mustEqual(Object.keys(info).sort(), ['args', 'options', 'retryCount']) + onDone++ + }) + + c.mustEqual(c.limiter.counts(), { RECEIVED: 0, QUEUED: 0, RUNNING: 0, EXECUTING: 0, DONE: 0 }) + + c.pNoErrVal(c.limiter.schedule({ weight: 1, id: 1 }, c.slowPromise, 100, null, 1), 1) + c.pNoErrVal(c.limiter.schedule({ weight: 1, id: 2 }, c.slowPromise, 200, null, 2), 2) + c.pNoErrVal(c.limiter.schedule({ weight: 2, id: 3 }, c.slowPromise, 100, null, 3), 3) + c.mustEqual(c.limiter.counts(), { RECEIVED: 3, QUEUED: 0, RUNNING: 0, EXECUTING: 0, DONE: 0 }) + + c.mustEqual([onReceived, onQueued, onScheduled, onExecuting, onDone], [3, 0, 0, 0, 0]) + + return c.wait(50) + .then(function () { + c.mustEqual(c.limiter.counts(), { RECEIVED: 0, QUEUED: 1, RUNNING: 1, EXECUTING: 1, DONE: 0 }) + c.mustEqual([onReceived, onQueued, onScheduled, onExecuting, onDone], [3, 3, 2, 1, 0]) + + return c.wait(100) + }) + .then(function () { + c.mustEqual(c.limiter.counts(), { RECEIVED: 0, QUEUED: 1, RUNNING: 0, EXECUTING: 1, DONE: 1 }) + 
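[Annotation, not part of the patch.] These status suites exercise Bottleneck's public job-status API: a job moves through RECEIVED, QUEUED, RUNNING, EXECUTING and, when trackDoneStatus is enabled, DONE, observable via counts(), jobStatus(), and jobs(). A minimal sketch of the same calls outside the harness; the job id and result are illustrative:

const Bottleneck = require('bottleneck')

const limiter = new Bottleneck({ maxConcurrent: 1, trackDoneStatus: true })

const done = limiter.schedule({ id: 'job-1' }, () => Promise.resolve(42))
console.log(limiter.jobStatus('job-1')) // e.g. 'RECEIVED' right after scheduling
console.log(limiter.counts())           // { RECEIVED, QUEUED, RUNNING, EXECUTING, DONE }

done.then(() => {
  console.log(limiter.jobs('DONE'))     // ['job-1']
})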
c.mustEqual(c.limiter.jobs('DONE'), ['1']) + c.mustEqual(c.limiter.jobs('EXECUTING'), ['2']) + c.mustEqual(c.limiter.jobs('QUEUED'), ['3']) + c.mustEqual([onReceived, onQueued, onScheduled, onExecuting, onDone], [3, 3, 2, 2, 1]) + + return c.last() + }) + .then(function (results) { + c.mustEqual(c.limiter.counts(), { RECEIVED: 0, QUEUED: 0, RUNNING: 0, EXECUTING: 0, DONE: 4 }) + c.mustEqual([onReceived, onQueued, onScheduled, onExecuting, onDone], [4, 4, 4, 4, 4]) + c.checkDuration(400) + c.checkResultsOrder([[1], [2], [3]]) + }) + }) + }) + + describe('Events', function () { + it('Should return itself', function () { + c = makeTest({ id: 'test-limiter' }) + + var returned = c.limiter.on('ready', function () { }) + c.mustEqual(returned.id, 'test-limiter') + }) + + it('Should fire events on empty queue', function () { + c = makeTest({maxConcurrent: 1, minTime: 100}) + var calledEmpty = 0 + var calledIdle = 0 + var calledDepleted = 0 + + c.limiter.on('empty', function () { calledEmpty++ }) + c.limiter.on('idle', function () { calledIdle++ }) + c.limiter.on('depleted', function () { calledDepleted++ }) + + return c.pNoErrVal(c.limiter.schedule({id: 1}, c.slowPromise, 50, null, 1), 1) + .then(function () { + c.mustEqual(calledEmpty, 1) + c.mustEqual(calledIdle, 1) + return Promise.all([ + c.pNoErrVal(c.limiter.schedule({id: 2}, c.slowPromise, 50, null, 2), 2), + c.pNoErrVal(c.limiter.schedule({id: 3}, c.slowPromise, 50, null, 3), 3) + ]) + }) + .then(function () { + return c.limiter.submit({id: 4}, c.slowJob, 50, null, 4, null) + }) + .then(function () { + c.checkDuration(250) + c.checkResultsOrder([[1], [2], [3]]) + c.mustEqual(calledEmpty, 3) + c.mustEqual(calledIdle, 2) + c.mustEqual(calledDepleted, 0) + return c.last() + }) + }) + + it('Should fire events once', function () { + c = makeTest({maxConcurrent: 1, minTime: 100}) + var calledEmptyOnce = 0 + var calledIdleOnce = 0 + var calledEmpty = 0 + var calledIdle = 0 + var calledDepleted = 0 + + c.limiter.once('empty', function () { calledEmptyOnce++ }) + c.limiter.once('idle', function () { calledIdleOnce++ }) + c.limiter.on('empty', function () { calledEmpty++ }) + c.limiter.on('idle', function () { calledIdle++ }) + c.limiter.on('depleted', function () { calledDepleted++ }) + + c.pNoErrVal(c.limiter.schedule(c.slowPromise, 50, null, 1), 1) + + return c.pNoErrVal(c.limiter.schedule(c.promise, null, 2), 2) + .then(function () { + c.mustEqual(calledEmptyOnce, 1) + c.mustEqual(calledIdleOnce, 1) + c.mustEqual(calledEmpty, 1) + c.mustEqual(calledIdle, 1) + return c.pNoErrVal(c.limiter.schedule(c.promise, null, 3), 3) + }) + .then(function () { + c.checkDuration(200) + c.checkResultsOrder([[1], [2], [3]]) + c.mustEqual(calledEmptyOnce, 1) + c.mustEqual(calledIdleOnce, 1) + c.mustEqual(calledEmpty, 2) + c.mustEqual(calledIdle, 2) + c.mustEqual(calledDepleted, 0) + }) + }) + + it('Should support faulty event listeners', function (done) { + c = makeTest({maxConcurrent: 1, minTime: 100, errorEventsExpected: true}) + var calledError = 0 + + c.limiter.on('error', function (err) { + calledError++ + if (err.message === 'Oh noes!' 
&& calledError === 1) { + done() + } + }) + c.limiter.on('empty', function () { + throw new Error('Oh noes!') + }) + + c.pNoErrVal(c.limiter.schedule(c.promise, null, 1), 1) + }) + + it('Should wait for async event listeners', function (done) { + c = makeTest({maxConcurrent: 1, minTime: 100, errorEventsExpected: true}) + var calledError = 0 + + c.limiter.on('error', function (err) { + calledError++ + if (err.message === 'It broke!' && calledError === 1) { + done() + } + }) + c.limiter.on('empty', function () { + return c.slowPromise(100, null, 1, 2) + .then(function (x) { + c.mustEqual(x, [1, 2]) + return Promise.reject(new Error('It broke!')) + }) + }) + + c.pNoErrVal(c.limiter.schedule(c.promise, null, 1), 1) + }) + }) + + describe('High water limit', function () { + it('Should support highWater set to 0', function () { + c = makeTest({maxConcurrent: 1, minTime: 0, highWater: 0, rejectOnDrop: false}) + + var first = c.pNoErrVal(c.limiter.schedule(c.slowPromise, 50, null, 1), 1) + c.pNoErrVal(c.limiter.schedule(c.slowPromise, 50, null, 2), 2) + c.pNoErrVal(c.limiter.schedule(c.slowPromise, 50, null, 3), 3) + c.pNoErrVal(c.limiter.schedule(c.slowPromise, 50, null, 4), 4) + + return first + .then(function () { + return c.last({ weight: 0 }) + }) + .then(function (results) { + c.checkDuration(50) + c.checkResultsOrder([[1]]) + }) + }) + + it('Should support highWater set to 1', function () { + c = makeTest({maxConcurrent: 1, minTime: 0, highWater: 1, rejectOnDrop: false}) + + var first = c.pNoErrVal(c.limiter.schedule(c.slowPromise, 50, null, 1), 1) + c.pNoErrVal(c.limiter.schedule(c.slowPromise, 50, null, 2), 2) + c.pNoErrVal(c.limiter.schedule(c.slowPromise, 50, null, 3), 3) + var last = c.pNoErrVal(c.limiter.schedule(c.slowPromise, 50, null, 4), 4) + + return Promise.all([first, last]) + .then(function () { + return c.last({ weight: 0 }) + }) + .then(function (results) { + c.checkDuration(100) + c.checkResultsOrder([[1], [4]]) + }) + }) + }) + + describe('Weight', function () { + it('Should not add jobs with a weight above the maxConcurrent', function () { + c = makeTest({maxConcurrent: 2}) + + c.pNoErrVal(c.limiter.schedule({ weight: 1 }, c.promise, null, 1), 1) + c.pNoErrVal(c.limiter.schedule({ weight: 2 }, c.promise, null, 2), 2) + + return c.limiter.schedule({ weight: 3 }, c.promise, null, 3) + .catch(function (err) { + c.mustEqual(err.message, 'Impossible to add a job having a weight of 3 to a limiter having a maxConcurrent setting of 2') + return c.last() + }) + .then(function (results) { + c.checkDuration(0) + c.checkResultsOrder([[1], [2]]) + }) + }) + + + it('Should support custom job weights', function () { + c = makeTest({maxConcurrent: 2}) + + c.pNoErrVal(c.limiter.schedule({ weight: 1 }, c.slowPromise, 100, null, 1), 1) + c.pNoErrVal(c.limiter.schedule({ weight: 2 }, c.slowPromise, 200, null, 2), 2) + c.pNoErrVal(c.limiter.schedule({ weight: 1 }, c.slowPromise, 100, null, 3), 3) + c.pNoErrVal(c.limiter.schedule({ weight: 1 }, c.slowPromise, 100, null, 4), 4) + c.pNoErrVal(c.limiter.schedule({ weight: 0 }, c.slowPromise, 100, null, 5), 5) + + return c.last() + .then(function (results) { + c.checkDuration(400) + c.checkResultsOrder([[1], [2], [3], [4], [5]]) + }) + }) + + it('Should overflow at the correct rate', function () { + c = makeTest({ + maxConcurrent: 2, + reservoir: 3 + }) + + var calledDepleted = 0 + var emptyArguments = [] + c.limiter.on('depleted', function (empty) { + emptyArguments.push(empty) + calledDepleted++ + }) + + var p1 = c.pNoErrVal(c.limiter.schedule({ 
weight: 1, id: 1 }, c.slowPromise, 100, null, 1), 1) + var p2 = c.pNoErrVal(c.limiter.schedule({ weight: 2, id: 2 }, c.slowPromise, 150, null, 2), 2) + var p3 = c.pNoErrVal(c.limiter.schedule({ weight: 1, id: 3 }, c.slowPromise, 100, null, 3), 3) + var p4 = c.pNoErrVal(c.limiter.schedule({ weight: 1, id: 4 }, c.slowPromise, 100, null, 4), 4) + + return Promise.all([p1, p2]) + .then(function () { + c.mustEqual(c.limiter.queued(), 2) + return c.limiter.currentReservoir() + }) + .then(function (reservoir) { + c.mustEqual(reservoir, 0) + c.mustEqual(calledDepleted, 1) + return c.limiter.incrementReservoir(1) + }) + .then(function (reservoir) { + c.mustEqual(reservoir, 1) + return c.last({ priority: 1, weight: 0 }) + }) + .then(function (results) { + c.mustEqual(calledDepleted, 3) + c.mustEqual(c.limiter.queued(), 1) + c.checkDuration(250) + c.checkResultsOrder([[1], [2]]) + return c.limiter.currentReservoir() + }) + .then(function (reservoir) { + c.mustEqual(reservoir, 0) + return c.limiter.updateSettings({ reservoir: 1 }) + }) + .then(function () { + return Promise.all([p3, p4]) + }) + .then(function () { + return c.limiter.currentReservoir() + }) + .then(function (reservoir) { + c.mustEqual(reservoir, 0) + c.mustEqual(calledDepleted, 4) + c.mustEqual(emptyArguments, [false, false, false, true]) + }) + }) + }) + + describe('Expiration', function () { + it('Should cancel jobs', function () { + c = makeTest({ maxConcurrent: 2 }) + var t0 = Date.now() + + return Promise.all([ + c.pNoErrVal(c.limiter.schedule({ id: 'very-slow-no-expiration' }, c.slowPromise, 150, null, 1), 1), + + c.limiter.schedule({ expiration: 50, id: 'slow-with-expiration' }, c.slowPromise, 75, null, 2) + .then(function () { + return Promise.reject(new Error("Should have timed out.")) + }) + .catch(function (err) { + c.mustEqual(err.message, 'This job timed out after 50 ms.') + var duration = Date.now() - t0 + assert(duration > 45 && duration < 80) + + return Promise.all([c.limiter.running(), c.limiter.done()]) + }) + .then(function ([running, done]) { + c.mustEqual(running, 1) + c.mustEqual(done, 1) + }) + + ]) + .then(function () { + var duration = Date.now() - t0 + assert(duration > 145 && duration < 180) + return Promise.all([c.limiter.running(), c.limiter.done()]) + }) + .then(function ([running, done]) { + c.mustEqual(running, 0) + c.mustEqual(done, 2) + }) + }) + }) + + describe('Pubsub', function () { + it('Should pass strings', function (done) { + c = makeTest({ maxConcurrent: 2 }) + + c.limiter.on('message', function (msg) { + c.mustEqual(msg, 'hello') + done() + }) + + c.limiter.publish('hello') + }) + + it('Should pass objects', function (done) { + c = makeTest({ maxConcurrent: 2 }) + var obj = { + array: ['abc', true], + num: 235.59 + } + + c.limiter.on('message', function (msg) { + c.mustEqual(JSON.parse(msg), obj) + done() + }) + + c.limiter.publish(JSON.stringify(obj)) + }) + }) + + describe('Reservoir Refresh', function () { + it('Should auto-refresh the reservoir', function () { + c = makeTest({ + reservoir: 8, + reservoirRefreshInterval: 150, + reservoirRefreshAmount: 5, + heartbeatInterval: 75 // not for production use + }) + var calledDepleted = 0 + + c.limiter.on('depleted', function () { + calledDepleted++ + }) + + return Promise.all([ + c.pNoErrVal(c.limiter.schedule({ weight: 1 }, c.promise, null, 1), 1), + c.pNoErrVal(c.limiter.schedule({ weight: 2 }, c.promise, null, 2), 2), + c.pNoErrVal(c.limiter.schedule({ weight: 3 }, c.promise, null, 3), 3), + c.pNoErrVal(c.limiter.schedule({ weight: 4 }, 
c.promise, null, 4), 4), + c.pNoErrVal(c.limiter.schedule({ weight: 5 }, c.promise, null, 5), 5) + ]) + .then(function () { + return c.limiter.currentReservoir() + }) + .then(function (reservoir) { + c.mustEqual(reservoir, 0) + return c.last({ weight: 0, priority: 9 }) + }) + .then(function (results) { + c.checkResultsOrder([[1], [2], [3], [4], [5]]) + c.mustEqual(calledDepleted, 2) + c.checkDuration(300) + }) + }) + + it('Should allow staggered X by Y type usage', function () { + c = makeTest({ + reservoir: 2, + reservoirRefreshInterval: 150, + reservoirRefreshAmount: 2, + heartbeatInterval: 75 // not for production use + }) + + return Promise.all([ + c.pNoErrVal(c.limiter.schedule(c.promise, null, 1), 1), + c.pNoErrVal(c.limiter.schedule(c.promise, null, 2), 2), + c.pNoErrVal(c.limiter.schedule(c.promise, null, 3), 3), + c.pNoErrVal(c.limiter.schedule(c.promise, null, 4), 4) + ]) + .then(function () { + return c.limiter.currentReservoir() + }) + .then(function (reservoir) { + c.mustEqual(reservoir, 0) + return c.last({ weight: 0, priority: 9 }) + }) + .then(function (results) { + c.checkResultsOrder([[1], [2], [3], [4]]) + c.checkDuration(150) + }) + }) + + it('Should keep process alive until queue is empty', function (done) { + c = makeTest() + var options = { + cwd: process.cwd() + '/test/spawn', + timeout: 1000 + } + child_process.exec('node refreshKeepAlive.js', options, function (err, stdout, stderr) { + c.mustEqual(stdout, '[0][0][2][2]') + c.mustEqual(stderr, '') + done(err) + }) + }) + + }) + + describe('Reservoir Increase', function () { + it('Should auto-increase the reservoir', async function () { + c = makeTest({ + reservoir: 3, + reservoirIncreaseInterval: 150, + reservoirIncreaseAmount: 5, + heartbeatInterval: 75 // not for production use + }) + var calledDepleted = 0 + + c.limiter.on('depleted', function () { + calledDepleted++ + }) + + await Promise.all([ + c.pNoErrVal(c.limiter.schedule({ weight: 1 }, c.promise, null, 1), 1), + c.pNoErrVal(c.limiter.schedule({ weight: 2 }, c.promise, null, 2), 2), + c.pNoErrVal(c.limiter.schedule({ weight: 3 }, c.promise, null, 3), 3), + c.pNoErrVal(c.limiter.schedule({ weight: 4 }, c.promise, null, 4), 4), + c.pNoErrVal(c.limiter.schedule({ weight: 5 }, c.promise, null, 5), 5) + ]) + const reservoir = await c.limiter.currentReservoir() + c.mustEqual(reservoir, 3) + + const results = await c.last({ weight: 0, priority: 9 }) + c.checkResultsOrder([[1], [2], [3], [4], [5]]) + c.mustEqual(calledDepleted, 1) + c.checkDuration(450) + }) + + it('Should auto-increase the reservoir up to a maximum', async function () { + c = makeTest({ + reservoir: 3, + reservoirIncreaseInterval: 150, + reservoirIncreaseAmount: 5, + reservoirIncreaseMaximum: 6, + heartbeatInterval: 75 // not for production use + }) + var calledDepleted = 0 + + c.limiter.on('depleted', function () { + calledDepleted++ + }) + + await Promise.all([ + c.pNoErrVal(c.limiter.schedule({ weight: 1 }, c.promise, null, 1), 1), + c.pNoErrVal(c.limiter.schedule({ weight: 2 }, c.promise, null, 2), 2), + c.pNoErrVal(c.limiter.schedule({ weight: 3 }, c.promise, null, 3), 3), + c.pNoErrVal(c.limiter.schedule({ weight: 4 }, c.promise, null, 4), 4), + c.pNoErrVal(c.limiter.schedule({ weight: 5 }, c.promise, null, 5), 5) + ]) + const reservoir = await c.limiter.currentReservoir() + c.mustEqual(reservoir, 1) + + const results = await c.last({ weight: 0, priority: 9 }) + c.checkResultsOrder([[1], [2], [3], [4], [5]]) + c.mustEqual(calledDepleted, 1) + c.checkDuration(450) + }) + + it('Should allow 
staggered X by Y type usage', function () { + c = makeTest({ + reservoir: 2, + reservoirIncreaseInterval: 150, + reservoirIncreaseAmount: 2, + heartbeatInterval: 75 // not for production use + }) + + return Promise.all([ + c.pNoErrVal(c.limiter.schedule(c.promise, null, 1), 1), + c.pNoErrVal(c.limiter.schedule(c.promise, null, 2), 2), + c.pNoErrVal(c.limiter.schedule(c.promise, null, 3), 3), + c.pNoErrVal(c.limiter.schedule(c.promise, null, 4), 4) + ]) + .then(function () { + return c.limiter.currentReservoir() + }) + .then(function (reservoir) { + c.mustEqual(reservoir, 0) + return c.last({ weight: 0, priority: 9 }) + }) + .then(function (results) { + c.checkResultsOrder([[1], [2], [3], [4]]) + c.checkDuration(150) + }) + }) + + it('Should keep process alive until queue is empty', function (done) { + c = makeTest() + var options = { + cwd: process.cwd() + '/test/spawn', + timeout: 1000 + } + child_process.exec('node increaseKeepAlive.js', options, function (err, stdout, stderr) { + c.mustEqual(stdout, '[0][0][2][2]') + c.mustEqual(stderr, '') + done(err) + }) + }) + }) + +}) diff --git a/node_modules/bottleneck/test/group.js b/node_modules/bottleneck/test/group.js new file mode 100644 index 000000000..5c21ab6ff --- /dev/null +++ b/node_modules/bottleneck/test/group.js @@ -0,0 +1,255 @@ +var makeTest = require('./context') +var Bottleneck = require('./bottleneck') +var assert = require('assert') + +describe('Group', function () { + var c + + afterEach(function () { + return c.limiter.disconnect(false) + }) + + it('Should create limiters', function (done) { + c = makeTest() + var group = new Bottleneck.Group({ + maxConcurrent: 1, minTime: 100 + }) + + var results = [] + + var job = function (...result) { + results.push(result) + return new Promise(function (resolve, reject) { + setTimeout(function () { + return resolve() + }, 50) + }) + } + + group.key('A').schedule(job, 1, 2) + group.key('A').schedule(job, 3) + group.key('A').schedule(job, 4) + setTimeout(function () { + group.key('B').schedule(job, 5) + }, 20) + setTimeout(function () { + group.key('C').schedule(job, 6) + group.key('C').schedule(job, 7) + }, 40) + + group.key('A').submit(function (cb) { + c.mustEqual(results, [[1,2], [5], [6], [3], [7], [4]]) + cb() + done() + }, null) + }) + + it('Should set up the limiter IDs (default)', function () { + c = makeTest() + var group = new Bottleneck.Group({ + maxConcurrent: 1, minTime: 100 + }) + + c.mustEqual(group.key('A').id, 'group-key-A') + c.mustEqual(group.key('B').id, 'group-key-B') + c.mustEqual(group.key('XYZ').id, 'group-key-XYZ') + + var ids = group.keys().map(function (key) { + var limiter = group.key(key) + c.mustEqual(limiter._store.timeout, group.timeout) + return limiter.id + }) + c.mustEqual(ids.sort(), ['group-key-A', 'group-key-B', 'group-key-XYZ']) + }) + + it('Should set up the limiter IDs (custom)', function () { + c = makeTest() + var group = new Bottleneck.Group({ + maxConcurrent: 1, minTime: 100, + id: 'custom-id' + }) + + c.mustEqual(group.key('A').id, 'custom-id-A') + c.mustEqual(group.key('B').id, 'custom-id-B') + c.mustEqual(group.key('XYZ').id, 'custom-id-XYZ') + + var ids = group.keys().map(function (key) { + var limiter = group.key(key) + c.mustEqual(limiter._store.timeout, group.timeout) + return limiter.id + }) + c.mustEqual(ids.sort(), ['custom-id-A', 'custom-id-B', 'custom-id-XYZ']) + }) + + it('Should pass new limiter to \'created\' event', function () { + c = makeTest() + var group = new Bottleneck.Group({ + maxConcurrent: 1, minTime: 100 + }) + + var 
keys = [] + var ids = [] + var promises = [] + group.on('created', function (created, key) { + keys.push(key) + promises.push( + created.updateSettings({ id: key }) + .then(function (limiter) { + ids.push(limiter.id) + }) + ) + }) + + group.key('A') + group.key('B') + group.key('A') + group.key('B') + group.key('B') + group.key('BB') + group.key('C') + group.key('A') + + return Promise.all(promises) + .then(function () { + c.mustEqual(keys, ids) + return c.limiter.ready() + }) + + }) + + it('Should pass error on failure', function (done) { + var failureMessage = 'SOMETHING BLEW UP!!' + c = makeTest() + var group = new Bottleneck.Group({ + maxConcurrent: 1, minTime: 100 + }) + c.mustEqual(Object.keys(group.limiters), []) + + var results = [] + + var job = function (...result) { + results.push(result) + return new Promise(function (resolve, reject) { + setTimeout(function () { + return resolve() + }, 50) + }) + } + + group.key('A').schedule(job, 1, 2) + group.key('A').schedule(job, 3) + group.key('A').schedule(job, 4) + group.key('B').schedule(() => Promise.reject(new Error(failureMessage))) + .catch(function (err) { + results.push(['CAUGHT', err.message]) + }) + setTimeout(function () { + group.key('C').schedule(job, 6) + group.key('C').schedule(job, 7) + }, 40) + + + group.key('A').submit(function (cb) { + c.mustEqual(results, [[1,2], ['CAUGHT', failureMessage], [6], [3], [7], [4]]) + cb() + done() + }, null) + }) + + it('Should update its timeout', function () { + c = makeTest() + var group1 = new Bottleneck.Group({ + maxConcurrent: 1, minTime: 100 + }) + var group2 = new Bottleneck.Group({ + maxConcurrent: 1, minTime: 100, timeout: 5000 + }) + + c.mustEqual(group1.timeout, 300000) + c.mustEqual(group2.timeout, 5000) + + var p1 = group1.updateSettings({ timeout: 123 }) + var p2 = group2.updateSettings({ timeout: 456 }) + return Promise.all([p1, p2]) + .then(function () { + c.mustEqual(group1.timeout, 123) + c.mustEqual(group2.timeout, 456) + }) + }) + + it('Should update its limiter options', function () { + c = makeTest() + var group = new Bottleneck.Group({ + maxConcurrent: 1, minTime: 100 + }) + + var limiter1 = group.key('AAA') + c.mustEqual(limiter1._store.storeOptions.minTime, 100) + + group.updateSettings({ minTime: 200 }) + c.mustEqual(limiter1._store.storeOptions.minTime, 100) + + var limiter2 = group.key('BBB') + c.mustEqual(limiter2._store.storeOptions.minTime, 200) + }) + + it('Should support keys(), limiters(), deleteKey()', function () { + c = makeTest() + var group1 = new Bottleneck.Group({ + maxConcurrent: 1 + }) + var KEY_A = "AAA" + var KEY_B = "BBB" + + return Promise.all([ + c.pNoErrVal(group1.key(KEY_A).schedule(c.promise, null, 1), 1), + c.pNoErrVal(group1.key(KEY_B).schedule(c.promise, null, 2), 2) + ]) + .then(function () { + var keys = group1.keys() + var limiters = group1.limiters() + c.mustEqual(keys, [KEY_A, KEY_B]) + c.mustEqual(limiters.length, 2) + + limiters.forEach(function (limiter, i) { + c.mustEqual(limiter.key, keys[i]) + assert(limiter.limiter instanceof Bottleneck) + }) + + return group1.deleteKey(KEY_A) + }) + .then(function (deleted) { + c.mustEqual(deleted, true) + c.mustEqual(group1.keys().length, 1) + return group1.deleteKey(KEY_A) + }) + .then(function (deleted) { + c.mustEqual(deleted, false) + c.mustEqual(group1.keys().length, 1) + }) + }) + + it('Should call autocleanup', function () { + var KEY = 'test-key' + var group = new Bottleneck.Group({ + maxConcurrent: 1 + }) + group.updateSettings({ timeout: 50 }) + c = makeTest({ id: 'something', 
timeout: group.timeout }) + + group.instances[KEY] = c.limiter + return group.key(KEY).schedule(function () { + return Promise.resolve() + }) + .then(function () { + assert(group.instances[KEY] != null) + return new Promise(function (resolve, reject) { + setTimeout(resolve, 100) + }) + }) + .then(function () { + assert(group.instances[KEY] == null) + }) + }) + +}) diff --git a/node_modules/bottleneck/test/ioredis.js b/node_modules/bottleneck/test/ioredis.js new file mode 100644 index 000000000..3a68d84fe --- /dev/null +++ b/node_modules/bottleneck/test/ioredis.js @@ -0,0 +1,135 @@ +var makeTest = require('./context') +var Bottleneck = require('./bottleneck') +var assert = require('assert') +var Redis = require('ioredis') + +if (process.env.DATASTORE === 'ioredis') { + describe('ioredis-only', function () { + var c + + afterEach(function () { + return c.limiter.disconnect(false) + }) + + it('Should accept ioredis lib override', function () { + c = makeTest({ + maxConcurrent: 2, + Redis, + clientOptions: {}, + clusterNodes: [{ + host: process.env.REDIS_HOST, + port: process.env.REDIS_PORT + }] + }) + + c.mustEqual(c.limiter.datastore, 'ioredis') + }) + + it('Should connect in Redis Cluster mode', function () { + c = makeTest({ + maxConcurrent: 2, + clientOptions: {}, + clusterNodes: [{ + host: process.env.REDIS_HOST, + port: process.env.REDIS_PORT + }] + }) + + c.mustEqual(c.limiter.datastore, 'ioredis') + assert(c.limiter._store.connection.client.nodes().length >= 0) + }) + + it('Should connect in Redis Cluster mode with premade client', function () { + var client = new Redis.Cluster('') + var connection = new Bottleneck.IORedisConnection({ client }) + c = makeTest({ + maxConcurrent: 2, + clientOptions: {}, + clusterNodes: [{ + host: process.env.REDIS_HOST, + port: process.env.REDIS_PORT + }] + }) + + c.mustEqual(c.limiter.datastore, 'ioredis') + assert(c.limiter._store.connection.client.nodes().length >= 0) + connection.disconnect(false) + }) + + it('Should accept existing connections', function () { + var connection = new Bottleneck.IORedisConnection() + connection.id = 'super-connection' + c = makeTest({ + minTime: 50, + connection + }) + + c.pNoErrVal(c.limiter.schedule(c.promise, null, 1), 1) + c.pNoErrVal(c.limiter.schedule(c.promise, null, 2), 2) + + return c.last() + .then(function (results) { + c.checkResultsOrder([[1], [2]]) + c.checkDuration(50) + c.mustEqual(c.limiter.connection.id, 'super-connection') + c.mustEqual(c.limiter.datastore, 'ioredis') + + return c.limiter.disconnect() + }) + .then(function () { + // Shared connections should not be disconnected by the limiter + c.mustEqual(c.limiter.clients().client.status, 'ready') + return connection.disconnect() + }) + }) + + it('Should accept existing redis clients', function () { + var client = new Redis() + client.id = 'super-client' + + var connection = new Bottleneck.IORedisConnection({ client }) + connection.id = 'super-connection' + c = makeTest({ + minTime: 50, + connection + }) + + c.pNoErrVal(c.limiter.schedule(c.promise, null, 1), 1) + c.pNoErrVal(c.limiter.schedule(c.promise, null, 2), 2) + + return c.last() + .then(function (results) { + c.checkResultsOrder([[1], [2]]) + c.checkDuration(50) + c.mustEqual(c.limiter.clients().client.id, 'super-client') + c.mustEqual(c.limiter.connection.id, 'super-connection') + c.mustEqual(c.limiter.datastore, 'ioredis') + + return c.limiter.disconnect() + }) + .then(function () { + // Shared connections should not be disconnected by the limiter + 
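The comment above states the contract the next assertion verifies: limiter.disconnect() leaves a caller-supplied connection open, so one connection can back several limiters and is closed exactly once by its owner. A rough sketch of that sharing pattern, assuming a local Redis; the host, port and ids are placeholders:

import Bottleneck from "bottleneck";
import Redis from "ioredis";

const connection = new Bottleneck.IORedisConnection({
  client: new Redis({ host: "127.0.0.1", port: 6379 }),
});

// Several limiters can share the one connection...
const limiterA = new Bottleneck({ id: "a", datastore: "ioredis", connection });
const limiterB = new Bottleneck({ id: "b", datastore: "ioredis", connection });

// ...and only the connection's owner closes the underlying client.
Promise.all([limiterA.disconnect(), limiterB.disconnect()]).then(() =>
  connection.disconnect()
);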
c.mustEqual(c.limiter.clients().client.status, 'ready') + return connection.disconnect() + }) + }) + + it('Should trigger error events on the shared connection', function (done) { + var connection = new Bottleneck.IORedisConnection({ + clientOptions: { + port: 1 + } + }) + connection.on('error', function (err) { + c.mustEqual(c.limiter.datastore, 'ioredis') + connection.disconnect() + done() + }) + + c = makeTest({ connection }) + c.limiter.on('error', function (err) { + done(err) + }) + }) + }) +} diff --git a/node_modules/bottleneck/test/node_redis.js b/node_modules/bottleneck/test/node_redis.js new file mode 100644 index 000000000..cd204d3e7 --- /dev/null +++ b/node_modules/bottleneck/test/node_redis.js @@ -0,0 +1,100 @@ +var makeTest = require('./context') +var Bottleneck = require('./bottleneck') +var assert = require('assert') +var Redis = require('redis') + +if (process.env.DATASTORE === 'redis') { + describe('node_redis-only', function () { + var c + + afterEach(function () { + return c.limiter.disconnect(false) + }) + + it('Should accept node_redis lib override', function () { + c = makeTest({ + maxConcurrent: 2, + Redis, + clientOptions: {} + }) + + c.mustEqual(c.limiter.datastore, 'redis') + }) + + it('Should accept existing connections', function () { + var connection = new Bottleneck.RedisConnection() + connection.id = 'super-connection' + c = makeTest({ + minTime: 50, + connection + }) + + c.pNoErrVal(c.limiter.schedule(c.promise, null, 1), 1) + c.pNoErrVal(c.limiter.schedule(c.promise, null, 2), 2) + + return c.last() + .then(function (results) { + c.checkResultsOrder([[1], [2]]) + c.checkDuration(50) + c.mustEqual(c.limiter.connection.id, 'super-connection') + c.mustEqual(c.limiter.datastore, 'redis') + + return c.limiter.disconnect() + }) + .then(function () { + // Shared connections should not be disconnected by the limiter + c.mustEqual(c.limiter.clients().client.ready, true) + return connection.disconnect() + }) + }) + + it('Should accept existing redis clients', function () { + var client = Redis.createClient() + client.id = 'super-client' + + var connection = new Bottleneck.RedisConnection({ client }) + connection.id = 'super-connection' + c = makeTest({ + minTime: 50, + connection + }) + + c.pNoErrVal(c.limiter.schedule(c.promise, null, 1), 1) + c.pNoErrVal(c.limiter.schedule(c.promise, null, 2), 2) + + return c.last() + .then(function (results) { + c.checkResultsOrder([[1], [2]]) + c.checkDuration(50) + c.mustEqual(c.limiter.clients().client.id, 'super-client') + c.mustEqual(c.limiter.connection.id, 'super-connection') + c.mustEqual(c.limiter.datastore, 'redis') + + return c.limiter.disconnect() + }) + .then(function () { + // Shared connections should not be disconnected by the limiter + c.mustEqual(c.limiter.clients().client.ready, true) + return connection.disconnect() + }) + }) + + it('Should trigger error events on the shared connection', function (done) { + var connection = new Bottleneck.RedisConnection({ + clientOptions: { + port: 1 + } + }) + connection.on('error', function (err) { + c.mustEqual(c.limiter.datastore, 'redis') + connection.disconnect() + done() + }) + + c = makeTest({ connection }) + c.limiter.on('error', function (err) { + done(err) + }) + }) + }) +} diff --git a/node_modules/bottleneck/test/priority.js b/node_modules/bottleneck/test/priority.js new file mode 100644 index 000000000..f89b85f41 --- /dev/null +++ b/node_modules/bottleneck/test/priority.js @@ -0,0 +1,184 @@ +var makeTest = require('./context') +var Bottleneck = 
require('./bottleneck') +var assert = require('assert') + +describe('Priority', function () { + var c + + afterEach(function () { + return c.limiter.disconnect(false) + }) + + it('Should do basic ordering', function () { + c = makeTest({maxConcurrent: 1, minTime: 100, rejectOnDrop: false}) + + return Promise.all([ + c.pNoErrVal(c.limiter.schedule(c.slowPromise, 50, null, 1), 1), + c.pNoErrVal(c.limiter.schedule(c.promise, null, 2), 2), + c.pNoErrVal(c.limiter.schedule({priority: 1}, c.promise, null, 5, 6), 5, 6), + c.pNoErrVal(c.limiter.schedule(c.promise, null, 3), 3), + c.pNoErrVal(c.limiter.schedule(c.promise, null, 4), 4) + ]) + .then(function () { + return c.last() + }) + .then(function (results) { + c.checkResultsOrder([[1], [5,6], [2] ,[3], [4]]) + c.checkDuration(400) + }) + }) + + it('Should support LEAK', function () { + c = makeTest({ + maxConcurrent: 1, + minTime: 100, + highWater: 3, + strategy: Bottleneck.strategy.LEAK, + rejectOnDrop: false + }) + + var called = false + c.limiter.on('dropped', function (dropped) { + c.mustExist(dropped.task) + c.mustExist(dropped.args) + c.mustExist(dropped.promise) + called = true + }) + + c.limiter.submit(c.slowJob, 50, null, 1, c.noErrVal(1)) + c.limiter.submit(c.job, null, 2, c.noErrVal(2)) + c.limiter.submit(c.job, null, 3, c.noErrVal(3)) + c.limiter.submit(c.job, null, 4, c.noErrVal(4)) + c.limiter.submit({priority: 2}, c.job, null, 5, c.noErrVal(5)) + c.limiter.submit({priority: 1}, c.job, null, 6, c.noErrVal(6)) + c.limiter.submit({priority: 9}, c.job, null, 7, c.noErrVal(7)) + + return c.last({ weight: 0 }) + .then(function (results) { + c.checkDuration(200) + c.checkResultsOrder([[1], [6], [5]]) + c.mustEqual(called, true) + }) + }) + + it('Should support OVERFLOW', function () { + c = makeTest({ + maxConcurrent: 1, + minTime: 100, + highWater: 2, + strategy: Bottleneck.strategy.OVERFLOW, + rejectOnDrop: false + }) + var called = false + c.limiter.on('dropped', function (dropped) { + c.mustExist(dropped.task) + c.mustExist(dropped.args) + c.mustExist(dropped.promise) + called = true + }) + + c.limiter.submit(c.slowJob, 50, null, 1, c.noErrVal(1)) + c.limiter.submit(c.job, null, 2, c.noErrVal(2)) + c.limiter.submit(c.job, null, 3, c.noErrVal(3)) + c.limiter.submit(c.job, null, 4, c.noErrVal(4)) + c.limiter.submit({priority: 2}, c.job, null, 5, c.noErrVal(5)) + c.limiter.submit({priority: 1}, c.job, null, 6, c.noErrVal(6)) + + return c.limiter.submit({priority: 9}, c.job, null, 7, c.noErrVal(7)) + .then(function () { + return c.limiter.updateSettings({ highWater: null }) + }) + .then(c.last) + .then(function (results) { + c.checkDuration(200) + c.checkResultsOrder([[1], [2], [3]]) + c.mustEqual(called, true) + }) + }) + + it('Should support OVERFLOW_PRIORITY', function () { + c = makeTest({ + maxConcurrent: 1, + minTime: 100, + highWater: 2, + strategy: Bottleneck.strategy.OVERFLOW_PRIORITY, + rejectOnDrop: false + }) + var called = false + c.limiter.on('dropped', function (dropped) { + c.mustExist(dropped.task) + c.mustExist(dropped.args) + c.mustExist(dropped.promise) + called = true + }) + + c.limiter.submit(c.slowJob, 50, null, 1, c.noErrVal(1)) + c.limiter.submit(c.job, null, 2, c.noErrVal(2)) + c.limiter.submit(c.job, null, 3, c.noErrVal(3)) + c.limiter.submit(c.job, null, 4, c.noErrVal(4)) + c.limiter.submit({priority: 2}, c.job, null, 5, c.noErrVal(5)) + c.limiter.submit({priority: 2}, c.job, null, 6, c.noErrVal(6)) + + return c.limiter.submit({priority: 2}, c.job, null, 7, c.noErrVal(7)) + .then(function () { + return 
c.limiter.updateSettings({highWater: null}) + }) + .then(c.last) + .then(function (results) { + c.checkDuration(200) + c.checkResultsOrder([[1], [5], [6]]) + c.mustEqual(called, true) + }) + }) + + it('Should support BLOCK', function (done) { + c = makeTest({ + maxConcurrent: 1, + minTime: 100, + highWater: 2, + trackDoneStatus: true, + strategy: Bottleneck.strategy.BLOCK + }) + var called = 0 + + c.limiter.on('dropped', function (dropped) { + c.mustExist(dropped.task) + c.mustExist(dropped.args) + c.mustExist(dropped.promise) + called++ + if (called === 3) { + c.limiter.updateSettings({ highWater: null }) + .then(function () { + return c.limiter.schedule(c.job, null, 8) + }) + .catch(function (err) { + assert(err instanceof Bottleneck.BottleneckError) + c.mustEqual(err.message, 'This job has been dropped by Bottleneck') + c.limiter.removeAllListeners('error') + done() + }) + } + }) + + c.limiter.submit(c.slowJob, 20, null, 1, c.noErrVal(1)) + c.limiter.submit(c.slowJob, 20, null, 2, (err) => c.mustExist(err)) + c.limiter.submit(c.slowJob, 20, null, 3, (err) => c.mustExist(err)) + c.limiter.submit(c.slowJob, 20, null, 4, (err) => c.mustExist(err)) + }) + + it('Should have the right priority', function () { + c = makeTest({maxConcurrent: 1, minTime: 100}) + + c.pNoErrVal(c.limiter.schedule({priority: 6}, c.slowPromise, 50, null, 1), 1) + c.pNoErrVal(c.limiter.schedule({priority: 5}, c.promise, null, 2), 2) + c.pNoErrVal(c.limiter.schedule({priority: 4}, c.promise, null, 3), 3) + c.pNoErrVal(c.limiter.schedule({priority: 3}, c.promise, null, 4), 4) + + return c.last() + .then(function (results) { + c.checkDuration(300) + c.checkResultsOrder([[1], [4], [3], [2]]) + }) + }) + +}) diff --git a/node_modules/bottleneck/test/promises.js b/node_modules/bottleneck/test/promises.js new file mode 100644 index 000000000..b20022f0e --- /dev/null +++ b/node_modules/bottleneck/test/promises.js @@ -0,0 +1,202 @@ +var makeTest = require('./context') +var Bottleneck = require('./bottleneck') +var assert = require('assert') + +describe('Promises', function () { + var c + + afterEach(function () { + return c.limiter.disconnect(false) + }) + + it('Should support promises', function () { + c = makeTest({maxConcurrent: 1, minTime: 100}) + + c.limiter.submit(c.job, null, 1, 9, c.noErrVal(1, 9)) + c.limiter.submit(c.job, null, 2, c.noErrVal(2)) + c.limiter.submit(c.job, null, 3, c.noErrVal(3)) + c.pNoErrVal(c.limiter.schedule(c.promise, null, 4, 5), 4, 5) + + return c.last() + .then(function (results) { + c.checkResultsOrder([[1,9], [2], [3], [4,5]]) + c.checkDuration(300) + }) + }) + + it('Should pass error on failure', function () { + var failureMessage = 'failed' + c = makeTest({maxConcurrent: 1, minTime: 100}) + + return c.limiter.schedule(c.promise, new Error(failureMessage)) + .catch(function (err) { + c.mustEqual(err.message, failureMessage) + }) + }) + + it('Should allow non-Promise returns', function () { + c = makeTest() + var str = 'This is a string' + + return c.limiter.schedule(() => str) + .then(function (x) { + c.mustEqual(x, str) + }) + }) + + it('Should get rejected when rejectOnDrop is true', function () { + c = makeTest({ + maxConcurrent: 1, + minTime: 0, + highWater: 1, + strategy: Bottleneck.strategy.OVERFLOW, + rejectOnDrop: true + }) + var dropped = 0 + var caught = 0 + var p1 + var p2 + + c.limiter.on('dropped', function () { + dropped++ + }) + + p1 = c.pNoErrVal(c.limiter.schedule({id: 1}, c.slowPromise, 50, null, 1), 1) + p2 = c.pNoErrVal(c.limiter.schedule({id: 2}, c.slowPromise, 50, 
null, 2), 2) + + return c.limiter.schedule({id: 3}, c.slowPromise, 50, null, 3) + .catch(function (err) { + c.mustEqual(err.message, 'This job has been dropped by Bottleneck') + assert(err instanceof Bottleneck.BottleneckError) + caught++ + return Promise.all([p1, p2]) + }) + .then(c.last) + .then(function (results) { + c.checkResultsOrder([[1], [2]]) + c.checkDuration(100) + c.mustEqual(dropped, 1) + c.mustEqual(caught, 1) + }) + }) + + it('Should automatically wrap an exception in a rejected promise - schedule()', function () { + c = makeTest({maxConcurrent: 1, minTime: 100}) + + return c.limiter.schedule(() => { + throw new Error('I will reject') + }) + .then(() => assert(false)) + .catch(err => { + assert(err.message === 'I will reject'); + }) + }) + + describe('Wrap', function () { + it('Should wrap', function () { + c = makeTest({maxConcurrent: 1, minTime: 100}) + + c.limiter.submit(c.job, null, 1, c.noErrVal(1)) + c.limiter.submit(c.job, null, 2, c.noErrVal(2)) + c.limiter.submit(c.job, null, 3, c.noErrVal(3)) + + var wrapped = c.limiter.wrap(c.promise) + c.pNoErrVal(wrapped(null, 4), 4) + + return c.last() + .then(function (results) { + c.checkResultsOrder([[1], [2], [3], [4]]) + c.checkDuration(300) + }) + }) + + it('Should automatically wrap a returned value in a resolved promise', function () { + c = makeTest({maxConcurrent: 1, minTime: 100}) + + fn = c.limiter.wrap(() => { return 7 }); + + return fn().then(result => { + assert(result === 7); + }) + }) + + it('Should automatically wrap an exception in a rejected promise', function () { + c = makeTest({maxConcurrent: 1, minTime: 100}) + + fn = c.limiter.wrap(() => { throw new Error('I will reject') }); + + return fn().then(() => assert(false)).catch(error => { + assert(error.message === 'I will reject'); + }) + }) + + it('Should inherit the original target for wrapped methods', function () { + c = makeTest({maxConcurrent: 1, minTime: 100}) + + var object = { + fn: c.limiter.wrap(function () { return this }) + } + + return object.fn().then(result => { + assert(result === object) + }) + }) + + it('Should inherit the original target on prototype methods', function () { + c = makeTest({maxConcurrent: 1, minTime: 100}) + + class Animal { + constructor(name) { this.name = name } + getName() { return this.name } + } + + Animal.prototype.getName = c.limiter.wrap(Animal.prototype.getName) + let elephant = new Animal('Dumbo') + + return elephant.getName().then(result => { + assert(result === 'Dumbo') + }) + }) + + it('Should pass errors back', function () { + var failureMessage = 'BLEW UP!!!' + c = makeTest({maxConcurrent: 1, minTime: 100}) + + var wrapped = c.limiter.wrap(c.promise) + c.pNoErrVal(wrapped(null, 1), 1) + c.pNoErrVal(wrapped(null, 2), 2) + + return wrapped(new Error(failureMessage), 3) + .catch(function (err) { + c.mustEqual(err.message, failureMessage) + return c.last() + }) + .then(function (results) { + c.checkResultsOrder([[1], [2], [3]]) + c.checkDuration(200) + }) + }) + + it('Should allow passing options', function () { + var failureMessage = 'BLEW UP!!!' 
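The test below exercises both calling conventions of a wrapped function: plain calls inherit the limiter's defaults, while withOptions() overrides job options such as priority for a single call. A minimal sketch of the same API; the wrapped function and option values are illustrative:

import Bottleneck from "bottleneck";

const limiter = new Bottleneck({ maxConcurrent: 1, minTime: 50 });

// wrap() returns a rate-limited version of the function.
const fetchPage = limiter.wrap(async (url: string) => `contents of ${url}`);

fetchPage("https://example.com/a"); // scheduled with default job options
fetchPage
  .withOptions({ priority: 1 }, "https://example.com/b") // same job, jumps the queue
  .then(console.log);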
+ c = makeTest({maxConcurrent: 1, minTime: 50}) + + var wrapped = c.limiter.wrap(c.promise) + c.pNoErrVal(wrapped(null, 1), 1) + c.pNoErrVal(wrapped(null, 2), 2) + c.pNoErrVal(wrapped(null, 3), 3) + c.pNoErrVal(wrapped(null, 4), 4) + c.pNoErrVal(wrapped.withOptions({ priority: 1 }, null, 5), 5) + + return wrapped.withOptions({ priority: 1 }, new Error(failureMessage), 6) + .catch(function (err) { + c.mustEqual(err.message, failureMessage) + return c.last() + }) + .then(function (results) { + c.checkResultsOrder([[1], [2], [5], [6], [3], [4]]) + c.checkDuration(250) + }) + }) + }) +}) diff --git a/node_modules/bottleneck/test/retries.js b/node_modules/bottleneck/test/retries.js new file mode 100644 index 000000000..7570516fd --- /dev/null +++ b/node_modules/bottleneck/test/retries.js @@ -0,0 +1,237 @@ +var makeTest = require('./context') +var Bottleneck = require('./bottleneck') +var assert = require('assert') +var child_process = require('child_process') + +describe('Retries', function () { + var c + + afterEach(function () { + return c.limiter.disconnect(false) + }) + + it('Should retry when requested by the user (sync)', async function () { + c = makeTest({ trackDoneStatus: true }) + var failedEvents = 0 + var retryEvents = 0 + + c.limiter.on('failed', function (error, info) { + c.mustEqual(c.limiter.counts().EXECUTING, 1) + c.mustEqual(info.retryCount, failedEvents) + failedEvents++ + return 50 + }) + + c.limiter.on('retry', function (error, info) { + c.mustEqual(c.limiter.counts().EXECUTING, 1) + retryEvents++ + }) + + var times = 0 + const job = function () { + times++ + if (times <= 2) { + return Promise.reject(new Error('boom')) + } + return Promise.resolve('Success!') + } + + c.mustEqual(await c.limiter.schedule(job), 'Success!') + const results = await c.results() + assert(results.elapsed > 90 && results.elapsed < 130) + c.mustEqual(failedEvents, 2) + c.mustEqual(retryEvents, 2) + c.mustEqual(c.limiter.counts().EXECUTING, 0) + c.mustEqual(c.limiter.counts().DONE, 1) + }) + + it('Should retry when requested by the user (async)', async function () { + c = makeTest({ trackDoneStatus: true }) + var failedEvents = 0 + var retryEvents = 0 + + c.limiter.on('failed', function (error, info) { + c.mustEqual(c.limiter.counts().EXECUTING, 1) + c.mustEqual(info.retryCount, failedEvents) + failedEvents++ + return Promise.resolve(50) + }) + + c.limiter.on('retry', function (error, info) { + c.mustEqual(c.limiter.counts().EXECUTING, 1) + retryEvents++ + }) + + var times = 0 + const job = function () { + times++ + if (times <= 2) { + return Promise.reject(new Error('boom')) + } + return Promise.resolve('Success!') + } + + c.mustEqual(await c.limiter.schedule(job), 'Success!') + const results = await c.results() + assert(results.elapsed > 90 && results.elapsed < 130) + c.mustEqual(failedEvents, 2) + c.mustEqual(retryEvents, 2) + c.mustEqual(c.limiter.counts().EXECUTING, 0) + c.mustEqual(c.limiter.counts().DONE, 1) + }) + + it('Should not retry when user returns an error (sync)', async function () { + c = makeTest({ errorEventsExpected: true, trackDoneStatus: true }) + var failedEvents = 0 + var retryEvents = 0 + var errorEvents = 0 + var caught = false + + c.limiter.on('failed', function (error, info) { + c.mustEqual(c.limiter.counts().EXECUTING, 1) + c.mustEqual(info.retryCount, failedEvents) + failedEvents++ + throw new Error('Nope') + }) + + c.limiter.on('retry', function (error, info) { + retryEvents++ + }) + + c.limiter.on('error', function (error, info) { + c.mustEqual(error.message, 'Nope') 
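// The 'failed' handler above threw, so Bottleneck schedules no retry: the
// thrown error surfaces here on the 'error' event, while the job's own
// promise still rejects with the original 'boom' error, as the assertions
// further down confirm.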
+ errorEvents++ + }) + + const job = function () { + return Promise.reject(new Error('boom')) + } + + try { + await c.limiter.schedule(job) + throw new Error('Should not reach') + } catch (error) { + c.mustEqual(error.message, 'boom') + caught = true + } + c.mustEqual(failedEvents, 1) + c.mustEqual(retryEvents, 0) + c.mustEqual(errorEvents, 1) + c.mustEqual(caught, true) + c.mustEqual(c.limiter.counts().EXECUTING, 0) + c.mustEqual(c.limiter.counts().DONE, 1) + }) + + it('Should not retry when user returns an error (async)', async function () { + c = makeTest({ errorEventsExpected: true, trackDoneStatus: true }) + var failedEvents = 0 + var retryEvents = 0 + var errorEvents = 0 + var caught = false + + c.limiter.on('failed', function (error, info) { + c.mustEqual(c.limiter.counts().EXECUTING, 1) + c.mustEqual(info.retryCount, failedEvents) + failedEvents++ + return Promise.reject(new Error('Nope')) + }) + + c.limiter.on('retry', function (error, info) { + retryEvents++ + }) + + c.limiter.on('error', function (error, info) { + c.mustEqual(error.message, 'Nope') + errorEvents++ + }) + + const job = function () { + return Promise.reject(new Error('boom')) + } + + try { + await c.limiter.schedule(job) + throw new Error('Should not reach') + } catch (error) { + c.mustEqual(error.message, 'boom') + caught = true + } + c.mustEqual(failedEvents, 1) + c.mustEqual(retryEvents, 0) + c.mustEqual(errorEvents, 1) + c.mustEqual(caught, true) + c.mustEqual(c.limiter.counts().EXECUTING, 0) + c.mustEqual(c.limiter.counts().DONE, 1) + }) + + it('Should not retry when user returns null (sync)', async function () { + c = makeTest({ trackDoneStatus: true }) + var failedEvents = 0 + var retryEvents = 0 + var caught = false + + c.limiter.on('failed', function (error, info) { + c.mustEqual(c.limiter.counts().EXECUTING, 1) + c.mustEqual(info.retryCount, failedEvents) + failedEvents++ + return null + }) + + c.limiter.on('retry', function (error, info) { + retryEvents++ + }) + + const job = function () { + return Promise.reject(new Error('boom')) + } + + try { + await c.limiter.schedule(job) + throw new Error('Should not reach') + } catch (error) { + c.mustEqual(error.message, 'boom') + caught = true + } + c.mustEqual(failedEvents, 1) + c.mustEqual(retryEvents, 0) + c.mustEqual(caught, true) + c.mustEqual(c.limiter.counts().EXECUTING, 0) + c.mustEqual(c.limiter.counts().DONE, 1) + }) + + it('Should not retry when user returns null (async)', async function () { + c = makeTest({ trackDoneStatus: true }) + var failedEvents = 0 + var retryEvents = 0 + var caught = false + + c.limiter.on('failed', function (error, info) { + c.mustEqual(c.limiter.counts().EXECUTING, 1) + c.mustEqual(info.retryCount, failedEvents) + failedEvents++ + return Promise.resolve(null) + }) + + c.limiter.on('retry', function (error, info) { + retryEvents++ + }) + + const job = function () { + return Promise.reject(new Error('boom')) + } + + try { + await c.limiter.schedule(job) + throw new Error('Should not reach') + } catch (error) { + c.mustEqual(error.message, 'boom') + caught = true + } + c.mustEqual(failedEvents, 1) + c.mustEqual(retryEvents, 0) + c.mustEqual(caught, true) + c.mustEqual(c.limiter.counts().EXECUTING, 0) + c.mustEqual(c.limiter.counts().DONE, 1) + }) + +}) diff --git a/node_modules/bottleneck/test/spawn/increaseKeepAlive.js b/node_modules/bottleneck/test/spawn/increaseKeepAlive.js new file mode 100644 index 000000000..4bea612cd --- /dev/null +++ b/node_modules/bottleneck/test/spawn/increaseKeepAlive.js @@ -0,0 +1,17 @@ +var 
Bottleneck = require('../bottleneck.js') +var now = Date.now() + +var limiter = new Bottleneck({ + reservoir: 2, + reservoirIncreaseAmount: 2, + reservoirIncreaseInterval: 200 +}) +var f1 = () => { + var secDiff = Math.floor((Date.now() - now) / 100) + return Promise.resolve(`[${secDiff}]`) +} + +limiter.schedule(f1).then((x) => process.stdout.write(x)) +limiter.schedule(f1).then((x) => process.stdout.write(x)) +limiter.schedule(f1).then((x) => process.stdout.write(x)) +limiter.schedule(f1).then((x) => process.stdout.write(x)) diff --git a/node_modules/bottleneck/test/spawn/refreshKeepAlive.js b/node_modules/bottleneck/test/spawn/refreshKeepAlive.js new file mode 100644 index 000000000..deb09926d --- /dev/null +++ b/node_modules/bottleneck/test/spawn/refreshKeepAlive.js @@ -0,0 +1,17 @@ +var Bottleneck = require('../bottleneck.js') +var now = Date.now() + +var limiter = new Bottleneck({ + reservoir: 2, + reservoirRefreshAmount: 2, + reservoirRefreshInterval: 200 +}) +var f1 = () => { + var secDiff = Math.floor((Date.now() - now) / 100) + return Promise.resolve(`[${secDiff}]`) +} + +limiter.schedule(f1).then((x) => process.stdout.write(x)) +limiter.schedule(f1).then((x) => process.stdout.write(x)) +limiter.schedule(f1).then((x) => process.stdout.write(x)) +limiter.schedule(f1).then((x) => process.stdout.write(x)) diff --git a/node_modules/bottleneck/test/states.js b/node_modules/bottleneck/test/states.js new file mode 100644 index 000000000..c65ed77ff --- /dev/null +++ b/node_modules/bottleneck/test/states.js @@ -0,0 +1,103 @@ +var States = require('../lib/States') +var assert = require('assert') +var c = require('./context')({datastore: 'local'}) +var Bottleneck = require('./bottleneck') + +describe('States', function () { + + it('Should be created and be empty', function () { + var states = new States(["A", "B", "C"]) + c.mustEqual(states.statusCounts(), { A: 0, B: 0, C: 0 }) + }) + + it('Should start new series', function () { + var states = new States(["A", "B", "C"]) + + states.start('x') + states.start('y') + + c.mustEqual(states.statusCounts(), { A: 2, B: 0, C: 0 }) + }) + + it('Should increment', function () { + var states = new States(["A", "B", "C"]) + + states.start('x') + states.start('y') + states.next('x') + states.next('y') + states.next('x') + c.mustEqual(states.statusCounts(), { A: 0, B: 1, C: 1 }) + + states.next('z') + c.mustEqual(states.statusCounts(), { A: 0, B: 1, C: 1 }) + + states.next('x') + c.mustEqual(states.statusCounts(), { A: 0, B: 1, C: 0 }) + + states.next('x') + c.mustEqual(states.statusCounts(), { A: 0, B: 1, C: 0 }) + + states.next('y') + states.next('y') + c.mustEqual(states.statusCounts(), { A: 0, B: 0, C: 0 }) + }) + + it('Should remove', function () { + var states = new States(["A", "B", "C"]) + + states.start('x') + states.start('y') + states.next('x') + states.next('y') + states.next('x') + c.mustEqual(states.statusCounts(), { A: 0, B: 1, C: 1 }) + + states.remove('x') + c.mustEqual(states.statusCounts(), { A: 0, B: 1, C: 0 }) + + states.remove('y') + c.mustEqual(states.statusCounts(), { A: 0, B: 0, C: 0 }) + }) + + it('Should return current status', function () { + var states = new States(["A", "B", "C"]) + + states.start('x') + states.start('y') + states.next('x') + states.next('y') + states.next('x') + c.mustEqual(states.statusCounts(), { A: 0, B: 1, C: 1 }) + + c.mustEqual(states.jobStatus('x'), 'C') + c.mustEqual(states.jobStatus('y'), 'B') + c.mustEqual(states.jobStatus('z'), null) + }) + + it('Should return job ids for a status', function 
(done) { + var states = new States(["A", "B", "C"]) + + states.start('x') + states.start('y') + states.start('z') + states.next('x') + states.next('y') + states.next('x') + states.next('z') + c.mustEqual(states.statusCounts(), { A: 0, B: 2, C: 1 }) + + c.mustEqual(states.statusJobs().sort(), ['x', 'y', 'z']) + c.mustEqual(states.statusJobs('A'), []) + c.mustEqual(states.statusJobs('B').sort(), ['y', 'z']) + c.mustEqual(states.statusJobs('C'), ['x']) + try { + states.statusJobs('Z') + } catch (err) { + if (process.env.BUILD !== 'es5' && process.env.BUILD !== 'light') { + assert(err instanceof Bottleneck.BottleneckError) + } + done() + } + }) +}) diff --git a/node_modules/bottleneck/test/stop.js b/node_modules/bottleneck/test/stop.js new file mode 100644 index 000000000..2300e4f47 --- /dev/null +++ b/node_modules/bottleneck/test/stop.js @@ -0,0 +1,208 @@ +var makeTest = require('./context') +var Bottleneck = require('./bottleneck') +var assert = require('assert') + +describe('Stop', function () { + var c + + afterEach(function () { + return c.limiter.disconnect(false) + }) + + it('Should stop and drop the queue', function (done) { + c = makeTest({ + maxConcurrent: 2, + minTime: 100, + trackDoneStatus: true + }) + var submitFailed = false + var queuedDropped = false + var scheduledDropped = false + var dropped = 0 + + c.limiter.on('dropped', function () { + dropped++ + }) + + c.pNoErrVal(c.limiter.schedule({id: '0'}, c.promise, null, 0), 0) + + c.pNoErrVal(c.limiter.schedule({id: '1'}, c.slowPromise, 100, null, 1), 1) + + c.limiter.schedule({id: '2'}, c.promise, null, 2) + .catch(function (err) { + c.mustEqual(err.message, 'Dropped!') + scheduledDropped = true + }) + + c.limiter.schedule({id: '3'}, c.promise, null, 3) + .catch(function (err) { + c.mustEqual(err.message, 'Dropped!') + queuedDropped = true + }) + + setTimeout(function () { + var counts = c.limiter.counts() + c.mustEqual(counts.RECEIVED, 0) + c.mustEqual(counts.QUEUED, 1) + c.mustEqual(counts.RUNNING, 1) + c.mustEqual(counts.EXECUTING, 1) + c.mustEqual(counts.DONE, 1) + + c.limiter.stop({ + enqueueErrorMessage: 'Stopped!', + dropErrorMessage: 'Dropped!' 
+ }) + .then(function () { + counts = c.limiter.counts() + c.mustEqual(submitFailed, true) + c.mustEqual(scheduledDropped, true) + c.mustEqual(queuedDropped, true) + c.mustEqual(dropped, 2) + c.mustEqual(counts.RECEIVED, 0) + c.mustEqual(counts.QUEUED, 0) + c.mustEqual(counts.RUNNING, 0) + c.mustEqual(counts.EXECUTING, 0) + c.mustEqual(counts.DONE, 2) + + c.checkResultsOrder([[0], [1]]) + done() + }) + + c.limiter.schedule(() => Promise.resolve(true)) + .catch(function (err) { + c.mustEqual(err.message, 'Stopped!') + submitFailed = true + }) + + }, 125) + }) + + it('Should stop and let the queue finish', function (done) { + c = makeTest({ + maxConcurrent: 1, + minTime: 100, + trackDoneStatus: true + }) + var submitFailed = false + var dropped = 0 + + c.limiter.on('dropped', function () { + dropped++ + }) + + c.pNoErrVal(c.limiter.schedule({id: '1'}, c.promise, null, 1), 1) + c.pNoErrVal(c.limiter.schedule({id: '2'}, c.promise, null, 2), 2) + c.pNoErrVal(c.limiter.schedule({id: '3'}, c.slowPromise, 100, null, 3), 3) + + setTimeout(function () { + var counts = c.limiter.counts() + c.mustEqual(counts.RECEIVED, 0) + c.mustEqual(counts.QUEUED, 1) + c.mustEqual(counts.RUNNING, 1) + c.mustEqual(counts.EXECUTING, 0) + c.mustEqual(counts.DONE, 1) + + c.limiter.stop({ + enqueueErrorMessage: 'Stopped!', + dropWaitingJobs: false + }) + .then(function () { + counts = c.limiter.counts() + c.mustEqual(submitFailed, true) + c.mustEqual(dropped, 0) + c.mustEqual(counts.RECEIVED, 0) + c.mustEqual(counts.QUEUED, 0) + c.mustEqual(counts.RUNNING, 0) + c.mustEqual(counts.EXECUTING, 0) + c.mustEqual(counts.DONE, 4) + + c.checkResultsOrder([[1], [2], [3]]) + done() + }) + + c.limiter.schedule(() => Promise.resolve(true)) + .catch(function (err) { + c.mustEqual(err.message, 'Stopped!') + submitFailed = true + }) + + }, 75) + }) + + it('Should still resolve when rejectOnDrop is false', function (done) { + c = makeTest({ + maxConcurrent: 1, + minTime: 100, + rejectOnDrop: false + }) + + c.pNoErrVal(c.limiter.schedule({id: '1'}, c.promise, null, 1), 1) + c.pNoErrVal(c.limiter.schedule({id: '2'}, c.promise, null, 2), 2) + c.pNoErrVal(c.limiter.schedule({id: '3'}, c.slowPromise, 100, null, 3), 3) + + c.limiter.stop() + .then(function () { + return c.limiter.stop() + }) + .then(function () { + done(new Error("Should not be here")) + }) + .catch(function (err) { + c.mustEqual(err.message, "stop() has already been called") + done() + }) + }) + + it('Should not allow calling stop() twice when dropWaitingJobs=true', function (done) { + c = makeTest({ + maxConcurrent: 1, + minTime: 100 + }) + var failed = 0 + var handler = function (err) { + c.mustEqual(err.message, "This limiter has been stopped.") + failed++ + } + + c.pNoErrVal(c.limiter.schedule({id: '1'}, c.promise, null, 1), 1).catch(handler) + c.pNoErrVal(c.limiter.schedule({id: '2'}, c.promise, null, 2), 2).catch(handler) + c.pNoErrVal(c.limiter.schedule({id: '3'}, c.slowPromise, 100, null, 3), 3).catch(handler) + + c.limiter.stop({ dropWaitingJobs: true }) + .then(function () { + return c.limiter.stop({ dropWaitingJobs: true }) + }) + .then(function () { + done(new Error("Should not be here")) + }) + .catch(function (err) { + c.mustEqual(err.message, "stop() has already been called") + c.mustEqual(failed, 3) + done() + }) + }) + + it('Should not allow calling stop() twice when dropWaitingJobs=false', function (done) { + c = makeTest({ + maxConcurrent: 1, + minTime: 100 + }) + + c.pNoErrVal(c.limiter.schedule({id: '1'}, c.promise, null, 1), 1) + 
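This last test stops the limiter gracefully and then checks that a second stop() call rejects. For reference, a small sketch of the two stop() modes; the messages are illustrative:

import Bottleneck from "bottleneck";

const limiter = new Bottleneck({ maxConcurrent: 1, minTime: 100 });

// Graceful: new jobs are refused, but already-queued jobs run to completion.
limiter.stop({ dropWaitingJobs: false, enqueueErrorMessage: "Shutting down" });

// Hard stop (the default): queued and running jobs are dropped and their
// promises reject with dropErrorMessage. Calling stop() a second time, in
// either mode, rejects with "stop() has already been called".
// limiter.stop({ dropWaitingJobs: true, dropErrorMessage: "Dropped on shutdown" });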
c.pNoErrVal(c.limiter.schedule({id: '2'}, c.promise, null, 2), 2) + c.pNoErrVal(c.limiter.schedule({id: '3'}, c.slowPromise, 100, null, 3), 3) + + c.limiter.stop({ dropWaitingJobs: false }) + .then(function () { + return c.limiter.stop({ dropWaitingJobs: false }) + }) + .then(function () { + done(new Error("Should not be here")) + }) + .catch(function (err) { + c.mustEqual(err.message, "stop() has already been called") + done() + }) + }) + +}) diff --git a/package-lock.json b/package-lock.json index 455a89edc..441d94c4f 100644 --- a/package-lock.json +++ b/package-lock.json @@ -247,6 +247,15 @@ "deprecation": "^2.3.1" } }, + "@octokit/plugin-retry": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@octokit/plugin-retry/-/plugin-retry-3.0.3.tgz", + "integrity": "sha512-RFvcBg4JGyfWTWE35EIF7jf/eglIk5MuvfygzdIMLIVK3/4Ywz3X1x9Ri75nlyAmk53EpVWB4DwM/xEB1NXxXA==", + "requires": { + "@octokit/types": "^5.0.0", + "bottleneck": "^2.15.3" + } + }, "@octokit/request": { "version": "5.4.9", "resolved": "https://registry.npmjs.org/@octokit/request/-/request-5.4.9.tgz", @@ -883,6 +892,11 @@ "integrity": "sha512-Zc6sowqlCWu3+V0bocZwdaPPXlRv14EHtYcQDCOghj9EdyKLMkAOODBh3HHAx5r7QRylDYCOaXa/b/edgBLDpA==", "dev": true }, + "bottleneck": { + "version": "2.19.5", + "resolved": "https://registry.npmjs.org/bottleneck/-/bottleneck-2.19.5.tgz", + "integrity": "sha512-VHiNCbI1lKdl44tGrhNfU3lup0Tj/ZBMJB5/2ZbNXRCPuRCO7ed2mgcK4r17y+KB2EfuYuRaVlwNbAeaWGSpbw==" + }, "boxen": { "version": "4.2.0", "resolved": "https://registry.npmjs.org/boxen/-/boxen-4.2.0.tgz", diff --git a/package.json b/package.json index 48c42febb..72e655744 100644 --- a/package.json +++ b/package.json @@ -24,6 +24,7 @@ "@actions/github": "^4.0.0", "@actions/http-client": "^1.0.8", "@actions/tool-cache": "^1.5.5", + "@octokit/plugin-retry": "^3.0.3", "commander": "^6.0.0", "console-log-level": "^1.4.1", "file-url": "^3.0.0", diff --git a/src/api-client.ts b/src/api-client.ts index 65761743b..e80b42bfc 100644 --- a/src/api-client.ts +++ b/src/api-client.ts @@ -1,4 +1,5 @@ -import * as github from "@actions/github"; +import * as githubUtils from "@actions/github/lib/utils"; +import * as retry from "@octokit/plugin-retry"; import consoleLogLevel from "console-log-level"; import * as path from "path"; @@ -13,11 +14,14 @@ export const getApiClient = function ( if (isLocalRun() && !allowLocalRun) { throw new Error("Invalid API call in local run"); } - return github.getOctokit(githubAuth, { - baseUrl: getApiUrl(githubUrl), - userAgent: "CodeQL Action", - log: consoleLogLevel({ level: "debug" }), - }); + const retryingOctokit = githubUtils.GitHub.plugin(retry.retry); + return new retryingOctokit( + githubUtils.getOctokitOptions(githubAuth, { + baseUrl: getApiUrl(githubUrl), + userAgent: "CodeQL Action", + log: consoleLogLevel({ level: "debug" }), + }) + ); }; function getApiUrl(githubUrl: string): string { diff --git a/src/upload-lib.ts b/src/upload-lib.ts index 187f8bd3b..7f5f2545e 100644 --- a/src/upload-lib.ts +++ b/src/upload-lib.ts @@ -53,72 +53,20 @@ async function uploadPayload( return; } - // Make up to 4 attempts to upload, and sleep for these - // number of seconds between each attempt. - // We don't want to backoff too much to avoid wasting action - // minutes, but just waiting a little bit could maybe help. - const backoffPeriods = [1, 5, 15]; - const client = api.getApiClient(githubAuth, githubUrl); - for (let attempt = 0; attempt <= backoffPeriods.length; attempt++) { - const reqURL = - mode === "actions" - ? 
"PUT /repos/:owner/:repo/code-scanning/analysis" - : "POST /repos/:owner/:repo/code-scanning/sarifs"; - const response = await client.request(reqURL, { - owner: repositoryNwo.owner, - repo: repositoryNwo.repo, - data: payload, - }); - - logger.debug(`response status: ${response.status}`); - - const statusCode = response.status; - if (statusCode === 202) { - logger.info("Successfully uploaded results"); - return; - } - - const requestID = response.headers["x-github-request-id"]; - - // On any other status code that's not 5xx mark the upload as failed - if (!statusCode || statusCode < 500 || statusCode >= 600) { - throw new Error( - `Upload failed (${requestID}): (${statusCode}) ${JSON.stringify( - response.data - )}` - ); - } - - // On a 5xx status code we may retry the request - if (attempt < backoffPeriods.length) { - // Log the failure as a warning but don't mark the action as failed yet - logger.warning( - `Upload attempt (${attempt + 1} of ${ - backoffPeriods.length + 1 - }) failed (${requestID}). Retrying in ${ - backoffPeriods[attempt] - } seconds: (${statusCode}) ${JSON.stringify(response.data)}` - ); - // Sleep for the backoff period - await new Promise((r) => setTimeout(r, backoffPeriods[attempt] * 1000)); - continue; - } else { - // If the upload fails with 5xx then we assume it is a temporary problem - // and not an error that the user has caused or can fix. - // We avoid marking the job as failed to avoid breaking CI workflows. - throw new Error( - `Upload failed (${requestID}): (${statusCode}) ${JSON.stringify( - response.data - )}` - ); - } - } - - // This case shouldn't ever happen as the final iteration of the loop - // will always throw an error instead of exiting to here. - throw new Error("Upload failed"); + const reqURL = + mode === "actions" + ? "PUT /repos/:owner/:repo/code-scanning/analysis" + : "POST /repos/:owner/:repo/code-scanning/sarifs"; + const response = await client.request(reqURL, { + owner: repositoryNwo.owner, + repo: repositoryNwo.repo, + data: payload, + }); + + logger.debug(`response status: ${response.status}`); + logger.info("Successfully uploaded results"); } export interface UploadStatusReport {