diff --git a/README.md b/README.md index 7c2a941c41..6e8f056881 100644 --- a/README.md +++ b/README.md @@ -228,7 +228,7 @@ Since there are a few job queue solutions, here is a table comparing them: | Group Support | ✓ | | | | | | | Batches Support | ✓ | | | | | | | Parent/Child Dependencies | ✓ | ✓ | | | | | -| Debouncing | ✓ | ✓ | ✓ | | | | +| Deduplication | ✓ | ✓ | ✓ | | | | | Priorities | ✓ | ✓ | ✓ | ✓ | | ✓ | | Concurrency | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | Delayed jobs | ✓ | ✓ | ✓ | ✓ | | ✓ | diff --git a/docs/gitbook/SUMMARY.md b/docs/gitbook/SUMMARY.md index 32944bac16..82a163692c 100644 --- a/docs/gitbook/SUMMARY.md +++ b/docs/gitbook/SUMMARY.md @@ -70,7 +70,8 @@ - [Producers](guide/nestjs/producers.md) - [Queue Events Listeners](guide/nestjs/queue-events-listeners.md) - [Going to production](guide/going-to-production.md) -- [Migration to newer versions](guide/migration-to-newer-versions.md) +- [Migration to newer versions](guide/migrations/migration-to-newer-versions.md) + - [Version 6](guide/migrations/v6.md) - [Troubleshooting](guide/troubleshooting.md) ## Patterns diff --git a/docs/gitbook/guide/flows/fail-parent.md b/docs/gitbook/guide/flows/fail-parent.md index 12a8a5b205..b179f05abe 100644 --- a/docs/gitbook/guide/flows/fail-parent.md +++ b/docs/gitbook/guide/flows/fail-parent.md @@ -2,7 +2,7 @@ In some situations, you may need to fail a job when _one of its children_ fails. -The pattern to solve this requirement consists of using the **`failParentOnFailure`** option. +The pattern to solve this requirement consists of using the **`onChildFailure`** option as **fail**, this is also our default behavior. 
```typescript const flow = new FlowProducer({ connection }); @@ -16,13 +16,13 @@ const originalTree = await flow.add({ name, data: { idx: 0, foo: 'bar' }, queueName: 'childrenQueueName', - opts: { failParentOnFailure: true }, + opts: { onChildFailure: 'fail' }, children: [ { name, data: { idx: 1, foo: 'bah' }, queueName: 'grandChildrenQueueName', - opts: { failParentOnFailure: true }, + opts: { onChildFailure: 'fail' }, }, { name, diff --git a/docs/gitbook/guide/flows/ignore-dependency.md b/docs/gitbook/guide/flows/ignore-dependency.md index 8dcd628f18..0d3d2f83a3 100644 --- a/docs/gitbook/guide/flows/ignore-dependency.md +++ b/docs/gitbook/guide/flows/ignore-dependency.md @@ -2,7 +2,7 @@ In some situations, you may have a parent job and need to ignore when one of its children fail. -The pattern to solve this requirement consists on using the **ignoreDependencyOnFailure** option. This option will make sure that when a job fails, the dependency is ignored from the parent, so the parent will complete without waiting for the failed children. +The pattern to solve this requirement consists on using the **onChildFailure** option as **ignore**. This option will make sure that when a job fails, the dependency is ignored from the parent, so the parent will complete without waiting for the failed children. ```typescript const flow = new FlowProducer({ connection }); @@ -16,7 +16,7 @@ const originalTree = await flow.add({ name, data: { idx: 0, foo: 'bar' }, queueName: 'childrenQueueName', - opts: { ignoreDependencyOnFailure: true }, + opts: { onChildFailure: 'ignore' }, children: [ { name, diff --git a/docs/gitbook/guide/flows/remove-dependency.md b/docs/gitbook/guide/flows/remove-dependency.md index ced85f147e..b9e03c17f9 100644 --- a/docs/gitbook/guide/flows/remove-dependency.md +++ b/docs/gitbook/guide/flows/remove-dependency.md @@ -2,7 +2,7 @@ In some situations, you may have a parent job and need to remove the relationship when one of its children fail. 
-The pattern to solve this requirement consists on using the **removeDependencyOnFailure** option. This option will make sure that when a job fails, the dependency is removed from the parent, so the parent will complete without waiting for the failed children. +The pattern to solve this requirement consists on using the **onChildFailure** option as **remove**. This option will make sure that when a job fails, the dependency is removed from the parent, so the parent will complete without waiting for the failed children. ```typescript const flow = new FlowProducer({ connection }); @@ -16,7 +16,7 @@ const originalTree = await flow.add({ name, data: { idx: 0, foo: 'bar' }, queueName: 'childrenQueueName', - opts: { removeDependencyOnFailure: true }, + opts: { onChildFailure: 'remove' }, children: [ { name, diff --git a/docs/gitbook/guide/migration-to-newer-versions.md b/docs/gitbook/guide/migrations/migration-to-newer-versions.md similarity index 99% rename from docs/gitbook/guide/migration-to-newer-versions.md rename to docs/gitbook/guide/migrations/migration-to-newer-versions.md index 0b8ca2a175..ad2c27916b 100644 --- a/docs/gitbook/guide/migration-to-newer-versions.md +++ b/docs/gitbook/guide/migrations/migration-to-newer-versions.md @@ -55,4 +55,3 @@ Since BullMQ supports global pause, one possible strategy, if suitable for your ### Use new queues altogether This drastic solution involves discontinuing use of older queues and creating new ones. You could rename older queues (e.g., "myQueueV2"), use a new Redis host, or maintain two versions of the service—one running an older BullMQ version with old queues, and a newer one with the latest BullMQ and a different set of queues. When the older version has no more jobs to process, it can be retired, leaving only the upgraded version. 
-
diff --git a/docs/gitbook/guide/migrations/v6.md b/docs/gitbook/guide/migrations/v6.md
new file mode 100644
index 0000000000..b73e60bf46
--- /dev/null
+++ b/docs/gitbook/guide/migrations/v6.md
@@ -0,0 +1,25 @@
+---
+description: Tips and hints on how to migrate to v6.
+---
+
+# Migration to v6
+
+Make sure to call the **runMigrations** method from the Queue class in order to execute all necessary changes when coming from an older version.
+
+## Migration of deprecated paused key
+
+If you have paused queues after upgrading to this version, these jobs will be moved to the wait state when initializing any of our instances (Worker, Queue, QueueEvents or FlowProducer).
+
+The paused key is no longer needed as this state is already represented inside the meta key. It also improves the process of pausing or resuming a queue as we don't need to rename any key.
+
+## Remove legacy markers
+
+When migrating from versions before v5,
+it's recommended to follow this process:
+
+1. Pause your queues.
+2. Upgrade to v6.
+3. Instantiate a Queue instance and execute the runMigrations method, where migrations will be executed.
+4. Resume your queues.
+
+This way you will prevent your workers from picking up a legacy marker that is no longer used, because new markers are added in a different structure.
diff --git a/docs/gitbook/guide/rate-limiting.md b/docs/gitbook/guide/rate-limiting.md
index 49e155e528..84c761cb77 100644
--- a/docs/gitbook/guide/rate-limiting.md
+++ b/docs/gitbook/guide/rate-limiting.md
@@ -62,21 +62,23 @@ await queue.add('rate limited paint', { customerId: 'my-customer-id' });
 
 Sometimes is useful to rate-limit a queue manually instead of based on some static options. For example, you may have an API that returns `429 Too Many Requests`, and you want to rate-limit the queue based on that response.
 
-For this purpose, you can use the worker method **`rateLimit`** like this: +For this purpose, you can use the queue method **`rateLimit`** like this: ```typescript -import { Worker } from 'bullmq'; +import { Queue, RateLimitError, Worker } from 'bullmq'; + +const queue = new Queue('myQueue', { connection }); const worker = new Worker( 'myQueue', async () => { const [isRateLimited, duration] = await doExternalCall(); if (isRateLimited) { - await worker.rateLimit(duration); + await queue.rateLimit(duration); // Do not forget to throw this special exception, // since we must differentiate this case from a failure // in order to move the job to wait again. - throw Worker.RateLimitError(); + throw new RateLimitError(); } }, { @@ -130,6 +132,6 @@ By removing rate limit key, workers will be able to pick jobs again and your rat ## Read more: -- 💡 [Rate Limit API Reference](https://api.docs.bullmq.io/classes/v5.Worker.html#rateLimit) +- 💡 [Rate Limit API Reference](https://api.docs.bullmq.io/classes/v5.Queue.html#rateLimit) - 💡 [Get Rate Limit Ttl API Reference](https://api.docs.bullmq.io/classes/v5.Queue.html#getRateLimitTtl) - 💡 [Remove Rate Limit Key API Reference](https://api.docs.bullmq.io/classes/v5.Queue.html#removeRateLimitKey) diff --git a/docs/gitbook/patterns/stop-retrying-jobs.md b/docs/gitbook/patterns/stop-retrying-jobs.md index 28757e9fee..2a7806cafd 100644 --- a/docs/gitbook/patterns/stop-retrying-jobs.md +++ b/docs/gitbook/patterns/stop-retrying-jobs.md @@ -25,21 +25,23 @@ await queue.add( When a job is rate limited using `Worker.RateLimitError` and tried again, the `attempts` check is ignored, as rate limiting is not considered a real error. 
However, if you want to manually check the attempts and avoid retrying the job, you can do the following: ```typescript -import { Worker, UnrecoverableError } from 'bullmq'; +import { Queue, RateLimitError, Worker, UnrecoverableError } from 'bullmq'; + +const queue = new Queue('myQueue', { connection }); const worker = new Worker( 'myQueue', async job => { const [isRateLimited, duration] = await doExternalCall(); if (isRateLimited) { - await worker.rateLimit(duration); + await queue.rateLimit(duration); if (job.attemptsMade >= job.opts.attempts) { throw new UnrecoverableError('Unrecoverable'); } // Do not forget to throw this special exception, // since we must differentiate this case from a failure // in order to move the job to wait again. - throw Worker.RateLimitError(); + throw new RateLimitError(); } }, { @@ -54,4 +56,4 @@ const worker = new Worker( ## Read more: -- 💡 [Rate Limit API Reference](https://api.docs.bullmq.io/classes/v5.Worker.html#rateLimit) +- 💡 [Rate Limit API Reference](https://api.docs.bullmq.io/classes/v5.Queue.html#rateLimit) diff --git a/python/bullmq/job.py b/python/bullmq/job.py index 7d69101937..8ef0f73b9a 100644 --- a/python/bullmq/job.py +++ b/python/bullmq/job.py @@ -13,8 +13,7 @@ optsDecodeMap = { - 'fpof': 'failParentOnFailure', - 'idof': 'ignoreDependencyOnFailure', + 'ocf': 'onChildFailure', 'kl': 'keepLogs', } diff --git a/python/bullmq/queue.py b/python/bullmq/queue.py index 0bcffc1fae..91e18a37a9 100644 --- a/python/bullmq/queue.py +++ b/python/bullmq/queue.py @@ -141,7 +141,7 @@ async def getJobLogs(self, job_id:str, start = 0, end = -1, asc = True): "logs": result[0], "count": result[1] } - + async def obliterate(self, force: bool = False): """ Completely destroys the queue and all of its contents irreversibly. 
diff --git a/python/bullmq/scripts.py b/python/bullmq/scripts.py index d3c18232d0..34cd018014 100644 --- a/python/bullmq/scripts.py +++ b/python/bullmq/scripts.py @@ -22,6 +22,13 @@ basePath = os.path.dirname(os.path.realpath(__file__)) +onChildFailureMap = { + 'fail': 'f', + 'ignore': 'i', + 'remove': 'r', + 'wait': 'w' +} + class Scripts: def __init__(self, prefix: str, queueName: str, redisConnection: RedisConnection): @@ -31,11 +38,11 @@ def __init__(self, prefix: str, queueName: str, redisConnection: RedisConnection self.redisConnection = redisConnection self.redisClient = redisConnection.conn self.commands = { - "addStandardJob": self.redisClient.register_script(self.getScript("addStandardJob-8.lua")), + "addStandardJob": self.redisClient.register_script(self.getScript("addStandardJob-7.lua")), "addDelayedJob": self.redisClient.register_script(self.getScript("addDelayedJob-6.lua")), "addParentJob": self.redisClient.register_script(self.getScript("addParentJob-4.lua")), "addPrioritizedJob": self.redisClient.register_script(self.getScript("addPrioritizedJob-8.lua")), - "changePriority": self.redisClient.register_script(self.getScript("changePriority-7.lua")), + "changePriority": self.redisClient.register_script(self.getScript("changePriority-6.lua")), "cleanJobsInSet": self.redisClient.register_script(self.getScript("cleanJobsInSet-3.lua")), "extendLock": self.redisClient.register_script(self.getScript("extendLock-2.lua")), "getCounts": self.redisClient.register_script(self.getScript("getCounts-1.lua")), @@ -44,18 +51,19 @@ def __init__(self, prefix: str, queueName: str, redisConnection: RedisConnection "getState": self.redisClient.register_script(self.getScript("getState-8.lua")), "getStateV2": self.redisClient.register_script(self.getScript("getStateV2-8.lua")), "isJobInList": self.redisClient.register_script(self.getScript("isJobInList-1.lua")), - "moveStalledJobsToWait": self.redisClient.register_script(self.getScript("moveStalledJobsToWait-9.lua")), - 
"moveToActive": self.redisClient.register_script(self.getScript("moveToActive-11.lua")), + "moveStalledJobsToWait": self.redisClient.register_script(self.getScript("moveStalledJobsToWait-8.lua")), + "moveToActive": self.redisClient.register_script(self.getScript("moveToActive-10.lua")), "moveToDelayed": self.redisClient.register_script(self.getScript("moveToDelayed-8.lua")), - "moveToFinished": self.redisClient.register_script(self.getScript("moveToFinished-14.lua")), + "moveToFinished": self.redisClient.register_script(self.getScript("moveToFinished-13.lua")), "moveToWaitingChildren": self.redisClient.register_script(self.getScript("moveToWaitingChildren-5.lua")), "obliterate": self.redisClient.register_script(self.getScript("obliterate-2.lua")), "pause": self.redisClient.register_script(self.getScript("pause-7.lua")), - "promote": self.redisClient.register_script(self.getScript("promote-9.lua")), + "promote": self.redisClient.register_script(self.getScript("promote-8.lua")), "removeJob": self.redisClient.register_script(self.getScript("removeJob-2.lua")), - "reprocessJob": self.redisClient.register_script(self.getScript("reprocessJob-8.lua")), - "retryJob": self.redisClient.register_script(self.getScript("retryJob-11.lua")), - "moveJobsToWait": self.redisClient.register_script(self.getScript("moveJobsToWait-8.lua")), + "migrateDeprecatedPausedKey": self.redisClient.register_script(self.getScript("migrateDeprecatedPausedKey-2.lua")), + "reprocessJob": self.redisClient.register_script(self.getScript("reprocessJob-7.lua")), + "retryJob": self.redisClient.register_script(self.getScript("retryJob-10.lua")), + "moveJobsToWait": self.redisClient.register_script(self.getScript("moveJobsToWait-7.lua")), "saveStacktrace": self.redisClient.register_script(self.getScript("saveStacktrace-1.lua")), "updateData": self.redisClient.register_script(self.getScript("updateData-1.lua")), "updateProgress": self.redisClient.register_script(self.getScript("updateProgress-3.lua")), @@ 
-93,6 +101,12 @@ def addJobArgs(self, job: Job, waiting_children_key: str|None): jsonData = json.dumps(job.data, separators=(',', ':'), allow_nan=False) packedOpts = msgpack.packb(job.opts) + parent = {} + parent.update(job.parent or {}) + parent.update({ + 'ocf': onChildFailureMap.get(job.opts.get("ocf", ''), None) + }) + parent = job.parent parentKey = job.parentKey @@ -118,7 +132,7 @@ def addStandardJob(self, job: Job, timestamp: int, pipe = None): """ Add a standard job to the queue """ - keys = self.getKeys(['wait', 'paused', 'meta', 'id', + keys = self.getKeys(['wait', 'meta', 'id', 'completed', 'active', 'events', 'marker']) args = self.addJobArgs(job, None) args.append(timestamp) @@ -246,15 +260,15 @@ def saveStacktraceArgs(self, job_id: str, stacktrace: str, failedReason: str): return (keys, args) def retryJobArgs(self, job_id: str, lifo: bool, token: str, opts: dict = {}): - keys = self.getKeys(['active', 'wait', 'paused']) + keys = self.getKeys(['active', 'wait']) keys.append(self.toKey(job_id)) keys.append(self.keys['meta']) keys.append(self.keys['events']) keys.append(self.keys['delayed']) keys.append(self.keys['prioritized']) keys.append(self.keys['pc']) - keys.append(self.keys['marker']) keys.append(self.keys['stalled']) + keys.append(self.keys['marker']) push_cmd = "RPUSH" if lifo else "LPUSH" @@ -289,7 +303,6 @@ def promoteArgs(self, job_id: str): keys = self.getKeys(['delayed', 'wait', 'paused', 'meta', 'prioritized', 'active', 'pc', 'events', 'marker']) keys.append(self.toKey(job_id)) keys.append(self.keys['events']) - keys.append(self.keys['paused']) keys.append(self.keys['meta']) args = [self.keys[''], job_id] @@ -361,7 +374,6 @@ async def isJobInList(self, list_key: str, job_id: str): async def changePriority(self, job_id: str, priority:int = 0, lifo:bool = False): keys = [self.keys['wait'], - self.keys['paused'], self.keys['meta'], self.keys['prioritized'], self.keys['active'], @@ -395,7 +407,6 @@ async def reprocessJob(self, job: Job, state: 
str): keys.append(self.keys[state]) keys.append(self.keys['wait']) keys.append(self.keys['meta']) - keys.append(self.keys['paused']) keys.append(self.keys['active']) keys.append(self.keys['marker']) @@ -437,7 +448,7 @@ async def obliterate(self, count: int, force: bool = False): def moveJobsToWaitArgs(self, state: str, count: int, timestamp: int) -> int: keys = self.getKeys( - ['', 'events', state, 'wait', 'paused', 'meta', 'active', 'marker']) + ['', 'events', state, 'wait', 'meta', 'active', 'marker']) args = [count or 1000, timestamp or round(time.time()*1000), state] return (keys, args) @@ -470,7 +481,7 @@ async def moveToActive(self, token: str, opts: dict) -> list[Any]: limiter = opts.get("limiter", None) keys = self.getKeys(['wait', 'active', 'prioritized', 'events', - 'stalled', 'limiter', 'delayed', 'paused', 'meta', 'pc', 'marker']) + 'stalled', 'limiter', 'delayed', 'meta', 'pc', 'marker']) packedOpts = msgpack.packb( {"token": token, "lockDuration": lockDuration, "limiter": limiter}, use_bin_type=True) args = [self.keys[''], timestamp, packedOpts] @@ -503,7 +514,7 @@ def moveToFinishedArgs(self, job: Job, val: Any, propVal: str, shouldRemove, tar metricsKey = self.toKey('metrics:' + target) keys = self.getKeys(['wait', 'active', 'prioritized', 'events', - 'stalled', 'limiter', 'delayed', 'paused', 'meta', 'pc', target]) + 'stalled', 'limiter', 'delayed', 'meta', 'pc', target]) keys.append(self.toKey(job.id)) keys.append(metricsKey) keys.append(self.keys['marker']) @@ -537,8 +548,6 @@ def getMetricsSize(opts: dict): "attempts": job.attempts, "attemptsMade": job.attemptsMade, "maxMetricsSize": getMetricsSize(opts), - "fpof": opts.get("failParentOnFailure", False), - "idof": opts.get("ignoreDependencyOnFailure", False) }, use_bin_type=True) args = [job.id, timestamp, propVal, transformed_value or "", target, @@ -569,7 +578,7 @@ def extendLock(self, jobId: str, token: str, duration: int, client: Redis = None def moveStalledJobsToWait(self, maxStalledCount: 
int, stalledInterval: int): keys = self.getKeys(['stalled', 'wait', 'active', 'failed', - 'stalled-check', 'meta', 'paused', 'marker', 'events']) + 'stalled-check', 'meta', 'marker', 'events']) args = [maxStalledCount, self.keys[''], round( time.time() * 1000), stalledInterval] return self.commands["moveStalledJobsToWait"](keys, args) diff --git a/src/classes/index.ts b/src/classes/index.ts index 88c348d845..c2a0e63587 100644 --- a/src/classes/index.ts +++ b/src/classes/index.ts @@ -15,7 +15,6 @@ export * from './queue-getters'; export * from './queue-keys'; export * from './queue'; export * from './redis-connection'; -export * from './repeat'; export * from './sandbox'; export * from './scripts'; export * from './worker'; diff --git a/src/classes/job.ts b/src/classes/job.ts index b63e5cdf49..deb1a713b2 100644 --- a/src/classes/job.ts +++ b/src/classes/job.ts @@ -44,12 +44,12 @@ const optsDecodeMap = { fpof: 'failParentOnFailure', idof: 'ignoreDependencyOnFailure', kl: 'keepLogs', + ocf: 'onChildFailure', rdof: 'removeDependencyOnFailure', tm: 'telemetryMetadata', }; const optsEncodeMap = invertObject(optsDecodeMap); -optsEncodeMap.debounce = 'de'; export const PRIORITY_LIMIT = 2 ** 21; @@ -149,12 +149,6 @@ export class Job< */ parent?: ParentKeys; - /** - * Debounce identifier. - * @deprecated use deduplicationId - */ - debounceId?: string; - /** * Deduplication identifier. */ @@ -224,10 +218,9 @@ export class Job< ? { id: opts.parent.id, queueKey: opts.parent.queue } : undefined; - this.debounceId = opts.debounce ? opts.debounce.id : undefined; this.deduplicationId = opts.deduplication ? 
opts.deduplication.id - : this.debounceId; + : undefined; this.toKey = queue.toKey.bind(queue); this.setScripts(); @@ -353,7 +346,6 @@ export class Job< } if (json.deid) { - job.debounceId = json.deid; job.deduplicationId = json.deid; } @@ -480,7 +472,6 @@ export class Job< timestamp: this.timestamp, failedReason: JSON.stringify(this.failedReason), stacktrace: JSON.stringify(this.stacktrace), - debounceId: this.debounceId, deduplicationId: this.deduplicationId, repeatJobKey: this.repeatJobKey, returnvalue: JSON.stringify(this.returnvalue), @@ -1264,27 +1255,6 @@ export class Job< throw new Error(`Delay and repeat options could not be used together`); } - if (this.opts.removeDependencyOnFailure && this.opts.failParentOnFailure) { - throw new Error( - `RemoveDependencyOnFailure and failParentOnFailure options can not be used together`, - ); - } - - if ( - this.opts.removeDependencyOnFailure && - this.opts.ignoreDependencyOnFailure - ) { - throw new Error( - `RemoveDependencyOnFailure and ignoreDependencyOnFailure options can not be used together`, - ); - } - - if (this.opts.failParentOnFailure && this.opts.ignoreDependencyOnFailure) { - throw new Error( - `FailParentOnFailure and ignoreDependencyOnFailure options can not be used together`, - ); - } - if (`${parseInt(this.id, 10)}` === this.id) { throw new Error('Custom Ids cannot be integers'); } diff --git a/src/classes/migrations.ts b/src/classes/migrations.ts new file mode 100644 index 0000000000..7a25315b4f --- /dev/null +++ b/src/classes/migrations.ts @@ -0,0 +1,133 @@ +import { RedisClient } from '../interfaces'; +import { isVersionLowerThan } from '../utils'; + +export interface MigrationOptions { + prefix: string; + queueName: string; + packageVersion?: string; +} + +export type MigrationFunction = ( + client: RedisClient, + opts: MigrationOptions, +) => Promise; + +export const checkPendingMigrations = async ( + client: RedisClient, + opts: MigrationOptions, +) => { + const metaKey = 
getRedisKeyFromOpts(opts, 'meta'); + const currentVersion = await client.hget(metaKey, 'version'); + + // If version is not set yet, it means it's an enterily new user + if (!currentVersion) { + return false; + } + + if (isVersionLowerThan(currentVersion, '6.0.0')) { + const migrationsKey = getRedisKeyFromOpts(opts, 'migrations'); + const existingMigrations = await client.zrange(migrationsKey, 0, -1); + return migrations.some( + migration => + !existingMigrations.includes(`${migration.version}-${migration.name}`), + ); + } + + return false; +}; + +const getCommandName = (commandName: string, packageVersion: string) => + `${commandName}:${packageVersion}`; + +export const migrations: { + name: string; + version: string; + migrate: MigrationFunction; +}[] = [ + { + name: 'remove-legacy-markers', + version: '6.0.0', + migrate: async (client: RedisClient, opts: MigrationOptions) => { + const keys: (string | number)[] = [ + getRedisKeyFromOpts(opts, 'wait'), + getRedisKeyFromOpts(opts, 'paused'), + getRedisKeyFromOpts(opts, 'meta'), + getRedisKeyFromOpts(opts, 'completed'), + getRedisKeyFromOpts(opts, 'failed'), + ]; + const args = [getRedisKeyFromOpts(opts, '')]; + + await (client)[ + getCommandName('removeLegacyMarkers', opts.packageVersion) + ](keys.concat(args)); + }, + }, + { + name: 'migrate-paused-jobs', + version: '6.0.0', + migrate: async (client: RedisClient, opts: MigrationOptions) => { + const keys: (string | number)[] = [ + getRedisKeyFromOpts(opts, 'paused'), + getRedisKeyFromOpts(opts, 'wait'), + ]; + await (client)[ + getCommandName('migrateDeprecatedPausedKey', opts.packageVersion) + ](keys); + }, + }, +]; + +/** + * Run Migrations. + * + * This method is used to run possibly existing migrations for the queue. + * + * Normally, if there are pending migrations, the Queue, Worker and QueueEvents instances + * will throw an error when they are instantiated. Use then this method to run the migrations + * before instantiating the instances. 
+ * + * @param redisClient The Redis client instance + * @param opts The options for the migration + * + * @sa https://docs.bullmq.io/guide/migrations + */ +export const runMigrations = async ( + redisClient: RedisClient, + opts: { + prefix?: string; + queueName: string; + packageVersion: string; + }, +) => { + const prefix = opts.prefix || 'bull'; + const migrationsKey = getRedisKeyFromOpts({ prefix, ...opts }, 'migrations'); + + // The migrations key is a ZSET with the migration timestamp as the score + for (const migration of migrations) { + const migrationId = `${migration.version}-${migration.name}`; + const pendingMigration = !!(await redisClient.zscore( + migrationsKey, + migrationId, + )); + if (pendingMigration) { + continue; + } + console.log(`[BULLMQ] Running migration ${migrationId}`); + try { + await migration.migrate(redisClient, { + prefix, + queueName: opts.queueName, + packageVersion: opts.packageVersion, + }); + await redisClient.zadd(migrationsKey, Date.now(), migrationId); + } catch (err) { + console.error(`[BULLMQ] Migration ${migrationId} failed: ${err}`); + break; + } + console.log(`[BULLMQ] Migration ${migrationId} completed`); + } +}; + +function getRedisKeyFromOpts(opts: MigrationOptions, key: string): string { + return `${opts.prefix}:${opts.queueName}:${key}`; +} diff --git a/src/classes/queue-base.ts b/src/classes/queue-base.ts index 2760a7b108..fc9e738f2c 100644 --- a/src/classes/queue-base.ts +++ b/src/classes/queue-base.ts @@ -12,7 +12,9 @@ import { RedisConnection } from './redis-connection'; import { Job } from './job'; import { KeysMap, QueueKeys } from './queue-keys'; import { Scripts } from './scripts'; -import { TelemetryAttributes, SpanKind } from '../enums'; +import { checkPendingMigrations, runMigrations } from './migrations'; +import { SpanKind } from '../enums'; +import { version as packageVersion } from '../version'; /** * @class QueueBase @@ -23,6 +25,8 @@ import { TelemetryAttributes, SpanKind } from '../enums'; * */ 
export class QueueBase extends EventEmitter implements MinimalQueue { + public readonly qualifiedName: string; + toKey: (type: string) => string; keys: KeysMap; closing: Promise | undefined; @@ -31,7 +35,9 @@ export class QueueBase extends EventEmitter implements MinimalQueue { protected hasBlockingConnection: boolean = false; protected scripts: Scripts; protected connection: RedisConnection; - public readonly qualifiedName: string; + + protected checkedPendingMigrations: boolean = false; + protected packageVersion = packageVersion; /** * @@ -85,9 +91,37 @@ export class QueueBase extends EventEmitter implements MinimalQueue { /** * Returns a promise that resolves to a redis client. Normally used only by subclasses. + * This method will also check if there are pending migrations, if so it will throw an error. */ get client(): Promise { - return this.connection.client; + if (this.checkedPendingMigrations) { + return this.connection.client; + } else { + return this.connection.client.then(client => { + return checkPendingMigrations(client, { + prefix: this.opts.prefix, + queueName: this.name, + }).then(hasPendingMigrations => { + if (hasPendingMigrations) { + throw new Error( + 'Queue has pending migrations. 
See https://docs.bullmq.io/guide/migrations', + ); + } + this.checkedPendingMigrations = true; + return client; + }); + }); + } + } + + async runMigrations() { + const client = await this.client; + await runMigrations(client, { + prefix: this.opts.prefix, + queueName: this.name, + packageVersion: this.packageVersion, + }); + this.checkedPendingMigrations = true; } protected setScripts() { diff --git a/src/classes/queue-events-producer.ts b/src/classes/queue-events-producer.ts index 87d7487d16..7f35521604 100644 --- a/src/classes/queue-events-producer.ts +++ b/src/classes/queue-events-producer.ts @@ -16,7 +16,6 @@ export class QueueEventsProducer extends QueueBase { super( name, { - blockingConnection: false, ...opts, }, Connection, diff --git a/src/classes/queue-events.ts b/src/classes/queue-events.ts index ea875b8706..17e498bd86 100644 --- a/src/classes/queue-events.ts +++ b/src/classes/queue-events.ts @@ -45,14 +45,6 @@ export interface QueueEventsListener extends IoredisListener { id: string, ) => void; - /** - * Listen to 'debounced' event. - * @deprecated use deduplicated event - * - * This event is triggered when a job is debounced because debounceId still existed. - */ - debounced: (args: { jobId: string; debounceId: string }, id: string) => void; - /** * Listen to 'deduplicated' event. * diff --git a/src/classes/queue-getters.ts b/src/classes/queue-getters.ts index 29e0e9d1c3..0f3d7e20e1 100644 --- a/src/classes/queue-getters.ts +++ b/src/classes/queue-getters.ts @@ -98,18 +98,6 @@ export class QueueGetters extends QueueBase { return this.scripts.getRateLimitTtl(maxJobs); } - /** - * Get jobId that starts debounced state. - * @deprecated use getDeduplicationJobId method - * - * @param id - debounce identifier - */ - async getDebounceJobId(id: string): Promise { - const client = await this.client; - - return client.get(`${this.keys.de}:${id}`); - } - /** * Get jobId from deduplicated state. 
* diff --git a/src/classes/queue-keys.ts b/src/classes/queue-keys.ts index aaea64a130..44e7ae7df3 100644 --- a/src/classes/queue-keys.ts +++ b/src/classes/queue-keys.ts @@ -21,6 +21,7 @@ export class QueueKeys { 'repeat', 'limiter', 'meta', + 'migrations', 'events', 'pc', // priority counter key 'marker', // marker key diff --git a/src/classes/queue.ts b/src/classes/queue.ts index 89373010ae..cbd3d0cf61 100644 --- a/src/classes/queue.ts +++ b/src/classes/queue.ts @@ -10,11 +10,9 @@ import { import { FinishedStatus, JobsOptions, MinimalQueue } from '../types'; import { Job } from './job'; import { QueueGetters } from './queue-getters'; -import { Repeat } from './repeat'; import { RedisConnection } from './redis-connection'; import { SpanKind, TelemetryAttributes } from '../enums'; import { JobScheduler } from './job-scheduler'; -import { version } from '../version'; export interface ObliterateOpts { /** @@ -152,7 +150,6 @@ export class Queue< protected libName = 'bullmq'; - private _repeat?: Repeat; // To be deprecated in v6 in favor of JobScheduler protected _jobScheduler?: JobScheduler; constructor( @@ -225,7 +222,7 @@ export class Queue< get metaValues(): Record { return { 'opts.maxLenEvents': this.opts?.streams?.events?.maxLen ?? 
10000, - version: `${this.libName}:${version}`, + version: `${this.libName}:${this.packageVersion}`, }; } @@ -239,19 +236,6 @@ export class Queue< return await client.hget(this.keys.meta, 'version'); } - get repeat(): Promise { - return new Promise(async resolve => { - if (!this._repeat) { - this._repeat = new Repeat(this.name, { - ...this.opts, - connection: await this.client, - }); - this._repeat.on('error', e => this.emit.bind(this, e)); - } - resolve(this._repeat); - }); - } - get jobScheduler(): Promise { return new Promise(async resolve => { if (!this._jobScheduler) { @@ -338,39 +322,25 @@ export class Queue< data: DataType, opts?: JobsOptions, ): Promise> { - if (opts && opts.repeat) { - if (opts.repeat.endDate) { - if (+new Date(opts.repeat.endDate) < Date.now()) { - throw new Error('End date must be greater than current timestamp'); - } - } + const jobId = opts?.jobId; - return (await this.repeat).updateRepeatableJob< - DataType, - ResultType, - NameType - >(name, data, { ...this.jobsOpts, ...opts }, { override: true }); - } else { - const jobId = opts?.jobId; + if (jobId == '0' || jobId?.includes(':')) { + throw new Error("JobId cannot be '0' or contain :"); + } - if (jobId == '0' || jobId?.startsWith('0:')) { - throw new Error("JobId cannot be '0' or start with 0:"); - } + const job = await this.Job.create( + this as MinimalQueue, + name, + data, + { + ...this.jobsOpts, + ...opts, + jobId, + }, + ); + this.emit('waiting', job as JobBase); - const job = await this.Job.create( - this as MinimalQueue, - name, - data, - { - ...this.jobsOpts, - ...opts, - jobId, - }, - ); - this.emit('waiting', job as JobBase); - - return job; - } + return job; } /** @@ -480,12 +450,6 @@ export class Queue< */ async close(): Promise { await this.trace(SpanKind.INTERNAL, 'close', this.name, async () => { - if (!this.closing) { - if (this._repeat) { - await this._repeat.close(); - } - } - await super.close(); }); } @@ -547,25 +511,6 @@ export class Queue< return 
this.scripts.isMaxed(); } - /** - * Get all repeatable meta jobs. - * - * - * @deprecated This method is deprecated and will be removed in v6. Use getJobSchedulers instead. - * - * @param start - Offset of first job to return. - * @param end - Offset of last job to return. - * @param asc - Determine the order in which jobs are returned based on their - * next execution time. - */ - async getRepeatableJobs( - start?: number, - end?: number, - asc?: boolean, - ): Promise { - return (await this.repeat).getRepeatableJobs(start, end, asc); - } - /** * Get Job Scheduler by id * @@ -591,44 +536,6 @@ export class Queue< return (await this.jobScheduler).getJobSchedulers(start, end, asc); } - /** - * Removes a repeatable job. - * - * Note: you need to use the exact same repeatOpts when deleting a repeatable job - * than when adding it. - * - * @deprecated This method is deprecated and will be removed in v6. Use removeJobScheduler instead. - * - * @see removeRepeatableByKey - * - * @param name - Job name - * @param repeatOpts - - * @param jobId - - * @returns - */ - async removeRepeatable( - name: NameType, - repeatOpts: RepeatOptions, - jobId?: string, - ): Promise { - return this.trace( - SpanKind.INTERNAL, - 'removeRepeatable', - `${this.name}.${name}`, - async span => { - span?.setAttributes({ - [TelemetryAttributes.JobName]: name, - [TelemetryAttributes.JobId]: jobId, - }); - - const repeat = await this.repeat; - const removed = await repeat.removeRepeatable(name, repeatOpts, jobId); - - return !removed; - }, - ); - } - /** * * Removes a job scheduler. @@ -644,29 +551,6 @@ export class Queue< return !removed; } - /** - * Removes a debounce key. 
- * @deprecated use removeDeduplicationKey - * - * @param id - identifier - */ - async removeDebounceKey(id: string): Promise { - return this.trace( - SpanKind.INTERNAL, - 'removeDebounceKey', - `${this.name}`, - async span => { - span?.setAttributes({ - [TelemetryAttributes.JobKey]: id, - }); - - const client = await this.client; - - return await client.del(`${this.keys.de}:${id}`); - }, - ); - } - /** * Removes a deduplication key. * @@ -697,36 +581,6 @@ export class Queue< return client.del(this.keys.limiter); } - /** - * Removes a repeatable job by its key. Note that the key is the one used - * to store the repeatable job metadata and not one of the job iterations - * themselves. You can use "getRepeatableJobs" in order to get the keys. - * - * @see getRepeatableJobs - * - * @deprecated This method is deprecated and will be removed in v6. Use removeJobScheduler instead. - * - * @param repeatJobKey - To the repeatable job. - * @returns - */ - async removeRepeatableByKey(key: string): Promise { - return this.trace( - SpanKind.INTERNAL, - 'removeRepeatableByKey', - `${this.name}`, - async span => { - span?.setAttributes({ - [TelemetryAttributes.JobKey]: key, - }); - - const repeat = await this.repeat; - const removed = await repeat.removeRepeatableByKey(key); - - return !removed; - }, - ); - } - /** * Removes the given job from the queue as well as all its * dependencies. 
diff --git a/src/classes/redis-connection.ts b/src/classes/redis-connection.ts index 9621f4d8be..354d95cb95 100644 --- a/src/classes/redis-connection.ts +++ b/src/classes/redis-connection.ts @@ -10,7 +10,7 @@ import { isNotConnectionError, isRedisCluster, isRedisInstance, - isRedisVersionLowerThan, + isVersionLowerThan, } from '../utils'; import { version as packageVersion } from '../version'; import * as scripts from '../scripts'; @@ -235,9 +235,7 @@ export class RedisConnection extends EventEmitter { if (this._client['status'] !== 'end') { this.version = await this.getRedisVersion(); if (this.skipVersionCheck !== true && !this.closing) { - if ( - isRedisVersionLowerThan(this.version, RedisConnection.minimumVersion) - ) { + if (isVersionLowerThan(this.version, RedisConnection.minimumVersion)) { throw new Error( `Redis version needs to be greater or equal than ${RedisConnection.minimumVersion} ` + `Current: ${this.version}`, @@ -245,7 +243,7 @@ export class RedisConnection extends EventEmitter { } if ( - isRedisVersionLowerThan( + isVersionLowerThan( this.version, RedisConnection.recommendedMinimumVersion, ) @@ -258,8 +256,8 @@ export class RedisConnection extends EventEmitter { } this.capabilities = { - canDoubleTimeout: !isRedisVersionLowerThan(this.version, '6.0.0'), - canBlockFor1Ms: !isRedisVersionLowerThan(this.version, '7.0.8'), + canDoubleTimeout: !isVersionLowerThan(this.version, '6.0.0'), + canBlockFor1Ms: !isVersionLowerThan(this.version, '7.0.8'), }; this.status = 'ready'; diff --git a/src/classes/repeat.ts b/src/classes/repeat.ts deleted file mode 100644 index 4d14ff265e..0000000000 --- a/src/classes/repeat.ts +++ /dev/null @@ -1,350 +0,0 @@ -import { parseExpression } from 'cron-parser'; -import { createHash } from 'crypto'; -import { - RedisClient, - RepeatBaseOptions, - RepeatableJob, - RepeatOptions, -} from '../interfaces'; -import { JobsOptions, RepeatStrategy } from '../types'; -import { Job } from './job'; -import { QueueBase } from 
'./queue-base'; -import { RedisConnection } from './redis-connection'; - -export class Repeat extends QueueBase { - private repeatStrategy: RepeatStrategy; - private repeatKeyHashAlgorithm: string; - - constructor( - name: string, - opts: RepeatBaseOptions, - Connection?: typeof RedisConnection, - ) { - super(name, opts, Connection); - - this.repeatStrategy = - (opts.settings && opts.settings.repeatStrategy) || getNextMillis; - - this.repeatKeyHashAlgorithm = - (opts.settings && opts.settings.repeatKeyHashAlgorithm) || 'md5'; - } - - async updateRepeatableJob( - name: N, - data: T, - opts: JobsOptions, - { override }: { override: boolean }, - ): Promise | undefined> { - // Backwards compatibility for repeatable jobs for versions <= 3.0.0 - const repeatOpts: RepeatOptions & { cron?: string } = { ...opts.repeat }; - repeatOpts.pattern ??= repeatOpts.cron; - delete repeatOpts.cron; - - // Check if we reached the limit of the repeatable job's iterations - const iterationCount = repeatOpts.count ? repeatOpts.count + 1 : 1; - if ( - typeof repeatOpts.limit !== 'undefined' && - iterationCount > repeatOpts.limit - ) { - return; - } - - // Check if we reached the end date of the repeatable job - let now = Date.now(); - const { endDate } = repeatOpts; - if (!(typeof endDate === undefined) && now > new Date(endDate!).getTime()) { - return; - } - - const prevMillis = opts.prevMillis || 0; - now = prevMillis < now ? now : prevMillis; - - const nextMillis = await this.repeatStrategy(now, repeatOpts, name); - const { every, pattern } = repeatOpts; - - const hasImmediately = Boolean( - (every || pattern) && repeatOpts.immediately, - ); - const offset = hasImmediately && every ? now - nextMillis : undefined; - if (nextMillis) { - // We store the undecorated opts.jobId into the repeat options - if (!prevMillis && opts.jobId) { - repeatOpts.jobId = opts.jobId; - } - - const legacyRepeatKey = getRepeatConcatOptions(name, repeatOpts); - const newRepeatKey = opts.repeat.key ?? 
this.hash(legacyRepeatKey); - - let repeatJobKey; - if (override) { - repeatJobKey = await this.scripts.addRepeatableJob( - newRepeatKey, - nextMillis, - { - name, - endDate: endDate ? new Date(endDate).getTime() : undefined, - tz: repeatOpts.tz, - pattern, - every, - }, - legacyRepeatKey, - ); - } else { - const client = await this.client; - - repeatJobKey = await this.scripts.updateRepeatableJobMillis( - client, - newRepeatKey, - nextMillis, - legacyRepeatKey, - ); - } - - const { immediately, ...filteredRepeatOpts } = repeatOpts; - - return this.createNextJob( - name, - nextMillis, - repeatJobKey, - { ...opts, repeat: { offset, ...filteredRepeatOpts } }, - data, - iterationCount, - hasImmediately, - ); - } - } - - private async createNextJob( - name: N, - nextMillis: number, - repeatJobKey: string, - opts: JobsOptions, - data: T, - currentCount: number, - hasImmediately: boolean, - ) { - // - // Generate unique job id for this iteration. - // - const jobId = this.getRepeatJobKey(name, nextMillis, repeatJobKey, data); - - const now = Date.now(); - const delay = - nextMillis + (opts.repeat.offset ? opts.repeat.offset : 0) - now; - - const mergedOpts = { - ...opts, - jobId, - delay: delay < 0 || hasImmediately ? 
0 : delay, - timestamp: now, - prevMillis: nextMillis, - repeatJobKey, - }; - - mergedOpts.repeat = { ...opts.repeat, count: currentCount }; - - return this.Job.create(this, name, data, mergedOpts); - } - - // TODO: remove legacy code in next breaking change - getRepeatJobKey( - name: N, - nextMillis: number, - repeatJobKey: string, - data: T, - ) { - if (repeatJobKey.split(':').length > 2) { - return this.getRepeatJobId({ - name: name, - nextMillis: nextMillis, - namespace: this.hash(repeatJobKey), - jobId: (data as any)?.id, - }); - } - - return this.getRepeatDelayedJobId({ - customKey: repeatJobKey, - nextMillis, - }); - } - - async removeRepeatable( - name: string, - repeat: RepeatOptions, - jobId?: string, - ): Promise { - const repeatConcatOptions = getRepeatConcatOptions(name, { - ...repeat, - jobId, - }); - const repeatJobKey = repeat.key ?? this.hash(repeatConcatOptions); - const legacyRepeatJobId = this.getRepeatJobId({ - name, - nextMillis: '', - namespace: this.hash(repeatConcatOptions), - jobId: jobId ?? 
repeat.jobId, - key: repeat.key, - }); - - return this.scripts.removeRepeatable( - legacyRepeatJobId, - repeatConcatOptions, - repeatJobKey, - ); - } - - async removeRepeatableByKey(repeatJobKey: string): Promise { - const data = this.keyToData(repeatJobKey); - - const legacyRepeatJobId = this.getRepeatJobId({ - name: data.name, - nextMillis: '', - namespace: this.hash(repeatJobKey), - jobId: data.id, - }); - - return this.scripts.removeRepeatable(legacyRepeatJobId, '', repeatJobKey); - } - - private async getRepeatableData( - client: RedisClient, - key: string, - next?: number, - ): Promise { - const jobData = await client.hgetall(this.toKey('repeat:' + key)); - - if (jobData) { - return { - key, - name: jobData.name, - endDate: parseInt(jobData.endDate) || null, - tz: jobData.tz || null, - pattern: jobData.pattern || null, - every: jobData.every || null, - next, - }; - } - - return this.keyToData(key, next); - } - - private keyToData(key: string, next?: number): RepeatableJob { - const data = key.split(':'); - const pattern = data.slice(4).join(':') || null; - - return { - key, - name: data[0], - id: data[1] || null, - endDate: parseInt(data[2]) || null, - tz: data[3] || null, - pattern, - next, - }; - } - - async getRepeatableJobs( - start = 0, - end = -1, - asc = false, - ): Promise { - const client = await this.client; - - const key = this.keys.repeat; - const result = asc - ? 
await client.zrange(key, start, end, 'WITHSCORES') - : await client.zrevrange(key, start, end, 'WITHSCORES'); - - const jobs = []; - for (let i = 0; i < result.length; i += 2) { - jobs.push( - this.getRepeatableData(client, result[i], parseInt(result[i + 1])), - ); - } - return Promise.all(jobs); - } - - async getRepeatableCount(): Promise { - const client = await this.client; - return client.zcard(this.toKey('repeat')); - } - - private hash(str: string) { - return createHash(this.repeatKeyHashAlgorithm).update(str).digest('hex'); - } - - private getRepeatDelayedJobId({ - nextMillis, - customKey, - }: { - customKey: string; - nextMillis: number | string; - }) { - return `repeat:${customKey}:${nextMillis}`; - } - - private getRepeatJobId({ - name, - nextMillis, - namespace, - jobId, - key, - }: { - name?: string; - nextMillis: number | string; - namespace?: string; - jobId?: string; - key?: string; - }) { - const checksum = key ?? this.hash(`${name}${jobId || ''}${namespace}`); - return `repeat:${checksum}:${nextMillis}`; - } -} - -function getRepeatConcatOptions(name: string, repeat: RepeatOptions) { - const endDate = repeat.endDate ? new Date(repeat.endDate).getTime() : ''; - const tz = repeat.tz || ''; - const pattern = repeat.pattern; - const suffix = (pattern ? pattern : String(repeat.every)) || ''; - const jobId = repeat.jobId ? repeat.jobId : ''; - - return `${name}:${jobId}:${endDate}:${tz}:${suffix}`; -} - -export const getNextMillis = ( - millis: number, - opts: RepeatOptions, -): number | undefined => { - const pattern = opts.pattern; - if (pattern && opts.every) { - throw new Error( - 'Both .pattern and .every options are defined for this repeatable job', - ); - } - - if (opts.every) { - return ( - Math.floor(millis / opts.every) * opts.every + - (opts.immediately ? 0 : opts.every) - ); - } - - const currentDate = - opts.startDate && new Date(opts.startDate) > new Date(millis) - ? 
new Date(opts.startDate) - : new Date(millis); - const interval = parseExpression(pattern, { - ...opts, - currentDate, - }); - - try { - if (opts.immediately) { - return new Date().getTime(); - } else { - return interval.next().getTime(); - } - } catch (e) { - // Ignore error - } -}; diff --git a/src/classes/scripts.ts b/src/classes/scripts.ts index c9156b291d..9d8d9de1d5 100644 --- a/src/classes/scripts.ts +++ b/src/classes/scripts.ts @@ -34,11 +34,18 @@ import { RedisJobOptions, } from '../types'; import { ErrorCode } from '../enums'; -import { array2obj, getParentKey, isRedisVersionLowerThan } from '../utils'; +import { array2obj, getParentKey, isVersionLowerThan } from '../utils'; import { ChainableCommander } from 'ioredis'; import { version as packageVersion } from '../version'; export type JobData = [JobJsonRaw | number, string?]; +const onChildFailureMap = { + fail: 'f', + ignore: 'i', + remove: 'r', + wait: 'w', +}; + export class Scripts { protected version = packageVersion; @@ -55,7 +62,6 @@ export class Scripts { queueKeys.stalled, queueKeys.limiter, queueKeys.delayed, - queueKeys.paused, queueKeys.meta, queueKeys.pc, undefined, @@ -77,7 +83,7 @@ export class Scripts { async isJobInList(listKey: string, jobId: string): Promise { const client = await this.queue.client; let result; - if (isRedisVersionLowerThan(this.queue.redisVersion, '6.0.6')) { + if (isVersionLowerThan(this.queue.redisVersion, '6.0.6')) { result = await this.execCommand(client, 'isJobInList', [listKey, jobId]); } else { result = await client.lpos(listKey, jobId); @@ -154,7 +160,6 @@ export class Scripts { const queueKeys = this.queue.keys; const keys: (string | Buffer)[] = [ queueKeys.wait, - queueKeys.paused, queueKeys.meta, queueKeys.id, queueKeys.completed, @@ -177,7 +182,7 @@ export class Scripts { const queueKeys = this.queue.keys; const parent: Record = job.parent - ? { ...job.parent, fpof: opts.fpof, rdof: opts.rdof, idof: opts.idof } + ? 
{ ...job.parent, ocf: onChildFailureMap[opts.ocf] } : null; const args = [ @@ -546,10 +551,10 @@ export class Scripts { const metricsKey = this.queue.toKey(`metrics:${target}`); const keys = this.moveToFinishedKeys; - keys[10] = queueKeys[target]; - keys[11] = this.queue.toKey(job.id ?? ''); - keys[12] = metricsKey; - keys[13] = this.queue.keys.marker; + keys[9] = queueKeys[target]; + keys[10] = this.queue.toKey(job.id ?? ''); + keys[11] = metricsKey; + keys[12] = this.queue.keys.marker; const keepJobs = this.getKeepJobs(shouldRemove, workerKeepJobs); @@ -570,9 +575,6 @@ export class Scripts { maxMetricsSize: opts.metrics?.maxDataPoints ? opts.metrics?.maxDataPoints : '', - fpof: !!job.opts?.failParentOnFailure, - idof: !!job.opts?.ignoreDependencyOnFailure, - rdof: !!job.opts?.removeDependencyOnFailure, }), ]; @@ -824,7 +826,7 @@ export class Scripts { return this.queue.toKey(key); }); - if (isRedisVersionLowerThan(this.queue.redisVersion, '6.0.6')) { + if (isVersionLowerThan(this.queue.redisVersion, '6.0.6')) { return this.execCommand(client, 'getState', keys.concat([jobId])); } return this.execCommand(client, 'getStateV2', keys.concat([jobId])); @@ -889,7 +891,6 @@ export class Scripts { ): (string | number)[] { const keys: (string | number)[] = [ this.queue.keys.wait, - this.queue.keys.paused, this.queue.keys.meta, this.queue.keys.prioritized, this.queue.keys.active, @@ -1086,15 +1087,14 @@ export class Scripts { const keys: (string | number)[] = [ this.queue.keys.active, this.queue.keys.wait, - this.queue.keys.paused, this.queue.toKey(jobId), this.queue.keys.meta, this.queue.keys.events, this.queue.keys.delayed, this.queue.keys.prioritized, this.queue.keys.pc, - this.queue.keys.marker, this.queue.keys.stalled, + this.queue.keys.marker, ]; const pushCmd = (lifo ? 
'R' : 'L') + 'PUSH'; @@ -1118,7 +1118,6 @@ export class Scripts { this.queue.keys.events, this.queue.toKey(state), this.queue.toKey('wait'), - this.queue.toKey('paused'), this.queue.keys.meta, this.queue.keys.active, this.queue.keys.marker, @@ -1174,7 +1173,6 @@ export class Scripts { this.queue.toKey(state), this.queue.keys.wait, this.queue.keys.meta, - this.queue.keys.paused, this.queue.keys.active, this.queue.keys.marker, ]; @@ -1217,7 +1215,6 @@ export class Scripts { queueKeys.stalled, queueKeys.limiter, queueKeys.delayed, - queueKeys.paused, queueKeys.meta, queueKeys.pc, queueKeys.marker, @@ -1249,7 +1246,6 @@ export class Scripts { const keys = [ this.queue.keys.delayed, this.queue.keys.wait, - this.queue.keys.paused, this.queue.keys.meta, this.queue.keys.prioritized, this.queue.keys.active, @@ -1280,7 +1276,6 @@ export class Scripts { this.queue.keys.failed, this.queue.keys['stalled-check'], this.queue.keys.meta, - this.queue.keys.paused, this.queue.keys.marker, this.queue.keys.events, ]; @@ -1329,7 +1324,6 @@ export class Scripts { this.queue.keys.wait, this.queue.keys.stalled, lockKey, - this.queue.keys.paused, this.queue.keys.meta, this.queue.keys.limiter, this.queue.keys.prioritized, @@ -1458,6 +1452,27 @@ export class Scripts { } } + protected executeMigrationsArgs( + currentMigrationExecution = 1, + ): (string | number)[] { + const keys: (string | number)[] = [ + this.queue.keys.meta, + this.queue.keys.migrations, + this.queue.toKey(''), + ]; + const args = [6, Date.now(), currentMigrationExecution]; + + return keys.concat(args); + } + + async executeMigrations(currentMigrationExecution: number): Promise { + const client = await this.queue.client; + + const args = this.executeMigrationsArgs(currentMigrationExecution); + + return (client).executeMigrations(args); + } + finishedErrors({ code, jobId, diff --git a/src/classes/worker.ts b/src/classes/worker.ts index 58fc702f9d..8839c1ea1a 100644 --- a/src/classes/worker.ts +++ b/src/classes/worker.ts @@ 
-197,10 +197,6 @@ export class Worker< protected processFn: Processor; protected running = false; - static RateLimitError(): Error { - return new RateLimitError(); - } - constructor( name: string, processor?: string | URL | null | Processor, @@ -211,7 +207,6 @@ export class Worker< name, { ...opts, - blockingConnection: true, }, Connection, ); @@ -624,34 +619,6 @@ export class Worker< } } - /** - * Overrides the rate limit to be active for the next jobs. - * @deprecated This method is deprecated and will be removed in v6. Use queue.rateLimit method instead. - * @param expireTimeMs - expire time in ms of this rate limit. - */ - async rateLimit(expireTimeMs: number): Promise { - await this.trace( - SpanKind.INTERNAL, - 'rateLimit', - this.name, - async span => { - span?.setAttributes({ - [TelemetryAttributes.WorkerId]: this.id, - [TelemetryAttributes.WorkerRateLimit]: expireTimeMs, - }); - - await this.client.then(client => - client.set( - this.keys.limiter, - Number.MAX_SAFE_INTEGER, - 'PX', - expireTimeMs, - ), - ); - }, - ); - } - get minimumBlockTimeout(): number { return this.blockingConnection.capabilities.canBlockFor1Ms ? /* 1 millisecond is chosen because the granularity of our timestamps are milliseconds. 
diff --git a/src/commands/addJobScheduler-2.lua b/src/commands/addJobScheduler-2.lua index 4583e6b223..687776f288 100644 --- a/src/commands/addJobScheduler-2.lua +++ b/src/commands/addJobScheduler-2.lua @@ -67,7 +67,7 @@ if prevMillis ~= false then if rcall("ZSCORE", delayedKey, delayedJobId) ~= false and rcall("EXISTS", nextDelayedJobId) ~= 1 then - removeJob(delayedJobId, true, prefixKey, true --[[remove debounce key]]) + removeJob(delayedJobId, true, prefixKey, true --[[remove deduplication key]]) rcall("ZREM", delayedKey, delayedJobId) end end diff --git a/src/commands/addRepeatableJob-2.lua b/src/commands/addRepeatableJob-2.lua index 4046df5381..0762cf6128 100644 --- a/src/commands/addRepeatableJob-2.lua +++ b/src/commands/addRepeatableJob-2.lua @@ -71,7 +71,7 @@ if prevMillis ~= false then if rcall("ZSCORE", delayedKey, delayedJobId) ~= false and rcall("EXISTS", nextDelayedJobId) ~= 1 then - removeJob(delayedJobId, true, prefixKey, true --[[remove debounce key]]) + removeJob(delayedJobId, true, prefixKey, true --[[remove deduplication key]]) rcall("ZREM", delayedKey, delayedJobId) end end diff --git a/src/commands/addStandardJob-8.lua b/src/commands/addStandardJob-7.lua similarity index 84% rename from src/commands/addStandardJob-8.lua rename to src/commands/addStandardJob-7.lua index 7005e91af7..34c6cbf8ad 100644 --- a/src/commands/addStandardJob-8.lua +++ b/src/commands/addStandardJob-7.lua @@ -16,13 +16,12 @@ Input: KEYS[1] 'wait', - KEYS[2] 'paused' - KEYS[3] 'meta' - KEYS[4] 'id' - KEYS[5] 'completed' - KEYS[6] 'active' - KEYS[7] events stream key - KEYS[8] marker key + KEYS[2] 'meta' + KEYS[3] 'id' + KEYS[4] 'completed' + KEYS[5] 'active' + KEYS[6] events stream key + KEYS[7] marker key ARGV[1] msgpacked arguments array [1] key prefix, @@ -43,7 +42,7 @@ jobId - OK -5 - Missing parent key ]] -local eventsKey = KEYS[7] +local eventsKey = KEYS[6] local jobId local jobIdKey @@ -64,8 +63,8 @@ local parentData --- @include "includes/addJobInTargetList" --- 
@include "includes/deduplicateJob" --- @include "includes/getOrSetMaxEvents" ---- @include "includes/getTargetQueueList" --- @include "includes/handleDuplicatedJob" +--- @include "includes/isQueuePausedOrMaxed" --- @include "includes/storeJob" if parentKey ~= nil then @@ -74,9 +73,10 @@ if parentKey ~= nil then parentData = cjson.encode(parent) end -local jobCounter = rcall("INCR", KEYS[4]) +local jobCounter = rcall("INCR", KEYS[3]) -local metaKey = KEYS[3] +local waitKey = KEYS[1] +local metaKey = KEYS[2] local maxEvents = getOrSetMaxEvents(metaKey) local parentDependenciesKey = args[7] @@ -89,7 +89,7 @@ else jobIdKey = args[1] .. jobId if rcall("EXISTS", jobIdKey) == 1 then return handleDuplicatedJob(jobIdKey, jobId, parentKey, parent, - parentData, parentDependenciesKey, KEYS[5], eventsKey, + parentData, parentDependenciesKey, KEYS[4], eventsKey, maxEvents, timestamp) end end @@ -104,11 +104,11 @@ end storeJob(eventsKey, jobIdKey, jobId, args[3], ARGV[2], opts, timestamp, parentKey, parentData, repeatJobKey) -local target, isPausedOrMaxed = getTargetQueueList(metaKey, KEYS[6], KEYS[1], KEYS[2]) +local isPausedOrMaxed = isQueuePausedOrMaxed(metaKey, KEYS[5]) -- LIFO or FIFO local pushCmd = opts['lifo'] and 'RPUSH' or 'LPUSH' -addJobInTargetList(target, KEYS[8], pushCmd, isPausedOrMaxed, jobId) +addJobInTargetList(waitKey, KEYS[7], pushCmd, isPausedOrMaxed, jobId) -- Emit waiting event rcall("XADD", eventsKey, "MAXLEN", "~", maxEvents, "*", "event", "waiting", diff --git a/src/commands/changePriority-7.lua b/src/commands/changePriority-6.lua similarity index 65% rename from src/commands/changePriority-7.lua rename to src/commands/changePriority-6.lua index 3350f54fbd..e5bc4947b1 100644 --- a/src/commands/changePriority-7.lua +++ b/src/commands/changePriority-6.lua @@ -2,12 +2,11 @@ Change job priority Input: KEYS[1] 'wait', - KEYS[2] 'paused' - KEYS[3] 'meta' - KEYS[4] 'prioritized' - KEYS[5] 'active' - KEYS[6] 'pc' priority counter - KEYS[7] 'marker' + KEYS[2] 
'meta' + KEYS[3] 'prioritized' + KEYS[4] 'active' + KEYS[5] 'pc' priority counter + KEYS[6] 'marker' ARGV[1] priority value ARGV[2] prefix key @@ -26,14 +25,14 @@ local rcall = redis.call -- Includes --- @include "includes/addJobInTargetList" --- @include "includes/addJobWithPriority" ---- @include "includes/getTargetQueueList" +--- @include "includes/isQueuePausedOrMaxed" --- @include "includes/pushBackJobWithPriority" -local function reAddJobWithNewPriority( prioritizedKey, markerKey, targetKey, +local function reAddJobWithNewPriority( prioritizedKey, markerKey, waitKey, priorityCounter, lifo, priority, jobId, isPausedOrMaxed) if priority == 0 then local pushCmd = lifo and 'RPUSH' or 'LPUSH' - addJobInTargetList(targetKey, markerKey, pushCmd, isPausedOrMaxed, jobId) + addJobInTargetList(waitKey, markerKey, pushCmd, isPausedOrMaxed, jobId) else if lifo then pushBackJobWithPriority(prioritizedKey, priority, jobId) @@ -45,18 +44,18 @@ local function reAddJobWithNewPriority( prioritizedKey, markerKey, targetKey, end if rcall("EXISTS", jobKey) == 1 then - local metaKey = KEYS[3] - local target, isPausedOrMaxed = getTargetQueueList(metaKey, KEYS[5], KEYS[1], KEYS[2]) - local prioritizedKey = KEYS[4] - local priorityCounterKey = KEYS[6] - local markerKey = KEYS[7] + local metaKey = KEYS[2] + local isPausedOrMaxed = isQueuePausedOrMaxed(metaKey, KEYS[4]) + local prioritizedKey = KEYS[3] + local priorityCounterKey = KEYS[5] + local markerKey = KEYS[6] -- Re-add with the new priority if rcall("ZREM", prioritizedKey, jobId) > 0 then - reAddJobWithNewPriority( prioritizedKey, markerKey, target, + reAddJobWithNewPriority( prioritizedKey, markerKey, KEYS[1], priorityCounterKey, ARGV[4] == '1', priority, jobId, isPausedOrMaxed) - elseif rcall("LREM", target, -1, jobId) > 0 then - reAddJobWithNewPriority( prioritizedKey, markerKey, target, + elseif rcall("LREM", KEYS[1], -1, jobId) > 0 then + reAddJobWithNewPriority( prioritizedKey, markerKey, KEYS[1], priorityCounterKey, 
ARGV[4] == '1', priority, jobId, isPausedOrMaxed) end diff --git a/src/commands/executeMigrations-3.lua b/src/commands/executeMigrations-3.lua new file mode 100644 index 0000000000..1ef403629c --- /dev/null +++ b/src/commands/executeMigrations-3.lua @@ -0,0 +1,64 @@ +--[[ + Execute migrations. + + Input: + KEYS[1] meta key + KEYS[2] migrations key + KEYS[3] prefix key + + ARGV[1] current major version + ARGV[2] timestamp + ARGV[3] current migration execution +]] +local rcall = redis.call + +local currentMajorVersion = rcall("HGET", KEYS[1], "mv") + +local function getCurrentMigrationNumber(migrationKey) + local lastExecutedMigration = rcall("LRANGE", migrationKey, -1, -1) + + if #lastExecutedMigration > 0 then + return tonumber(string.match(lastExecutedMigration[1], "(.*)-.*-.*")) + 1 + else + return 1 + end +end + +local function saveMigration(migrationKey, migrationNumber, timestamp, migrationName) + rcall("RPUSH", migrationKey, migrationNumber .. "-" .. timestamp .. "-" .. migrationName) + return migrationNumber + 1 +end + +if currentMajorVersion then + if currentMajorVersion == ARGV[1] then + return 0 + end +else + local currentMigrationNumber = getCurrentMigrationNumber(KEYS[2]) + if currentMigrationNumber == 1 then + -- delete deprecated priority + rcall("DEL", KEYS[3] .. 
"priority") + currentMigrationNumber = saveMigration(KEYS[2], currentMigrationNumber, ARGV[2], "removeDeprecatedPriorityKey") + end + + local currentMigrationExecutionNumber = tonumber(ARGV[3]) + if currentMigrationNumber == 2 then + -- remove legacy markers + if currentMigrationNumber >= currentMigrationExecutionNumber then + return 2 + else + currentMigrationNumber = saveMigration(KEYS[2], currentMigrationNumber, ARGV[2], "removeLegacyMarkers") + end + end + + if currentMigrationNumber == 3 then + -- migrate deprecated paused key + if currentMigrationNumber >= currentMigrationExecutionNumber then + return 3 + else + currentMigrationNumber = saveMigration(KEYS[2], currentMigrationNumber, ARGV[2], "migrateDeprecatedPausedKey") + end + end + + return 0 +end diff --git a/src/commands/getCounts-1.lua b/src/commands/getCounts-1.lua index 0b5886cc80..f928186d1d 100644 --- a/src/commands/getCounts-1.lua +++ b/src/commands/getCounts-1.lua @@ -12,21 +12,7 @@ local results = {} for i = 1, #ARGV do local stateKey = prefix .. ARGV[i] - if ARGV[i] == "wait" or ARGV[i] == "paused" then - -- Markers in waitlist DEPRECATED in v5: Remove in v6. 
- local marker = rcall("LINDEX", stateKey, -1) - if marker and string.sub(marker, 1, 2) == "0:" then - local count = rcall("LLEN", stateKey) - if count > 1 then - rcall("RPOP", stateKey) - results[#results+1] = count-1 - else - results[#results+1] = 0 - end - else - results[#results+1] = rcall("LLEN", stateKey) - end - elseif ARGV[i] == "active" then + if ARGV[i] == "wait" or ARGV[i] == "paused" or ARGV[i] == "active" then results[#results+1] = rcall("LLEN", stateKey) else results[#results+1] = rcall("ZCARD", stateKey) diff --git a/src/commands/getCountsPerPriority-4.lua b/src/commands/getCountsPerPriority-4.lua index fa24ba2328..60f6a90d4c 100644 --- a/src/commands/getCountsPerPriority-4.lua +++ b/src/commands/getCountsPerPriority-4.lua @@ -21,10 +21,10 @@ local prioritizedKey = KEYS[4] for i = 1, #ARGV do local priority = tonumber(ARGV[i]) if priority == 0 then - if isQueuePaused(KEYS[3]) then - results[#results+1] = rcall("LLEN", pausedKey) + results[#results+1] = rcall("LLEN", waitKey) + if isQueuePaused(KEYS[3]) then -- TODO: remove in next breaking change + results[#results] = results[#results] + rcall("LLEN", pausedKey) else - results[#results+1] = rcall("LLEN", waitKey) end else results[#results+1] = rcall("ZCOUNT", prioritizedKey, diff --git a/src/commands/getRanges-1.lua b/src/commands/getRanges-1.lua index b9ead60286..c17f342e81 100644 --- a/src/commands/getRanges-1.lua +++ b/src/commands/getRanges-1.lua @@ -42,21 +42,7 @@ end for i = 4, #ARGV do local stateKey = prefix .. ARGV[i] - if ARGV[i] == "wait" or ARGV[i] == "paused" then - -- Markers in waitlist DEPRECATED in v5: Remove in v6. 
- local marker = rcall("LINDEX", stateKey, -1) - if marker and string.sub(marker, 1, 2) == "0:" then - local count = rcall("LLEN", stateKey) - if count > 1 then - rcall("RPOP", stateKey) - getRangeInList(stateKey, asc, rangeStart, rangeEnd, results) - else - results[#results+1] = {} - end - else - getRangeInList(stateKey, asc, rangeStart, rangeEnd, results) - end - elseif ARGV[i] == "active" then + if ARGV[i] == "wait" or ARGV[i] == "paused" or ARGV[i] == "active" then getRangeInList(stateKey, asc, rangeStart, rangeEnd, results) else if asc == "1" then diff --git a/src/commands/includes/cleanList.lua b/src/commands/includes/cleanList.lua index 5dbae4abc3..11b086940c 100644 --- a/src/commands/includes/cleanList.lua +++ b/src/commands/includes/cleanList.lua @@ -34,7 +34,7 @@ local function cleanList(listKey, jobKeyPrefix, rangeStart, rangeEnd, -- replace the entry with a deletion marker; the actual deletion will -- occur at the end of the script rcall("LSET", listKey, rangeEnd - jobIdsLen + i, deletionMarker) - removeJob(job, true, jobKeyPrefix, true --[[remove debounce key]]) + removeJob(job, true, jobKeyPrefix, true --[[remove deduplication key]]) deletedCount = deletedCount + 1 table.insert(deleted, job) end diff --git a/src/commands/includes/cleanSet.lua b/src/commands/includes/cleanSet.lua index f85d41c259..30daf3a226 100644 --- a/src/commands/includes/cleanSet.lua +++ b/src/commands/includes/cleanSet.lua @@ -41,14 +41,14 @@ local function cleanSet( if not isJobSchedulerJob(job, jobSchedulersKey) then local jobKey = jobKeyPrefix .. 
job if isFinished then - removeJob(job, true, jobKeyPrefix, true --[[remove debounce key]] ) + removeJob(job, true, jobKeyPrefix, true --[[remove deduplication key]] ) deletedCount = deletedCount + 1 table.insert(deleted, job) else -- * finishedOn says when the job was completed, but it isn't set unless the job has actually completed jobTS = getTimestamp(jobKey, attributes) if (not jobTS or jobTS <= timestamp) then - removeJob(job, true, jobKeyPrefix, true --[[remove debounce key]] ) + removeJob(job, true, jobKeyPrefix, true --[[remove deduplication key]] ) deletedCount = deletedCount + 1 table.insert(deleted, job) end diff --git a/src/commands/includes/deduplicateJob.lua b/src/commands/includes/deduplicateJob.lua index ff873e599c..03f92ffb8b 100644 --- a/src/commands/includes/deduplicateJob.lua +++ b/src/commands/includes/deduplicateJob.lua @@ -1,5 +1,5 @@ --[[ - Function to debounce a job. + Function to deduplicate a job. ]] local function deduplicateJob(prefixKey, deduplicationOpts, jobId, deduplicationKey, eventsKey, maxEvents) @@ -13,12 +13,10 @@ local function deduplicateJob(prefixKey, deduplicationOpts, jobId, deduplication deduplicationKeyExists = not rcall('SET', deduplicationKey, jobId, 'NX') end if deduplicationKeyExists then - local currentDebounceJobId = rcall('GET', deduplicationKey) + local currentDeduplicationJobId = rcall('GET', deduplicationKey) rcall("XADD", eventsKey, "MAXLEN", "~", maxEvents, "*", "event", - "debounced", "jobId", currentDebounceJobId, "debounceId", deduplicationId) - rcall("XADD", eventsKey, "MAXLEN", "~", maxEvents, "*", "event", - "deduplicated", "jobId", currentDebounceJobId, "deduplicationId", deduplicationId) - return currentDebounceJobId + "deduplicated", "jobId", currentDeduplicationJobId, "deduplicationId", deduplicationId) + return currentDeduplicationJobId end end end diff --git a/src/commands/includes/getTargetQueueList.lua b/src/commands/includes/getTargetQueueList.lua deleted file mode 100644 index 
2a7b03571a..0000000000 --- a/src/commands/includes/getTargetQueueList.lua +++ /dev/null @@ -1,22 +0,0 @@ ---[[ - Function to check for the meta.paused key to decide if we are paused or not - (since an empty list and !EXISTS are not really the same). -]] - -local function getTargetQueueList(queueMetaKey, activeKey, waitKey, pausedKey) - local queueAttributes = rcall("HMGET", queueMetaKey, "paused", "concurrency") - - if queueAttributes[1] then - return pausedKey, true - else - if queueAttributes[2] then - local activeCount = rcall("LLEN", activeKey) - if activeCount >= tonumber(queueAttributes[2]) then - return waitKey, true - else - return waitKey, false - end - end - end - return waitKey, false -end diff --git a/src/commands/includes/getWaitPlusPrioritizedCount.lua b/src/commands/includes/getWaitPlusPrioritizedCount.lua new file mode 100644 index 0000000000..019fcbed87 --- /dev/null +++ b/src/commands/includes/getWaitPlusPrioritizedCount.lua @@ -0,0 +1,11 @@ +--[[ + Get count jobs in wait or prioritized. +]] + +local function getWaitPlusPrioritizedCount(waitKey, prioritizedKey) + local waitCount = rcall("LLEN", waitKey) + local prioritizedCount = rcall("ZCARD", prioritizedKey) + + return waitCount + prioritizedCount +end + \ No newline at end of file diff --git a/src/commands/includes/moveParentFromWaitingChildrenToFailed.lua b/src/commands/includes/moveParentFromWaitingChildrenToFailed.lua index 4ad4723506..bf54b8ce68 100644 --- a/src/commands/includes/moveParentFromWaitingChildrenToFailed.lua +++ b/src/commands/includes/moveParentFromWaitingChildrenToFailed.lua @@ -1,9 +1,9 @@ --[[ - Function to recursively move from waitingChildren to failed. + Function to move from waitingChildren to failed. + Returns parent data and failedReason from parent. 
]] -- Includes ---- @include "moveParentToWaitIfNeeded" --- @include "removeDeduplicationKeyIfNeeded" local function moveParentFromWaitingChildrenToFailed( parentQueueKey, parentKey, parentId, jobIdKey, timestamp) @@ -19,27 +19,9 @@ local function moveParentFromWaitingChildrenToFailed( parentQueueKey, parentKey, removeDeduplicationKeyIfNeeded(parentQueueKey .. ":", jobAttributes[2]) if jobAttributes[1] then - local parentData = cjson.decode(jobAttributes[1]) - if parentData['fpof'] then - moveParentFromWaitingChildrenToFailed( - parentData['queueKey'], - parentData['queueKey'] .. ':' .. parentData['id'], - parentData['id'], - parentKey, - timestamp - ) - elseif parentData['idof'] or parentData['rdof'] then - local grandParentKey = parentData['queueKey'] .. ':' .. parentData['id'] - local grandParentDependenciesSet = grandParentKey .. ":dependencies" - if rcall("SREM", grandParentDependenciesSet, parentKey) == 1 then - moveParentToWaitIfNeeded(parentData['queueKey'], grandParentDependenciesSet, - grandParentKey, parentData['id'], timestamp) - if parentData['idof'] then - local grandParentFailedSet = grandParentKey .. ":failed" - rcall("HSET", grandParentFailedSet, parentKey, failedReason) - end - end - end + return cjson.decode(jobAttributes[1]), failedReason end + + return nil, nil end end diff --git a/src/commands/includes/moveParentIfNeeded.lua b/src/commands/includes/moveParentIfNeeded.lua new file mode 100644 index 0000000000..86c47a4761 --- /dev/null +++ b/src/commands/includes/moveParentIfNeeded.lua @@ -0,0 +1,62 @@ +--[[ + Validate and move parent to active if needed. 
+]] + +-- Includes +--- @include "moveParentFromWaitingChildrenToFailed" +--- @include "moveParentToWaitIfNeeded" + +local function moveParentIfNeeded(parentData, parentKey, jobIdKey, + failedReason, timestamp) + if parentData['ocf'] then + if parentData['ocf'] == 'f' then + local grandParentData, parentFailedReason = moveParentFromWaitingChildrenToFailed( + parentData['queueKey'], + parentKey, + parentData['id'], + jobIdKey, + timestamp) + if grandParentData then + moveParentIfNeeded(grandParentData, grandParentData['queueKey'] .. ':' .. grandParentData['id'], + parentKey, parentFailedReason, timestamp) + end + elseif parentData['ocf'] == 'i' or parentData['ocf'] == 'r' then + local dependenciesSet = parentKey .. ":dependencies" + if rcall("SREM", dependenciesSet, jobIdKey) == 1 then + moveParentToWaitIfNeeded(parentData['queueKey'], dependenciesSet, + parentKey, parentData['id'], timestamp) + if parentData['ocf'] == 'i' then + local failedSet = parentKey .. ":failed" + rcall("HSET", failedSet, jobIdKey, failedReason) + end + end + end + else + if parentData['fpof'] then + local grandParentData, parentFailedReason = moveParentFromWaitingChildrenToFailed(parentData['queueKey'], + parentKey, parentData['id'], jobIdKey, + timestamp) + if grandParentData then + moveParentIfNeeded(grandParentData, grandParentData['queueKey'] .. ':' .. grandParentData['id'], + parentKey, parentFailedReason, timestamp) + end + elseif parentData['idof'] or parentData['rdof'] then + local dependenciesSet = parentKey .. ":dependencies" + if rcall("SREM", dependenciesSet, jobIdKey) == 1 then + moveParentToWaitIfNeeded(parentData['queueKey'], dependenciesSet, + parentKey, parentData['id'], timestamp) + if parentData['idof'] then + local failedSet = parentKey .. 
":failed" + rcall("HSET", failedSet, jobIdKey, failedReason) + end + end + else + local grandParentData, parentFailedReason = moveParentFromWaitingChildrenToFailed(parentData['queueKey'], + parentKey, parentData['id'], jobIdKey, timestamp) + if grandParentData then + moveParentIfNeeded(grandParentData, grandParentData['queueKey'] .. ':' .. grandParentData['id'], + parentKey, parentFailedReason, timestamp) + end + end + end +end diff --git a/src/commands/includes/moveParentToWaitIfNeeded.lua b/src/commands/includes/moveParentToWaitIfNeeded.lua index 33d9bcb031..ca4ee7f97b 100644 --- a/src/commands/includes/moveParentToWaitIfNeeded.lua +++ b/src/commands/includes/moveParentToWaitIfNeeded.lua @@ -7,13 +7,11 @@ --- @include "addJobInTargetList" --- @include "addJobWithPriority" --- @include "isQueuePausedOrMaxed" ---- @include "getTargetQueueList" local function moveParentToWaitIfNeeded(parentQueueKey, parentDependenciesKey, parentKey, parentId, timestamp) - local isParentActive = rcall("ZSCORE", - parentQueueKey .. ":waiting-children", parentId) - if rcall("SCARD", parentDependenciesKey) == 0 and isParentActive then + if rcall("SCARD", parentDependenciesKey) == 0 and rcall("ZSCORE", + parentQueueKey .. ":waiting-children", parentId) then rcall("ZREM", parentQueueKey .. ":waiting-children", parentId) local parentWaitKey = parentQueueKey .. ":wait" local parentPausedKey = parentQueueKey .. 
":paused" @@ -36,10 +34,9 @@ local function moveParentToWaitIfNeeded(parentQueueKey, parentDependenciesKey, addDelayMarkerIfNeeded(parentMarkerKey, parentDelayedKey) else if priority == 0 then - local parentTarget, isParentPausedOrMaxed = - getTargetQueueList(parentMetaKey, parentActiveKey, parentWaitKey, - parentPausedKey) - addJobInTargetList(parentTarget, parentMarkerKey, "RPUSH", isParentPausedOrMaxed, + local isParentPausedOrMaxed = + isQueuePausedOrMaxed(parentMetaKey, parentActiveKey) + addJobInTargetList(parentWaitKey, parentMarkerKey, "RPUSH", isParentPausedOrMaxed, parentId) else local isPausedOrMaxed = isQueuePausedOrMaxed(parentMetaKey, parentActiveKey) diff --git a/src/commands/includes/removeDeduplicationKey.lua b/src/commands/includes/removeDeduplicationKey.lua index b93e9e29f8..a4b2a461d7 100644 --- a/src/commands/includes/removeDeduplicationKey.lua +++ b/src/commands/includes/removeDeduplicationKey.lua @@ -9,4 +9,3 @@ local function removeDeduplicationKey(prefixKey, jobKey) rcall("DEL", deduplicationKey) end end - \ No newline at end of file diff --git a/src/commands/includes/removeJobs.lua b/src/commands/includes/removeJobs.lua index 58b67abd97..f3882a7cb4 100644 --- a/src/commands/includes/removeJobs.lua +++ b/src/commands/includes/removeJobs.lua @@ -7,7 +7,7 @@ local function removeJobs(keys, hard, baseKey, max) for i, key in ipairs(keys) do - removeJob(key, hard, baseKey, true --[[remove debounce key]]) + removeJob(key, hard, baseKey, true --[[remove deduplication key]]) end return max - #keys end diff --git a/src/commands/includes/removeJobsByMaxAge.lua b/src/commands/includes/removeJobsByMaxAge.lua index ca24fad3a7..13ff15944e 100644 --- a/src/commands/includes/removeJobsByMaxAge.lua +++ b/src/commands/includes/removeJobsByMaxAge.lua @@ -5,12 +5,11 @@ -- Includes --- @include "removeJob" -local function removeJobsByMaxAge(timestamp, maxAge, targetSet, prefix, - shouldRemoveDebounceKey) +local function removeJobsByMaxAge(timestamp, maxAge, 
targetSet, prefix) local start = timestamp - maxAge * 1000 local jobIds = rcall("ZREVRANGEBYSCORE", targetSet, start, "-inf") for i, jobId in ipairs(jobIds) do - removeJob(jobId, false, prefix, false --[[remove debounce key]]) + removeJob(jobId, false, prefix, false --[[remove deduplication key]]) end rcall("ZREMRANGEBYSCORE", targetSet, "-inf", start) end diff --git a/src/commands/includes/removeJobsByMaxCount.lua b/src/commands/includes/removeJobsByMaxCount.lua index af52c612c4..f4e165d40e 100644 --- a/src/commands/includes/removeJobsByMaxCount.lua +++ b/src/commands/includes/removeJobsByMaxCount.lua @@ -9,7 +9,7 @@ local function removeJobsByMaxCount(maxCount, targetSet, prefix) local start = maxCount local jobIds = rcall("ZREVRANGE", targetSet, start, -1) for i, jobId in ipairs(jobIds) do - removeJob(jobId, false, prefix, false --[[remove debounce key]]) + removeJob(jobId, false, prefix, false --[[remove deduplication key]]) end rcall("ZREMRANGEBYRANK", targetSet, 0, -(maxCount + 1)) end diff --git a/src/commands/includes/removeParentDependencyKey.lua b/src/commands/includes/removeParentDependencyKey.lua index 87262850c9..8544db0538 100644 --- a/src/commands/includes/removeParentDependencyKey.lua +++ b/src/commands/includes/removeParentDependencyKey.lua @@ -7,13 +7,12 @@ -- Includes --- @include "addJobInTargetList" --- @include "destructureJobKey" ---- @include "getTargetQueueList" +--- @include "isQueuePausedOrMaxed" --- @include "removeJobKeys" local function moveParentToWait(parentPrefix, parentId, emitEvent) - local parentTarget, isPausedOrMaxed = getTargetQueueList(parentPrefix .. "meta", parentPrefix .. "active", - parentPrefix .. "wait", parentPrefix .. "paused") - addJobInTargetList(parentTarget, parentPrefix .. "marker", "RPUSH", isPausedOrMaxed, parentId) + local isPausedOrMaxed = isQueuePausedOrMaxed(parentPrefix .. "meta", parentPrefix .. "active") + addJobInTargetList(parentPrefix .. "wait", parentPrefix .. 
"marker", "RPUSH", isPausedOrMaxed, parentId) if emitEvent then local parentEventStream = parentPrefix .. "events" @@ -21,7 +20,7 @@ local function moveParentToWait(parentPrefix, parentId, emitEvent) end end -local function removeParentDependencyKey(jobKey, hard, parentKey, baseKey, debounceId) +local function removeParentDependencyKey(jobKey, hard, parentKey, baseKey, deduplicationId) if parentKey then local parentDependenciesKey = parentKey .. ":dependencies" local result = rcall("SREM", parentDependenciesKey, jobKey) @@ -38,8 +37,8 @@ local function removeParentDependencyKey(jobKey, hard, parentKey, baseKey, debou if parentPrefix == baseKey then removeParentDependencyKey(parentKey, hard, nil, baseKey, nil) removeJobKeys(parentKey) - if debounceId then - rcall("DEL", parentPrefix .. "de:" .. debounceId) + if deduplicationId then + rcall("DEL", parentPrefix .. "de:" .. deduplicationId) end else moveParentToWait(parentPrefix, parentId) diff --git a/src/commands/includes/storeJob.lua b/src/commands/includes/storeJob.lua index ef53c9cba1..e0c5a254dc 100644 --- a/src/commands/includes/storeJob.lua +++ b/src/commands/includes/storeJob.lua @@ -6,7 +6,7 @@ local function storeJob(eventsKey, jobIdKey, jobId, name, data, opts, timestamp, local jsonOpts = cjson.encode(opts) local delay = opts['delay'] or 0 local priority = opts['priority'] or 0 - local debounceId = opts['de'] and opts['de']['id'] + local deduplicationId = opts['de'] and opts['de']['id'] local optionalValues = {} if parentKey ~= nil then @@ -21,9 +21,9 @@ local function storeJob(eventsKey, jobIdKey, jobId, name, data, opts, timestamp, table.insert(optionalValues, repeatJobKey) end - if debounceId then + if deduplicationId then table.insert(optionalValues, "deid") - table.insert(optionalValues, debounceId) + table.insert(optionalValues, deduplicationId) end rcall("HMSET", jobIdKey, "name", name, "data", data, "opts", jsonOpts, diff --git a/src/commands/migrateDeprecatedPausedKey-2.lua 
b/src/commands/migrateDeprecatedPausedKey-2.lua new file mode 100644 index 0000000000..806039c9a5 --- /dev/null +++ b/src/commands/migrateDeprecatedPausedKey-2.lua @@ -0,0 +1,18 @@ +--[[ + Move paused job ids to wait state to repair these states + + Input: + KEYS[1] paused key + KEYS[2] wait key +]] + +local rcall = redis.call + +local hasJobsInPaused = rcall("EXISTS", KEYS[1]) == 1 + +if hasJobsInPaused then + rcall("RENAME", KEYS[1], KEYS[2]) +end + +return 0 + \ No newline at end of file diff --git a/src/commands/moveJobFromActiveToWait-10.lua b/src/commands/moveJobFromActiveToWait-9.lua similarity index 62% rename from src/commands/moveJobFromActiveToWait-10.lua rename to src/commands/moveJobFromActiveToWait-9.lua index e90d6d2d10..f340021ce7 100644 --- a/src/commands/moveJobFromActiveToWait-10.lua +++ b/src/commands/moveJobFromActiveToWait-9.lua @@ -6,12 +6,11 @@ KEYS[3] stalled key KEYS[4] job lock key - KEYS[5] paused key - KEYS[6] meta key - KEYS[7] limiter key - KEYS[8] prioritized key - KEYS[9] marker key - KEYS[10] event key + KEYS[5] meta key + KEYS[6] limiter key + KEYS[7] prioritized key + KEYS[8] marker key + KEYS[9] event key ARGV[1] job id ARGV[2] lock token @@ -23,28 +22,28 @@ local rcall = redis.call --- @include "includes/addJobInTargetList" --- @include "includes/pushBackJobWithPriority" --- @include "includes/getOrSetMaxEvents" ---- @include "includes/getTargetQueueList" +--- @include "includes/isQueuePausedOrMaxed" local jobId = ARGV[1] local token = ARGV[2] local lockKey = KEYS[4] local lockToken = rcall("GET", lockKey) -local pttl = rcall("PTTL", KEYS[7]) +local pttl = rcall("PTTL", KEYS[6]) if lockToken == token then - local metaKey = KEYS[6] + local metaKey = KEYS[5] local removed = rcall("LREM", KEYS[1], 1, jobId) if removed > 0 then - local target, isPausedOrMaxed = getTargetQueueList(metaKey, KEYS[1], KEYS[2], KEYS[5]) + local isPausedOrMaxed = isQueuePausedOrMaxed(metaKey, KEYS[1]) rcall("SREM", KEYS[3], jobId) local priority = 
tonumber(rcall("HGET", ARGV[3], "priority")) or 0 if priority > 0 then - pushBackJobWithPriority(KEYS[8], priority, jobId) + pushBackJobWithPriority(KEYS[7], priority, jobId) else - addJobInTargetList(target, KEYS[9], "RPUSH", isPausedOrMaxed, jobId) + addJobInTargetList(KEYS[2], KEYS[8], "RPUSH", isPausedOrMaxed, jobId) end rcall("DEL", lockKey) @@ -52,7 +51,7 @@ if lockToken == token then local maxEvents = getOrSetMaxEvents(metaKey) -- Emit waiting event - rcall("XADD", KEYS[10], "MAXLEN", "~", maxEvents, "*", "event", "waiting", + rcall("XADD", KEYS[9], "MAXLEN", "~", maxEvents, "*", "event", "waiting", "jobId", jobId) end end diff --git a/src/commands/moveJobsToWait-8.lua b/src/commands/moveJobsToWait-7.lua similarity index 82% rename from src/commands/moveJobsToWait-8.lua rename to src/commands/moveJobsToWait-7.lua index 15e99c6295..09cf80708e 100644 --- a/src/commands/moveJobsToWait-8.lua +++ b/src/commands/moveJobsToWait-7.lua @@ -8,10 +8,9 @@ KEYS[2] events stream KEYS[3] state key (failed, completed, delayed) KEYS[4] 'wait' - KEYS[5] 'paused' - KEYS[6] 'meta' - KEYS[7] 'active' - KEYS[8] 'marker' + KEYS[5] 'meta' + KEYS[6] 'active' + KEYS[7] 'marker' ARGV[1] count ARGV[2] timestamp @@ -30,10 +29,10 @@ local rcall = redis.call; --- @include "includes/addBaseMarkerIfNeeded" --- @include "includes/batches" --- @include "includes/getOrSetMaxEvents" ---- @include "includes/getTargetQueueList" +--- @include "includes/isQueuePausedOrMaxed" -local metaKey = KEYS[6] -local target, isPausedOrMaxed = getTargetQueueList(metaKey, KEYS[7], KEYS[4], KEYS[5]) +local metaKey = KEYS[5] +local isPausedOrMaxed = isQueuePausedOrMaxed(metaKey, KEYS[6]) local jobs = rcall('ZRANGEBYSCORE', KEYS[3], 0, timestamp, 'LIMIT', 0, maxCount) if (#jobs > 0) then @@ -60,10 +59,10 @@ if (#jobs > 0) then for from, to in batches(#jobs, 7000) do rcall("ZREM", KEYS[3], unpack(jobs, from, to)) - rcall("LPUSH", target, unpack(jobs, from, to)) + rcall("LPUSH", KEYS[4], unpack(jobs, from, to)) end 
- addBaseMarkerIfNeeded(KEYS[8], isPausedOrMaxed) + addBaseMarkerIfNeeded(KEYS[7], isPausedOrMaxed) end maxCount = maxCount - #jobs diff --git a/src/commands/moveStalledJobsToWait-8.lua b/src/commands/moveStalledJobsToWait-8.lua new file mode 100644 index 0000000000..c6e82aa03a --- /dev/null +++ b/src/commands/moveStalledJobsToWait-8.lua @@ -0,0 +1,152 @@ +--[[ + Move stalled jobs to wait. + + Input: + KEYS[1] 'stalled' (SET) + KEYS[2] 'wait', (LIST) + KEYS[3] 'active', (LIST) + KEYS[4] 'failed', (ZSET) + KEYS[5] 'stalled-check', (KEY) + KEYS[6] 'meta', (KEY) + KEYS[7] 'marker' + KEYS[8] 'event stream' (STREAM) + + ARGV[1] Max stalled job count + ARGV[2] queue.toKey('') + ARGV[3] timestamp + ARGV[4] max check time + + Events: + 'stalled' with stalled job id. +]] + +local rcall = redis.call + +-- Includes +--- @include "includes/addJobInTargetList" +--- @include "includes/batches" +--- @include "includes/isQueuePausedOrMaxed" +--- @include "includes/moveParentIfNeeded" +--- @include "includes/removeDeduplicationKeyIfNeeded" +--- @include "includes/removeJob" +--- @include "includes/removeJobsByMaxAge" +--- @include "includes/removeJobsByMaxCount" +--- @include "includes/trimEvents" + +local stalledKey = KEYS[1] +local waitKey = KEYS[2] +local activeKey = KEYS[3] +local failedKey = KEYS[4] +local stalledCheckKey = KEYS[5] +local metaKey = KEYS[6] +local markerKey = KEYS[7] +local eventStreamKey = KEYS[8] +local maxStalledJobCount = tonumber(ARGV[1]) +local queueKeyPrefix = ARGV[2] +local timestamp = ARGV[3] +local maxCheckTime = ARGV[4] + +if rcall("EXISTS", stalledCheckKey) == 1 then return {{}, {}} end + +rcall("SET", stalledCheckKey, timestamp, "PX", maxCheckTime) + +-- Trim events before emiting them to avoid trimming events emitted in this script +trimEvents(metaKey, eventStreamKey) + +-- Move all stalled jobs to wait +local stalling = rcall('SMEMBERS', stalledKey) +local stalled = {} +local failed = {} +if (#stalling > 0) then + rcall('DEL', stalledKey) + + -- 
Remove from active list + for i, jobId in ipairs(stalling) do + local jobKey = queueKeyPrefix .. jobId + + -- Check that the lock is also missing, then we can handle this job as really stalled. + if (rcall("EXISTS", jobKey .. ":lock") == 0) then + -- Remove from the active queue. + local removed = rcall("LREM", activeKey, 1, jobId) + + if (removed > 0) then + -- If this job has been stalled too many times, such as if it crashes the worker, then fail it. + local stalledCount = + rcall("HINCRBY", jobKey, "stalledCounter", 1) + if (stalledCount > maxStalledJobCount) then + local jobAttributes = rcall("HMGET", jobKey, "opts", "parent", "deid") + local rawOpts = jobAttributes[1] + local rawParentData = jobAttributes[2] + local opts = cjson.decode(rawOpts) + local removeOnFailType = type(opts["removeOnFail"]) + rcall("ZADD", failedKey, timestamp, jobId) + removeDeduplicationKeyIfNeeded(queueKeyPrefix, jobAttributes[3]) + + local failedReason = + "job stalled more than allowable limit" + rcall("HMSET", jobKey, "failedReason", failedReason, + "finishedOn", timestamp) + rcall("XADD", eventStreamKey, "*", "event", + "failed", "jobId", jobId, 'prev', 'active', + 'failedReason', failedReason) + + if rawParentData ~= false then + local parentData = cjson.decode(rawParentData) + moveParentIfNeeded(parentData, parentData['queueKey'] .. ':' .. 
parentData['id'], jobKey, + failedReason, timestamp) + end + + if removeOnFailType == "number" then + removeJobsByMaxCount(opts["removeOnFail"], + failedKey, queueKeyPrefix) + elseif removeOnFailType == "boolean" then + if opts["removeOnFail"] then + removeJob(jobId, false, queueKeyPrefix, + false --[[remove deduplication key]]) + rcall("ZREM", failedKey, jobId) + end + elseif removeOnFailType ~= "nil" then + local maxAge = opts["removeOnFail"]["age"] + local maxCount = opts["removeOnFail"]["count"] + + if maxAge ~= nil then + removeJobsByMaxAge(timestamp, maxAge, + failedKey, queueKeyPrefix) + end + + if maxCount ~= nil and maxCount > 0 then + removeJobsByMaxCount(maxCount, failedKey, + queueKeyPrefix) + end + end + + table.insert(failed, jobId) + else + local isPausedOrMaxed = isQueuePausedOrMaxed(metaKey, activeKey) + + -- Move the job back to the wait queue, to immediately be picked up by a waiting worker. + addJobInTargetList(waitKey, markerKey, "RPUSH", isPausedOrMaxed, jobId) + + rcall("XADD", eventStreamKey, "*", "event", + "waiting", "jobId", jobId, 'prev', 'active') + + -- Emit the stalled event + rcall("XADD", eventStreamKey, "*", "event", + "stalled", "jobId", jobId) + table.insert(stalled, jobId) + end + end + end + end +end + +-- Mark potentially stalled jobs +local active = rcall('LRANGE', activeKey, 0, -1) + +if (#active > 0) then + for from, to in batches(#active, 7000) do + rcall('SADD', stalledKey, unpack(active, from, to)) + end +end + +return {failed, stalled} \ No newline at end of file diff --git a/src/commands/moveStalledJobsToWait-9.lua b/src/commands/moveStalledJobsToWait-9.lua deleted file mode 100644 index 2e6161ebee..0000000000 --- a/src/commands/moveStalledJobsToWait-9.lua +++ /dev/null @@ -1,181 +0,0 @@ ---[[ - Move stalled jobs to wait. 
- - Input: - KEYS[1] 'stalled' (SET) - KEYS[2] 'wait', (LIST) - KEYS[3] 'active', (LIST) - KEYS[4] 'failed', (ZSET) - KEYS[5] 'stalled-check', (KEY) - KEYS[6] 'meta', (KEY) - KEYS[7] 'paused', (LIST) - KEYS[8] 'marker' - KEYS[9] 'event stream' (STREAM) - - ARGV[1] Max stalled job count - ARGV[2] queue.toKey('') - ARGV[3] timestamp - ARGV[4] max check time - - Events: - 'stalled' with stalled job id. -]] - -local rcall = redis.call - --- Includes ---- @include "includes/addJobInTargetList" ---- @include "includes/batches" ---- @include "includes/getTargetQueueList" ---- @include "includes/moveParentFromWaitingChildrenToFailed" ---- @include "includes/moveParentToWaitIfNeeded" ---- @include "includes/removeDeduplicationKeyIfNeeded" ---- @include "includes/removeJob" ---- @include "includes/removeJobsByMaxAge" ---- @include "includes/removeJobsByMaxCount" ---- @include "includes/trimEvents" - -local stalledKey = KEYS[1] -local waitKey = KEYS[2] -local activeKey = KEYS[3] -local failedKey = KEYS[4] -local stalledCheckKey = KEYS[5] -local metaKey = KEYS[6] -local pausedKey = KEYS[7] -local markerKey = KEYS[8] -local eventStreamKey = KEYS[9] -local maxStalledJobCount = tonumber(ARGV[1]) -local queueKeyPrefix = ARGV[2] -local timestamp = ARGV[3] -local maxCheckTime = ARGV[4] - -if rcall("EXISTS", stalledCheckKey) == 1 then return {{}, {}} end - -rcall("SET", stalledCheckKey, timestamp, "PX", maxCheckTime) - --- Trim events before emiting them to avoid trimming events emitted in this script -trimEvents(metaKey, eventStreamKey) - --- Move all stalled jobs to wait -local stalling = rcall('SMEMBERS', stalledKey) -local stalled = {} -local failed = {} -if (#stalling > 0) then - rcall('DEL', stalledKey) - - -- Remove from active list - for i, jobId in ipairs(stalling) do - -- Markers in waitlist DEPRECATED in v5: Remove in v6. - if string.sub(jobId, 1, 2) == "0:" then - -- If the jobId is a delay marker ID we just remove it. 
- rcall("LREM", activeKey, 1, jobId) - else - local jobKey = queueKeyPrefix .. jobId - - -- Check that the lock is also missing, then we can handle this job as really stalled. - if (rcall("EXISTS", jobKey .. ":lock") == 0) then - -- Remove from the active queue. - local removed = rcall("LREM", activeKey, 1, jobId) - - if (removed > 0) then - -- If this job has been stalled too many times, such as if it crashes the worker, then fail it. - local stalledCount = - rcall("HINCRBY", jobKey, "stalledCounter", 1) - if (stalledCount > maxStalledJobCount) then - local jobAttributes = rcall("HMGET", jobKey, "opts", "parent", "deid") - local rawOpts = jobAttributes[1] - local rawParentData = jobAttributes[2] - local opts = cjson.decode(rawOpts) - local removeOnFailType = type(opts["removeOnFail"]) - rcall("ZADD", failedKey, timestamp, jobId) - removeDeduplicationKeyIfNeeded(queueKeyPrefix, jobAttributes[3]) - - local failedReason = - "job stalled more than allowable limit" - rcall("HMSET", jobKey, "failedReason", failedReason, - "finishedOn", timestamp) - rcall("XADD", eventStreamKey, "*", "event", - "failed", "jobId", jobId, 'prev', 'active', - 'failedReason', failedReason) - - if rawParentData ~= false then - if opts['fpof'] then - local parentData = cjson.decode(rawParentData) - moveParentFromWaitingChildrenToFailed( - parentData['queueKey'], - parentData['queueKey'] .. ':' .. parentData['id'], - parentData['id'], - jobKey, - timestamp - ) - elseif opts['idof'] or opts['rdof'] then - local parentData = cjson.decode(rawParentData) - local parentKey = parentData['queueKey'] .. ':' .. parentData['id'] - local dependenciesSet = parentKey .. ":dependencies" - if rcall("SREM", dependenciesSet, jobKey) == 1 then - moveParentToWaitIfNeeded(parentData['queueKey'], dependenciesSet, - parentKey, parentData['id'], timestamp) - if opts['idof'] then - local failedSet = parentKey .. 
":failed" - rcall("HSET", failedSet, jobKey, failedReason) - end - end - end - end - - if removeOnFailType == "number" then - removeJobsByMaxCount(opts["removeOnFail"], - failedKey, queueKeyPrefix) - elseif removeOnFailType == "boolean" then - if opts["removeOnFail"] then - removeJob(jobId, false, queueKeyPrefix, - false --[[remove debounce key]]) - rcall("ZREM", failedKey, jobId) - end - elseif removeOnFailType ~= "nil" then - local maxAge = opts["removeOnFail"]["age"] - local maxCount = opts["removeOnFail"]["count"] - - if maxAge ~= nil then - removeJobsByMaxAge(timestamp, maxAge, - failedKey, queueKeyPrefix) - end - - if maxCount ~= nil and maxCount > 0 then - removeJobsByMaxCount(maxCount, failedKey, - queueKeyPrefix) - end - end - - table.insert(failed, jobId) - else - local target, isPausedOrMaxed = - getTargetQueueList(metaKey, activeKey, waitKey, pausedKey) - - -- Move the job back to the wait queue, to immediately be picked up by a waiting worker. - addJobInTargetList(target, markerKey, "RPUSH", isPausedOrMaxed, jobId) - - rcall("XADD", eventStreamKey, "*", "event", - "waiting", "jobId", jobId, 'prev', 'active') - - -- Emit the stalled event - rcall("XADD", eventStreamKey, "*", "event", - "stalled", "jobId", jobId) - table.insert(stalled, jobId) - end - end - end - end - end -end - --- Mark potentially stalled jobs -local active = rcall('LRANGE', activeKey, 0, -1) - -if (#active > 0) then - for from, to in batches(#active, 7000) do - rcall('SADD', stalledKey, unpack(active, from, to)) - end -end - -return {failed, stalled} \ No newline at end of file diff --git a/src/commands/moveToActive-11.lua b/src/commands/moveToActive-10.lua similarity index 78% rename from src/commands/moveToActive-11.lua rename to src/commands/moveToActive-10.lua index 064f3fecef..d1343cded7 100644 --- a/src/commands/moveToActive-11.lua +++ b/src/commands/moveToActive-10.lua @@ -18,12 +18,11 @@ KEYS[7] delayed key -- Delayed jobs - KEYS[8] paused key - KEYS[9] meta key - KEYS[10] pc 
priority counter + KEYS[8] meta key + KEYS[9] pc priority counter -- Marker - KEYS[11] marker key + KEYS[10] marker key -- Arguments ARGV[1] key prefix @@ -45,17 +44,17 @@ local opts = cmsgpack.unpack(ARGV[3]) -- Includes --- @include "includes/getNextDelayedTimestamp" --- @include "includes/getRateLimitTTL" ---- @include "includes/getTargetQueueList" +--- @include "includes/isQueuePausedOrMaxed" --- @include "includes/moveJobFromPriorityToActive" --- @include "includes/prepareJobForProcessing" --- @include "includes/promoteDelayedJobs" -local target, isPausedOrMaxed = getTargetQueueList(KEYS[9], activeKey, waitKey, KEYS[8]) +local isPausedOrMaxed = isQueuePausedOrMaxed(KEYS[8], activeKey) -- Check if there are delayed jobs that we can move to wait. -local markerKey = KEYS[11] -promoteDelayedJobs(delayedKey, markerKey, target, KEYS[3], eventStreamKey, ARGV[1], - ARGV[2], KEYS[10], isPausedOrMaxed) +local markerKey = KEYS[10] +promoteDelayedJobs(delayedKey, markerKey, KEYS[1], KEYS[3], eventStreamKey, ARGV[1], + ARGV[2], KEYS[9], isPausedOrMaxed) local maxJobs = tonumber(opts['limiter'] and opts['limiter']['max']) local expireTime = getRateLimitTTL(maxJobs, rateLimiterKey) @@ -69,17 +68,11 @@ if isPausedOrMaxed then return {0, 0, 0, 0} end -- no job ID, try non-blocking move from wait to active local jobId = rcall("RPOPLPUSH", waitKey, activeKey) --- Markers in waitlist DEPRECATED in v5: Will be completely removed in v6. 
-if jobId and string.sub(jobId, 1, 2) == "0:" then - rcall("LREM", activeKey, 1, jobId) - jobId = rcall("RPOPLPUSH", waitKey, activeKey) -end - if jobId then return prepareJobForProcessing(ARGV[1], rateLimiterKey, eventStreamKey, jobId, ARGV[2], maxJobs, markerKey, opts) else - jobId = moveJobFromPriorityToActive(KEYS[3], activeKey, KEYS[10]) + jobId = moveJobFromPriorityToActive(KEYS[3], activeKey, KEYS[9]) if jobId then return prepareJobForProcessing(ARGV[1], rateLimiterKey, eventStreamKey, jobId, ARGV[2], maxJobs, markerKey, opts) diff --git a/src/commands/moveToFinished-14.lua b/src/commands/moveToFinished-13.lua similarity index 68% rename from src/commands/moveToFinished-14.lua rename to src/commands/moveToFinished-13.lua index 616ee5dada..2694136f09 100644 --- a/src/commands/moveToFinished-14.lua +++ b/src/commands/moveToFinished-13.lua @@ -15,14 +15,13 @@ KEYS[6] rate limiter key KEYS[7] delayed key - KEYS[8] paused key - KEYS[9] meta key - KEYS[10] pc priority counter + KEYS[8] meta key + KEYS[9] pc priority counter - KEYS[11] completed/failed key - KEYS[12] jobId key - KEYS[13] metrics key - KEYS[14] marker key + KEYS[10] completed/failed key + KEYS[11] jobId key + KEYS[12] metrics key + KEYS[13] marker key ARGV[1] jobId ARGV[2] timestamp @@ -38,9 +37,6 @@ opts - lockDuration - lock duration in milliseconds opts - attempts max attempts opts - maxMetricsSize - opts - fpof - fail parent on fail - opts - idof - ignore dependency on fail - opts - rdof - remove dependency on fail Output: 0 OK @@ -59,10 +55,9 @@ local rcall = redis.call --- @include "includes/collectMetrics" --- @include "includes/getNextDelayedTimestamp" --- @include "includes/getRateLimitTTL" ---- @include "includes/getTargetQueueList" +--- @include "includes/isQueuePausedOrMaxed" --- @include "includes/moveJobFromPriorityToActive" ---- @include "includes/moveParentFromWaitingChildrenToFailed" ---- @include "includes/moveParentToWaitIfNeeded" +--- @include "includes/moveParentIfNeeded" --- 
@include "includes/prepareJobForProcessing" --- @include "includes/promoteDelayedJobs" --- @include "includes/removeDeduplicationKeyIfNeeded" @@ -74,7 +69,7 @@ local rcall = redis.call --- @include "includes/trimEvents" --- @include "includes/updateParentDepsIfNeeded" -local jobIdKey = KEYS[12] +local jobIdKey = KEYS[11] if rcall("EXISTS", jobIdKey) == 1 then -- // Make sure job exists local opts = cmsgpack.unpack(ARGV[8]) @@ -95,14 +90,6 @@ if rcall("EXISTS", jobIdKey) == 1 then -- // Make sure job exists end local jobAttributes = rcall("HMGET", jobIdKey, "parentKey", "parent", "deid") - local parentKey = jobAttributes[1] or "" - local parentId = "" - local parentQueueKey = "" - if jobAttributes[2] ~= false then - local jsonDecodedParent = cjson.decode(jobAttributes[2]) - parentId = jsonDecodedParent['id'] - parentQueueKey = jsonDecodedParent['queueKey'] - end local jobId = ARGV[1] local timestamp = ARGV[2] @@ -113,7 +100,7 @@ if rcall("EXISTS", jobIdKey) == 1 then -- // Make sure job exists if (numRemovedElements < 1) then return -3 end local eventStreamKey = KEYS[4] - local metaKey = KEYS[9] + local metaKey = KEYS[8] -- Trim events before emiting them to avoid trimming events emitted in this script trimEvents(metaKey, eventStreamKey) @@ -126,12 +113,13 @@ if rcall("EXISTS", jobIdKey) == 1 then -- // Make sure job exists -- 2) move the job Id to parent "processed" set -- 3) push the results into parent "results" list -- 4) if parent's dependencies is empty, then move parent to "wait/paused". Note it may be a different queue!. - if parentId == "" and parentKey ~= "" then - parentId = getJobIdFromKey(parentKey) - parentQueueKey = getJobKeyPrefix(parentKey, ":" .. 
parentId) - end + local parentKey = jobAttributes[1] or "" + + if jobAttributes[2] ~= false then + local parentData = cjson.decode(jobAttributes[2]) + local parentId = parentData['id'] + local parentQueueKey = parentData['queueKey'] - if parentId ~= "" then if ARGV[5] == "completed" then local dependenciesSet = parentKey .. ":dependencies" if rcall("SREM", dependenciesSet, jobIdKey) == 1 then @@ -140,21 +128,7 @@ if rcall("EXISTS", jobIdKey) == 1 then -- // Make sure job exists ARGV[4], timestamp) end else - if opts['fpof'] then - moveParentFromWaitingChildrenToFailed(parentQueueKey, parentKey, - parentId, jobIdKey, - timestamp) - elseif opts['idof'] or opts['rdof'] then - local dependenciesSet = parentKey .. ":dependencies" - if rcall("SREM", dependenciesSet, jobIdKey) == 1 then - moveParentToWaitIfNeeded(parentQueueKey, dependenciesSet, - parentKey, parentId, timestamp) - if opts['idof'] then - local failedSet = parentKey .. ":failed" - rcall("HSET", failedSet, jobIdKey, ARGV[4]) - end - end - end + moveParentIfNeeded(parentData, parentKey, jobIdKey, ARGV[4], timestamp) end end @@ -162,7 +136,7 @@ if rcall("EXISTS", jobIdKey) == 1 then -- // Make sure job exists -- Remove job? if maxCount ~= 0 then - local targetSet = KEYS[11] + local targetSet = KEYS[10] -- Add to complete/failed set rcall("ZADD", targetSet, timestamp, jobId) rcall("HMSET", jobIdKey, ARGV[3], ARGV[4], "finishedOn", timestamp) @@ -198,19 +172,19 @@ if rcall("EXISTS", jobIdKey) == 1 then -- // Make sure job exists -- Collect metrics if maxMetricsSize ~= "" then - collectMetrics(KEYS[13], KEYS[13] .. ':data', maxMetricsSize, timestamp) + collectMetrics(KEYS[12], KEYS[12] .. ':data', maxMetricsSize, timestamp) end -- Try to get next job to avoid an extra roundtrip if the queue is not closing, -- and not rate limited. 
if (ARGV[6] == "1") then - local target, isPausedOrMaxed = getTargetQueueList(metaKey, KEYS[2], KEYS[1], KEYS[8]) + local isPausedOrMaxed = isQueuePausedOrMaxed(metaKey, KEYS[2]) - local markerKey = KEYS[14] + local markerKey = KEYS[13] -- Check if there are delayed jobs that can be promoted - promoteDelayedJobs(KEYS[7], markerKey, target, KEYS[3], eventStreamKey, prefix, - timestamp, KEYS[10], isPausedOrMaxed) + promoteDelayedJobs(KEYS[7], markerKey, KEYS[1], KEYS[3], eventStreamKey, prefix, + timestamp, KEYS[9], isPausedOrMaxed) local maxJobs = tonumber(opts['limiter'] and opts['limiter']['max']) -- Check if we are rate limited first. @@ -224,26 +198,10 @@ if rcall("EXISTS", jobIdKey) == 1 then -- // Make sure job exists jobId = rcall("RPOPLPUSH", KEYS[1], KEYS[2]) if jobId then - -- Markers in waitlist DEPRECATED in v5: Remove in v6. - if string.sub(jobId, 1, 2) == "0:" then - rcall("LREM", KEYS[2], 1, jobId) - - -- If jobId is special ID 0:delay (delay greater than 0), then there is no job to process - -- but if ID is 0:0, then there is at least 1 prioritized job to process - if jobId == "0:0" then - jobId = moveJobFromPriorityToActive(KEYS[3], KEYS[2], - KEYS[10]) - return prepareJobForProcessing(prefix, KEYS[6], eventStreamKey, jobId, - timestamp, maxJobs, markerKey, - opts) - end - else - return prepareJobForProcessing(prefix, KEYS[6], eventStreamKey, jobId, - timestamp, maxJobs, markerKey, - opts) - end + return prepareJobForProcessing(prefix, KEYS[6], eventStreamKey, jobId, + timestamp, maxJobs, markerKey, opts) else - jobId = moveJobFromPriorityToActive(KEYS[3], KEYS[2], KEYS[10]) + jobId = moveJobFromPriorityToActive(KEYS[3], KEYS[2], KEYS[9]) if jobId then return prepareJobForProcessing(prefix, KEYS[6], eventStreamKey, jobId, timestamp, maxJobs, markerKey, diff --git a/src/commands/obliterate-2.lua b/src/commands/obliterate-2.lua index 1a7be36393..7814f12dbe 100644 --- a/src/commands/obliterate-2.lua +++ b/src/commands/obliterate-2.lua @@ -81,7 +81,7 @@ 
if(maxCount <= 0) then return 1 end -local waitKey = baseKey .. 'paused' +local waitKey = baseKey .. 'wait' maxCount = removeListJobs(waitKey, true, baseKey, maxCount) if(maxCount <= 0) then return 1 diff --git a/src/commands/pause-7.lua b/src/commands/pause-7.lua index 90d0f945f7..471083b14a 100644 --- a/src/commands/pause-7.lua +++ b/src/commands/pause-7.lua @@ -19,19 +19,27 @@ local rcall = redis.call -- Includes --- @include "includes/addDelayMarkerIfNeeded" +--- @include "includes/getWaitPlusPrioritizedCount" local markerKey = KEYS[7] -local hasJobs = rcall("EXISTS", KEYS[1]) == 1 ---TODO: check this logic to be reused when changing a delay -if hasJobs then rcall("RENAME", KEYS[1], KEYS[2]) end if ARGV[1] == "paused" then rcall("HSET", KEYS[3], "paused", 1) rcall("DEL", markerKey) else rcall("HDEL", KEYS[3], "paused") + --jobs in paused key + local hasJobs = rcall("EXISTS", KEYS[1]) == 1 + + if hasJobs then + --move a maximum of 7000 per resumed call in order to not block + --if users have more jobs in paused state, call resumed multiple times + local jobs = rcall('LRANGE', KEYS[1], 0, 6999) + rcall("RPUSH", KEYS[2], unpack(jobs)) + rcall("LTRIM", KEYS[1], #jobs, -1) + end - if hasJobs or rcall("ZCARD", KEYS[4]) > 0 then + if getWaitPlusPrioritizedCount(KEYS[2], KEYS[4]) > 0 then -- Add marker if there are waiting or priority jobs rcall("ZADD", markerKey, 0, "0") else diff --git a/src/commands/promote-8.lua b/src/commands/promote-8.lua new file mode 100644 index 0000000000..f691f13e26 --- /dev/null +++ b/src/commands/promote-8.lua @@ -0,0 +1,56 @@ +--[[ + Promotes a job that is currently "delayed" to the "waiting" state + + Input: + KEYS[1] 'delayed' + KEYS[2] 'wait' + KEYS[3] 'meta' + KEYS[4] 'prioritized' + KEYS[5] 'active' + KEYS[6] 'pc' priority counter + KEYS[7] 'event stream' + KEYS[8] 'marker' + + ARGV[1] queue.toKey('') + ARGV[2] jobId + + Output: + 0 - OK + -3 - Job not in delayed zset. 
+ + Events: + 'waiting' +]] +local rcall = redis.call +local jobId = ARGV[2] + +-- Includes +--- @include "includes/addJobInTargetList" +--- @include "includes/addJobWithPriority" +--- @include "includes/isQueuePausedOrMaxed" + +if rcall("ZREM", KEYS[1], jobId) == 1 then + local jobKey = ARGV[1] .. jobId + local priority = tonumber(rcall("HGET", jobKey, "priority")) or 0 + local metaKey = KEYS[3] + local markerKey = KEYS[8] + + local isPausedOrMaxed = isQueuePausedOrMaxed(metaKey, KEYS[5]) + + if priority == 0 then + -- LIFO or FIFO + addJobInTargetList(KEYS[2], markerKey, "LPUSH", isPausedOrMaxed, jobId) + else + addJobWithPriority(markerKey, KEYS[4], priority, jobId, KEYS[6], isPausedOrMaxed) + end + + -- Emit waiting event (wait..ing@token) + rcall("XADD", KEYS[7], "*", "event", "waiting", "jobId", jobId, "prev", + "delayed"); + + rcall("HSET", jobKey, "delay", 0) + + return 0 +else + return -3 +end diff --git a/src/commands/promote-9.lua b/src/commands/promote-9.lua deleted file mode 100644 index 2143e52aa0..0000000000 --- a/src/commands/promote-9.lua +++ /dev/null @@ -1,62 +0,0 @@ ---[[ - Promotes a job that is currently "delayed" to the "waiting" state - - Input: - KEYS[1] 'delayed' - KEYS[2] 'wait' - KEYS[3] 'paused' - KEYS[4] 'meta' - KEYS[5] 'prioritized' - KEYS[6] 'active' - KEYS[7] 'pc' priority counter - KEYS[8] 'event stream' - KEYS[9] 'marker' - - ARGV[1] queue.toKey('') - ARGV[2] jobId - - Output: - 0 - OK - -3 - Job not in delayed zset. - - Events: - 'waiting' -]] -local rcall = redis.call -local jobId = ARGV[2] - --- Includes ---- @include "includes/addJobInTargetList" ---- @include "includes/addJobWithPriority" ---- @include "includes/getTargetQueueList" - -if rcall("ZREM", KEYS[1], jobId) == 1 then - local jobKey = ARGV[1] .. jobId - local priority = tonumber(rcall("HGET", jobKey, "priority")) or 0 - local metaKey = KEYS[4] - local markerKey = KEYS[9] - - -- Remove delayed "marker" from the wait list if there is any. 
- -- Since we are adding a job we do not need the marker anymore. - -- Markers in waitlist DEPRECATED in v5: Remove in v6. - local target, isPausedOrMaxed = getTargetQueueList(metaKey, KEYS[6], KEYS[2], KEYS[3]) - local marker = rcall("LINDEX", target, 0) - if marker and string.sub(marker, 1, 2) == "0:" then rcall("LPOP", target) end - - if priority == 0 then - -- LIFO or FIFO - addJobInTargetList(target, markerKey, "LPUSH", isPausedOrMaxed, jobId) - else - addJobWithPriority(markerKey, KEYS[5], priority, jobId, KEYS[7], isPausedOrMaxed) - end - - -- Emit waiting event (wait..ing@token) - rcall("XADD", KEYS[8], "*", "event", "waiting", "jobId", jobId, "prev", - "delayed"); - - rcall("HSET", jobKey, "delay", 0) - - return 0 -else - return -3 -end diff --git a/src/commands/removeLegacyMarkers-5.lua b/src/commands/removeLegacyMarkers-5.lua new file mode 100644 index 0000000000..e73d240f47 --- /dev/null +++ b/src/commands/removeLegacyMarkers-5.lua @@ -0,0 +1,55 @@ +--[[ + Remove old legacy markers 0:0 from wait states and finished states if needed + + Input: + KEYS[1] wait key + KEYS[2] paused key + KEYS[3] meta key + KEYS[4] completed key + KEYS[5] failed key + + ARGV[1] prefix key +]] + +local rcall = redis.call +local waitKey = KEYS[1] +local pausedKey = KEYS[2] +local completedKey = KEYS[4] +local failedKey = KEYS[5] + +-- Includes +--- @include "includes/isQueuePaused" +--- @include "includes/getZSetItems" +--- @include "includes/removeJobKeys" + +local isPaused = isQueuePaused(KEYS[3]) + +local function removeMarkerFromWait( stateKey) + local marker = rcall("LINDEX", stateKey, -1) + if marker and string.sub(marker, 1, 2) == "0:" then + rcall("RPOP", stateKey) + end +end + +local function removeMarkerFromFinished(keyName, prefix) + local jobs = getZSetItems(keyName, 0) + if #jobs > 0 then + for _, jobId in ipairs(jobs) do + local jobKey = prefix .. 
jobId + if jobId and string.sub(jobId, 1, 2) == "0:" then + rcall("ZREM", keyName, jobId) + removeJobKeys(jobKey) + end + end + end +end + +if isPaused then + removeMarkerFromWait(pausedKey) +else + removeMarkerFromWait(waitKey) +end + +-- in cases were they got processed and finished +removeMarkerFromFinished(completedKey, ARGV[1]) +removeMarkerFromFinished(failedKey, ARGV[1]) \ No newline at end of file diff --git a/src/commands/reprocessJob-8.lua b/src/commands/reprocessJob-7.lua similarity index 78% rename from src/commands/reprocessJob-8.lua rename to src/commands/reprocessJob-7.lua index 300ab6a1e8..f393ede5da 100644 --- a/src/commands/reprocessJob-8.lua +++ b/src/commands/reprocessJob-7.lua @@ -7,9 +7,8 @@ KEYS[3] job state KEYS[4] wait key KEYS[5] meta - KEYS[6] paused key - KEYS[7] active key - KEYS[8] marker key + KEYS[6] active key + KEYS[7] marker key ARGV[1] job.id ARGV[2] (job.opts.lifo ? 'R' : 'L') + 'PUSH' @@ -26,15 +25,15 @@ local rcall = redis.call; -- Includes --- @include "includes/addJobInTargetList" --- @include "includes/getOrSetMaxEvents" ---- @include "includes/getTargetQueueList" +--- @include "includes/isQueuePausedOrMaxed" if rcall("EXISTS", KEYS[1]) == 1 then local jobId = ARGV[1] if (rcall("ZREM", KEYS[3], jobId) == 1) then rcall("HDEL", KEYS[1], "finishedOn", "processedOn", ARGV[3]) - local target, isPausedOrMaxed = getTargetQueueList(KEYS[5], KEYS[7], KEYS[4], KEYS[6]) - addJobInTargetList(target, KEYS[8], ARGV[2], isPausedOrMaxed, jobId) + local isPausedOrMaxed = isQueuePausedOrMaxed(KEYS[5], KEYS[6]) + addJobInTargetList(KEYS[4], KEYS[7], ARGV[2], isPausedOrMaxed, jobId) local maxEvents = getOrSetMaxEvents(KEYS[5]) -- Emit waiting event diff --git a/src/commands/retryJob-11.lua b/src/commands/retryJob-10.lua similarity index 55% rename from src/commands/retryJob-11.lua rename to src/commands/retryJob-10.lua index 33d1f7a85a..d5977b41d2 100644 --- a/src/commands/retryJob-11.lua +++ b/src/commands/retryJob-10.lua @@ -4,15 +4,14 @@ 
Input: KEYS[1] 'active', KEYS[2] 'wait' - KEYS[3] 'paused' - KEYS[4] job key - KEYS[5] 'meta' - KEYS[6] events stream - KEYS[7] delayed key - KEYS[8] prioritized key - KEYS[9] 'pc' priority counter + KEYS[3] job key + KEYS[4] 'meta' + KEYS[5] events stream + KEYS[6] delayed key + KEYS[7] prioritized key + KEYS[8] 'pc' priority counter + KEYS[9] 'stalled' KEYS[10] 'marker' - KEYS[11] 'stalled' ARGV[1] key prefix ARGV[2] timestamp @@ -35,20 +34,21 @@ local rcall = redis.call --- @include "includes/addJobInTargetList" --- @include "includes/addJobWithPriority" --- @include "includes/getOrSetMaxEvents" ---- @include "includes/getTargetQueueList" --- @include "includes/promoteDelayedJobs" --- @include "includes/removeLock" --- @include "includes/isQueuePausedOrMaxed" -local target, isPausedOrMaxed = getTargetQueueList(KEYS[5], KEYS[1], KEYS[2], KEYS[3]) +local jobKey = KEYS[3] +local metaKey = KEYS[4] +local isPausedOrMaxed = isQueuePausedOrMaxed(metaKey, KEYS[1]) local markerKey = KEYS[10] -- Check if there are delayed jobs that we can move to wait. 
-- test example: when there are delayed jobs between retries -promoteDelayedJobs(KEYS[7], markerKey, target, KEYS[8], KEYS[6], ARGV[1], ARGV[2], KEYS[9], isPausedOrMaxed) +promoteDelayedJobs(KEYS[6], markerKey, KEYS[2], KEYS[7], KEYS[5], ARGV[1], ARGV[2], KEYS[8], isPausedOrMaxed) -if rcall("EXISTS", KEYS[4]) == 1 then - local errorCode = removeLock(KEYS[4], KEYS[11], ARGV[5], ARGV[4]) +if rcall("EXISTS", jobKey) == 1 then + local errorCode = removeLock(jobKey, KEYS[9], ARGV[5], ARGV[4]) if errorCode < 0 then return errorCode end @@ -56,24 +56,24 @@ if rcall("EXISTS", KEYS[4]) == 1 then local numRemovedElements = rcall("LREM", KEYS[1], -1, ARGV[4]) if (numRemovedElements < 1) then return -3 end - local priority = tonumber(rcall("HGET", KEYS[4], "priority")) or 0 + local priority = tonumber(rcall("HGET", jobKey, "priority")) or 0 --need to re-evaluate after removing job from active - isPausedOrMaxed = isQueuePausedOrMaxed(KEYS[5], KEYS[1]) + isPausedOrMaxed = isQueuePausedOrMaxed(metaKey, KEYS[1]) -- Standard or priority add if priority == 0 then - addJobInTargetList(target, markerKey, ARGV[3], isPausedOrMaxed, ARGV[4]) + addJobInTargetList(KEYS[2], markerKey, ARGV[3], isPausedOrMaxed, ARGV[4]) else - addJobWithPriority(markerKey, KEYS[8], priority, ARGV[4], KEYS[9], isPausedOrMaxed) + addJobWithPriority(markerKey, KEYS[7], priority, ARGV[4], KEYS[8], isPausedOrMaxed) end - rcall("HINCRBY", KEYS[4], "atm", 1) + rcall("HINCRBY", jobKey, "atm", 1) - local maxEvents = getOrSetMaxEvents(KEYS[5]) + local maxEvents = getOrSetMaxEvents(metaKey) -- Emit waiting event - rcall("XADD", KEYS[6], "MAXLEN", "~", maxEvents, "*", "event", "waiting", + rcall("XADD", KEYS[5], "MAXLEN", "~", maxEvents, "*", "event", "waiting", "jobId", ARGV[4], "prev", "failed") return 0 diff --git a/src/interfaces/base-job-options.ts b/src/interfaces/base-job-options.ts index bb10f1caa3..b7094c6389 100644 --- a/src/interfaces/base-job-options.ts +++ b/src/interfaces/base-job-options.ts @@ -76,11 
+76,6 @@ export interface DefaultJobOptions { } export interface BaseJobOptions extends DefaultJobOptions { - /** - * Repeat this job, for example based on a `cron` schedule. - */ - repeat?: RepeatOptions; - /** * Internal property used by repeatable jobs to save base repeat job key. */ diff --git a/src/interfaces/debounce-options.ts b/src/interfaces/deduplication-options.ts similarity index 61% rename from src/interfaces/debounce-options.ts rename to src/interfaces/deduplication-options.ts index 02a34fa2fa..81ecd7c47f 100644 --- a/src/interfaces/debounce-options.ts +++ b/src/interfaces/deduplication-options.ts @@ -1,7 +1,7 @@ /** - * Debounce options + * Deduplication options */ -export interface DebounceOptions { +export interface DeduplicationOptions { /** * ttl in milliseconds */ diff --git a/src/interfaces/index.ts b/src/interfaces/index.ts index bf8a6ac0be..9c4e9c4f2a 100644 --- a/src/interfaces/index.ts +++ b/src/interfaces/index.ts @@ -3,7 +3,7 @@ export * from './backoff-options'; export * from './base-job-options'; export * from './child-message'; export * from './connection'; -export * from './debounce-options'; +export * from './deduplication-options'; export * from './flow-job'; export * from './ioredis-events'; export * from './job-json'; diff --git a/src/interfaces/job-json.ts b/src/interfaces/job-json.ts index 25ad335145..e446a8f822 100644 --- a/src/interfaces/job-json.ts +++ b/src/interfaces/job-json.ts @@ -18,7 +18,6 @@ export interface JobJson { parent?: ParentKeys; parentKey?: string; repeatJobKey?: string; - debounceId?: string; deduplicationId?: string; processedBy?: string; } diff --git a/src/interfaces/queue-options.ts b/src/interfaces/queue-options.ts index a1459059e5..f808de342b 100644 --- a/src/interfaces/queue-options.ts +++ b/src/interfaces/queue-options.ts @@ -17,12 +17,6 @@ export interface QueueBaseOptions { */ connection: ConnectionOptions; - /** - * Denotes commands should retry indefinitely. 
- * @deprecated - */ - blockingConnection?: boolean; - /** * Prefix for all queue keys. */ diff --git a/src/interfaces/repeat-options.ts b/src/interfaces/repeat-options.ts index 67d9450a96..b07089ccc4 100644 --- a/src/interfaces/repeat-options.ts +++ b/src/interfaces/repeat-options.ts @@ -11,14 +11,6 @@ export interface RepeatOptions extends Omit { */ pattern?: string; - /** - * Custom repeatable key. This is the key that holds the "metadata" - * of a given repeatable job. This key is normally auto-generated but - * it is sometimes useful to specify a custom key for easier retrieval - * of repeatable jobs. - */ - key?: string; - /** * Number of times the job should repeat at max. */ @@ -32,10 +24,9 @@ export interface RepeatOptions extends Omit { /** * Repeated job should start right now - * ( work only with every settings) + * (work only with every settings) * * @deprecated - * */ immediately?: boolean; @@ -48,17 +39,4 @@ export interface RepeatOptions extends Omit { * Internal property to store the previous time the job was executed. */ prevMillis?: number; - - /** - * Internal property to store the offset to apply to the next iteration. 
- * - * @deprecated - */ - offset?: number; - - /** - * Internal property to store de job id - * @deprecated - */ - jobId?: string; } diff --git a/src/types/backoff-strategy.ts b/src/types/backoff-strategy.ts index 50c300d582..ae8b17a9df 100644 --- a/src/types/backoff-strategy.ts +++ b/src/types/backoff-strategy.ts @@ -1,8 +1,30 @@ import { MinimalJob } from '../interfaces/minimal-job'; -export type BackoffStrategy = ( +export type BackoffStrategy4 = ( attemptsMade: number, - type?: string, - err?: Error, - job?: MinimalJob, + type: string, + err: Error, + job: MinimalJob, ) => Promise | number; + +export type BackoffStrategy3 = ( + attemptsMade: number, + type: string, + err: Error, +) => Promise | number; + +export type BackoffStrategy2 = ( + attemptsMade: number, + type: string, +) => Promise | number; + +export type BackoffStrategy1 = (attemptsMade: number) => Promise | number; + +export type BackoffStrategy0 = () => Promise | number; + +export type BackoffStrategy = + | BackoffStrategy4 + | BackoffStrategy3 + | BackoffStrategy2 + | BackoffStrategy1 + | BackoffStrategy0; diff --git a/src/types/job-options.ts b/src/types/job-options.ts index fb8b74d264..f81f92255c 100644 --- a/src/types/job-options.ts +++ b/src/types/job-options.ts @@ -1,31 +1,16 @@ -import { BaseJobOptions, DebounceOptions } from '../interfaces'; +import { BaseJobOptions, DeduplicationOptions } from '../interfaces'; export type JobsOptions = BaseJobOptions & { - /** - * Debounce options. - * @deprecated use deduplication option - */ - debounce?: DebounceOptions; - /** * Deduplication options. */ - deduplication?: DebounceOptions; - - /** - * If true, moves parent to failed. - */ - failParentOnFailure?: boolean; + deduplication?: DeduplicationOptions; /** - * If true, moves the jobId from its parent dependencies to failed dependencies when it fails after all attempts. + * Modes when a child fails: fail, ignore, remove, wait. 
+ * @defaultValue fail */ - ignoreDependencyOnFailure?: boolean; - - /** - * If true, removes the job from its parent dependencies when it fails after all attempts. - */ - removeDependencyOnFailure?: boolean; + onChildFailure?: 'fail' | 'ignore' | 'remove' | 'wait'; }; /** @@ -33,30 +18,20 @@ export type JobsOptions = BaseJobOptions & { */ export type RedisJobOptions = BaseJobOptions & { /** - * Debounce identifier. + * Deduplication identifier. */ deid?: string; /** - * If true, moves parent to failed. - */ - fpof?: boolean; - - /** - * If true, moves the jobId from its parent dependencies to failed dependencies when it fails after all attempts. + * Modes when a child fails: fail, ignore, remove, wait. */ - idof?: boolean; + ocf?: 'fail' | 'ignore' | 'remove' | 'wait'; /** * Maximum amount of log entries that will be preserved */ kl?: number; - /** - * If true, removes the job from its parent dependencies when it fails after all attempts. - */ - rdof?: boolean; - /** * TelemetryMetadata, provide for context propagation. 
*/ diff --git a/src/utils.ts b/src/utils.ts index 4efa2e3467..aaf998a37c 100644 --- a/src/utils.ts +++ b/src/utils.ts @@ -200,7 +200,7 @@ export const childSend = ( msg: ChildMessage, ): Promise => asyncSend(proc, msg); -export const isRedisVersionLowerThan = ( +export const isVersionLowerThan = ( currentVersion: string, minimumVersion: string, ): boolean => { diff --git a/src/version.ts b/src/version.ts index 41e46fd8ca..58016e0cb5 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const version = '5.29.0'; +export const version = '6.0.0'; diff --git a/tests/test_clean.ts b/tests/test_clean.ts index 2e844f4d4a..f330e09962 100644 --- a/tests/test_clean.ts +++ b/tests/test_clean.ts @@ -28,8 +28,8 @@ describe('Cleaner', () => { queueName = `test-${v4()}`; queue = new Queue(queueName, { connection, prefix }); queueEvents = new QueueEvents(queueName, { connection, prefix }); - await queueEvents.waitUntilReady(); await queue.waitUntilReady(); + await queueEvents.waitUntilReady(); }); afterEach(async function () { @@ -531,7 +531,7 @@ describe('Cleaner', () => { id: job.id!, queue: job.queueQualifiedName, }, - removeDependencyOnFailure: true, + onChildFailure: 'remove', }, ); await delay(1000); diff --git a/tests/test_concurrency.ts b/tests/test_concurrency.ts index 400ae7bbcc..45ed82ccb3 100644 --- a/tests/test_concurrency.ts +++ b/tests/test_concurrency.ts @@ -156,6 +156,7 @@ describe('Concurrency', () => { }); const queueEvents = new QueueEvents(queueName, { connection, prefix }); await queueEvents.waitUntilReady(); + await queue.waitUntilReady(); await queue.setGlobalConcurrency(1); const worker = new Worker( @@ -239,6 +240,7 @@ describe('Concurrency', () => { prefix, }); const queueEvents = new QueueEvents(queueName, { connection, prefix }); + await queue.waitUntilReady(); await queueEvents.waitUntilReady(); await queue.setGlobalConcurrency(1); diff --git a/tests/test_events.ts b/tests/test_events.ts index a1fd791d99..6cff5ea349 100644 --- 
a/tests/test_events.ts +++ b/tests/test_events.ts @@ -405,213 +405,9 @@ describe('events', function () { }); }); - describe('when job is debounced when added again with same debounce id', function () { + describe('when job is deduplicated when added again with same deduplication id', function () { describe('when ttl is provided', function () { - it('used a fixed time period and emits debounced event', async function () { - const testName = 'test'; - - const job = await queue.add( - testName, - { foo: 'bar' }, - { debounce: { id: 'a1', ttl: 2000 } }, - ); - - let debouncedCounter = 0; - let secondJob; - queueEvents.on('debounced', ({ jobId, debounceId }) => { - if (debouncedCounter > 1) { - expect(jobId).to.be.equal(secondJob.id); - expect(debounceId).to.be.equal('a1'); - } else { - expect(jobId).to.be.equal(job.id); - expect(debounceId).to.be.equal('a1'); - } - debouncedCounter++; - }); - - await delay(1000); - await queue.add( - testName, - { foo: 'bar' }, - { debounce: { id: 'a1', ttl: 2000 } }, - ); - await queue.add( - testName, - { foo: 'bar' }, - { debounce: { id: 'a1', ttl: 2000 } }, - ); - await delay(1100); - secondJob = await queue.add( - testName, - { foo: 'bar' }, - { debounce: { id: 'a1', ttl: 2000 } }, - ); - await queue.add( - testName, - { foo: 'bar' }, - { debounce: { id: 'a1', ttl: 2000 } }, - ); - await queue.add( - testName, - { foo: 'bar' }, - { debounce: { id: 'a1', ttl: 2000 } }, - ); - await delay(100); - - expect(debouncedCounter).to.be.equal(4); - }); - - describe('when removing debounced job', function () { - it('removes debounce key', async function () { - const testName = 'test'; - - const job = await queue.add( - testName, - { foo: 'bar' }, - { debounce: { id: 'a1', ttl: 2000 } }, - ); - - let debouncedCounter = 0; - queueEvents.on('debounced', ({ jobId }) => { - debouncedCounter++; - }); - await job.remove(); - - await queue.add( - testName, - { foo: 'bar' }, - { debounce: { id: 'a1', ttl: 2000 } }, - ); - await delay(1000); - await 
queue.add( - testName, - { foo: 'bar' }, - { debounce: { id: 'a1', ttl: 2000 } }, - ); - await delay(1100); - const secondJob = await queue.add( - testName, - { foo: 'bar' }, - { debounce: { id: 'a1', ttl: 2000 } }, - ); - await secondJob.remove(); - - await queue.add( - testName, - { foo: 'bar' }, - { debounce: { id: 'a1', ttl: 2000 } }, - ); - await queue.add( - testName, - { foo: 'bar' }, - { debounce: { id: 'a1', ttl: 2000 } }, - ); - await delay(100); - - expect(debouncedCounter).to.be.equal(2); - }); - }); - }); - - describe('when ttl is not provided', function () { - it('waits until job is finished before removing debounce key', async function () { - const testName = 'test'; - - const worker = new Worker( - queueName, - async () => { - await delay(100); - await queue.add( - testName, - { foo: 'bar' }, - { debounce: { id: 'a1' } }, - ); - await delay(100); - await queue.add( - testName, - { foo: 'bar' }, - { debounce: { id: 'a1' } }, - ); - await delay(100); - }, - { - autorun: false, - connection, - prefix, - }, - ); - await worker.waitUntilReady(); - - let debouncedCounter = 0; - - const completing = new Promise(resolve => { - queueEvents.once('completed', ({ jobId }) => { - expect(jobId).to.be.equal('1'); - resolve(); - }); - - queueEvents.on('debounced', ({ jobId }) => { - debouncedCounter++; - }); - }); - - worker.run(); - - await queue.add(testName, { foo: 'bar' }, { debounce: { id: 'a1' } }); - - await completing; - - const secondJob = await queue.add( - testName, - { foo: 'bar' }, - { debounce: { id: 'a1' } }, - ); - - const count = await queue.getJobCountByTypes(); - - expect(count).to.be.eql(2); - - expect(debouncedCounter).to.be.equal(2); - expect(secondJob.id).to.be.equal('4'); - await worker.close(); - }); - - describe('when removing debounced job', function () { - it('removes debounce key', async function () { - const testName = 'test'; - - const job = await queue.add( - testName, - { foo: 'bar' }, - { debounce: { id: 'a1' } }, - ); - - let 
debouncedCounter = 0; - queueEvents.on('debounced', ({ jobId }) => { - debouncedCounter++; - }); - await job.remove(); - - await queue.add(testName, { foo: 'bar' }, { debounce: { id: 'a1' } }); - - await queue.add(testName, { foo: 'bar' }, { debounce: { id: 'a1' } }); - await delay(100); - const secondJob = await queue.add( - testName, - { foo: 'bar' }, - { debounce: { id: 'a1' } }, - ); - await secondJob.remove(); - - expect(debouncedCounter).to.be.equal(2); - }); - }); - }); - }); - - describe('when job is deduplicated when added again with same debounce id', function () { - describe('when ttl is provided', function () { - it('used a fixed time period and emits debounced event', async function () { + it('used a fixed time period and emits deduplicated event', async function () { const testName = 'test'; const job = await queue.add( @@ -718,7 +514,7 @@ describe('events', function () { }); describe('when ttl is not provided', function () { - it('waits until job is finished before removing debounce key', async function () { + it('waits until job is finished before removing deduplication key', async function () { const testName = 'test'; const worker = new Worker( @@ -761,7 +557,11 @@ describe('events', function () { worker.run(); - await queue.add(testName, { foo: 'bar' }, { debounce: { id: 'a1' } }); + await queue.add( + testName, + { foo: 'bar' }, + { deduplication: { id: 'a1' } }, + ); await completing; diff --git a/tests/test_flow.ts b/tests/test_flow.ts index 0369a39988..3669c00e54 100644 --- a/tests/test_flow.ts +++ b/tests/test_flow.ts @@ -40,20 +40,26 @@ describe('flows', () => { }); describe('when removeOnFail is true in last pending child', () => { - it('moves parent to wait without getting stuck', async () => { + it('moves parent to failed without getting stuck', async () => { const worker = new Worker( queueName, async job => { - if (job.name === 'child0') { + if (job.name === 'child1') { + await delay(50); throw new Error('fail'); } }, { connection, 
prefix }, ); await worker.waitUntilReady(); + const queueEvents = new QueueEvents(queueName, { + connection, + prefix, + }); + await queueEvents.waitUntilReady(); const flow = new FlowProducer({ connection, prefix }); - await flow.add({ + const tree = await flow.add({ name: 'parent', data: {}, queueName, @@ -62,33 +68,29 @@ describe('flows', () => { queueName, name: 'child0', data: {}, - opts: { - removeOnFail: true, - }, }, { queueName, name: 'child1', data: {}, + opts: { + removeOnFail: true, + }, }, ], }); - const completed = new Promise((resolve, reject) => { - worker.on('completed', async (job: Job) => { - try { - if (job.name === 'parent') { - const { processed } = await job.getDependenciesCount(); - expect(processed).to.equal(1); - resolve(); - } - } catch (err) { - reject(err); + const failed = new Promise(resolve => { + queueEvents.on('failed', async ({ jobId }) => { + if (jobId === tree.job.id) { + const { processed } = await tree.job!.getDependenciesCount(); + expect(processed).to.equal(1); + resolve(); } }); }); - await completed; + await failed; await flow.close(); await worker.close(); }); @@ -252,9 +254,9 @@ describe('flows', () => { }).timeout(8000); }); - describe('when child is debounced when added again with same debounce id', function () { + describe('when child is deduplicated when added again with same deduplication id', function () { describe('when ttl is not provided', function () { - it('waits until job is finished before removing debounce key', async function () { + it('waits until job is finished before removing deduplication key', async function () { const parentQueueName = `parent-queue-${v4()}`; const flow = new FlowProducer({ connection, prefix }); @@ -266,10 +268,10 @@ describe('flows', () => { async job => { await delay(100); - const jobIdFromDebounceKey = await queue.getDebounceJobId( - 'debounce_id', + const jobIdFromDeduplicationKey = await queue.getDeduplicationJobId( + 'deduplication_id', ); - 
expect(jobIdFromDebounceKey).to.be.equal(job.id); + expect(jobIdFromDeduplicationKey).to.be.equal(job.id); await flow.add({ name: 'parent', @@ -281,8 +283,8 @@ describe('flows', () => { name: 'child0', data: {}, opts: { - debounce: { - id: 'debounce_id', + deduplication: { + id: 'deduplication_id', }, }, }, @@ -309,15 +311,15 @@ describe('flows', () => { name: 'child0', data: {}, opts: { - debounce: { - id: 'debounce_id', + deduplication: { + id: 'deduplication_id', }, }, }, ], }); - let debouncedCounter = 0; + let deduplicatedCounter = 0; const completing = new Promise(resolve => { queueEvents.once('completed', ({ jobId }) => { @@ -325,8 +327,8 @@ describe('flows', () => { resolve(); }); - queueEvents.on('debounced', ({ jobId }) => { - debouncedCounter++; + queueEvents.on('deduplicated', ({ jobId }) => { + deduplicatedCounter++; }); }); @@ -334,12 +336,12 @@ describe('flows', () => { await completing; - const jobIdFromDebounceKey = await queue.getDebounceJobId( - 'debounce_id', + const jobIdFromDeduplicationKey = await queue.getDeduplicationJobId( + 'deduplication_id', ); - expect(jobIdFromDebounceKey).to.be.null; + expect(jobIdFromDeduplicationKey).to.be.null; - expect(debouncedCounter).to.be.equal(1); + expect(deduplicatedCounter).to.be.equal(1); await worker.close(); await queueEvents.close(); @@ -757,7 +759,7 @@ describe('flows', () => { }); }); - describe('when ignoreDependencyOnFailure is provided', async () => { + describe('when onChildFailure is provided as ignore', async () => { it('moves parent to wait after children fail', async () => { const parentQueueName = `parent-queue-${v4()}`; const parentQueue = new Queue(parentQueueName, { connection, prefix }); @@ -810,19 +812,25 @@ describe('flows', () => { name, data: { idx: 0, foo: 'bar' }, queueName, - opts: { ignoreDependencyOnFailure: true }, + opts: { + onChildFailure: 'ignore', + }, }, { name, data: { idx: 1, foo: 'baz' }, queueName, - opts: { ignoreDependencyOnFailure: true }, + opts: { + 
onChildFailure: 'ignore', + }, }, { name, data: { idx: 2, foo: 'qux' }, queueName, - opts: { ignoreDependencyOnFailure: true }, + opts: { + onChildFailure: 'ignore', + }, }, ], }); @@ -862,7 +870,7 @@ describe('flows', () => { }).timeout(8000); }); - describe('when removeDependencyOnFailure is provided', async () => { + describe('when onChildFailure is provided as remove', async () => { it('moves parent to wait after children fail', async () => { const parentQueueName = `parent-queue-${v4()}`; const parentQueue = new Queue(parentQueueName, { connection, prefix }); @@ -915,19 +923,25 @@ describe('flows', () => { name, data: { idx: 0, foo: 'bar' }, queueName, - opts: { removeDependencyOnFailure: true }, + opts: { + onChildFailure: 'remove', + }, }, { name, data: { idx: 1, foo: 'baz' }, queueName, - opts: { removeDependencyOnFailure: true }, + opts: { + onChildFailure: 'remove', + }, }, { name, data: { idx: 2, foo: 'qux' }, queueName, - opts: { removeDependencyOnFailure: true }, + opts: { + onChildFailure: 'remove', + }, }, ], }); @@ -958,6 +972,66 @@ describe('flows', () => { }).timeout(8000); }); + describe('when onChildFailure is provided as wait', () => { + it('keeps parent in waiting-children state', async () => { + const worker = new Worker( + queueName, + async job => { + if (job.name === 'child0') { + await delay(75); + throw new Error('fail'); + } + }, + { connection, prefix }, + ); + await worker.waitUntilReady(); + const queueEvents = new QueueEvents(queueName, { + connection, + prefix, + }); + await queueEvents.waitUntilReady(); + + const flow = new FlowProducer({ connection, prefix }); + const tree = await flow.add({ + name: 'parent', + data: {}, + queueName, + children: [ + { + queueName, + name: 'child0', + data: {}, + opts: { + onChildFailure: 'wait', + }, + }, + { + queueName, + name: 'child1', + data: {}, + }, + ], + }); + + const failed = new Promise(resolve => { + queueEvents.on('completed', async ({ jobId }) => { + if (jobId === 
tree.children![1].job.id) { + const { processed } = await tree.job!.getDependenciesCount(); + expect(processed).to.equal(1); + resolve(); + } + }); + }); + + await failed; + const parentState = await tree.job.getState(); + expect(parentState).to.equal('waiting-children'); + + await flow.close(); + await worker.close(); + }); + }); + describe('when chaining flows at runtime using step jobs', () => { it('should wait children as one step of the parent job', async function () { this.timeout(8000); @@ -2105,7 +2179,7 @@ describe('flows', () => { }).timeout(8000); }); - describe('when failParentOnFailure option is provided', async () => { + describe('when onChildFailure option is provided as fail', async () => { it('should move parent to failed when child is moved to failed', async () => { const name = 'child-job'; @@ -2165,13 +2239,13 @@ describe('flows', () => { name, data: { foo: 'qux' }, queueName, - opts: { failParentOnFailure: true }, + opts: { onChildFailure: 'fail' }, children: [ { name, data: { foo: 'bar' }, queueName: grandChildrenQueueName, - opts: { failParentOnFailure: true }, + opts: { onChildFailure: 'fail' }, }, { name, @@ -2241,7 +2315,7 @@ describe('flows', () => { await removeAllQueueData(new IORedis(redisHost), grandChildrenQueueName); }).timeout(8000); - describe('when removeDependencyOnFailure is provided', async () => { + describe('when onChildFailure is provided as remove', async () => { it('moves parent to wait after children fail', async () => { const name = 'child-job'; @@ -2293,13 +2367,15 @@ describe('flows', () => { name, data: { foo: 'qux' }, queueName, - opts: { removeDependencyOnFailure: true }, + opts: { + onChildFailure: 'remove', + }, children: [ { name, data: { foo: 'bar' }, queueName: grandChildrenQueueName, - opts: { failParentOnFailure: true }, + opts: { onChildFailure: 'fail' }, }, { name, @@ -2383,7 +2459,7 @@ describe('flows', () => { }).timeout(8000); }); - describe('when ignoreDependencyOnFailure is provided', async () => { + 
describe('when onChildFailure is provided as ignore', async () => { it('moves parent to wait after children fail', async () => { const name = 'child-job'; @@ -2435,13 +2511,13 @@ describe('flows', () => { name, data: { foo: 'qux' }, queueName, - opts: { ignoreDependencyOnFailure: true }, + opts: { onChildFailure: 'ignore' }, children: [ { name, data: { foo: 'bar' }, queueName: grandChildrenQueueName, - opts: { failParentOnFailure: true }, + opts: { onChildFailure: 'fail' }, }, { name, diff --git a/tests/test_getters.ts b/tests/test_getters.ts index 4007f3dcf7..d1f2ff5584 100644 --- a/tests/test_getters.ts +++ b/tests/test_getters.ts @@ -809,20 +809,25 @@ describe('Jobs getters', function () { }); }); + await queue.add('test', {}); const flow = new FlowProducer({ connection, prefix }); await flow.add({ name: 'parent-job', queueName, data: {}, children: [ - { name: 'child-1', data: { idx: 0, foo: 'bar' }, queueName }, + { + name: 'child-1', + data: { idx: 0, foo: 'bar' }, + queueName, + opts: { delay: 6000 }, + }, { name: 'child-2', data: { idx: 1, foo: 'baz' }, queueName }, { name: 'child-3', data: { idx: 2, foo: 'bac' }, queueName }, { name: 'child-4', data: { idx: 3, foo: 'bad' }, queueName }, ], }); - await queue.add('test', { idx: 2 }, { delay: 5000 }); await queue.add('test', { idx: 3 }, { priority: 5 }); await completing; diff --git a/tests/test_job.ts b/tests/test_job.ts index 0393fe006e..3b2d810ff2 100644 --- a/tests/test_job.ts +++ b/tests/test_job.ts @@ -139,45 +139,6 @@ describe('Job', function () { }); }); - describe('when removeDependencyOnFailure and failParentOnFailure options are provided', () => { - it('throws an error', async () => { - const data = { foo: 'bar' }; - const opts = { - removeDependencyOnFailure: true, - failParentOnFailure: true, - }; - await expect(Job.create(queue, 'test', data, opts)).to.be.rejectedWith( - 'RemoveDependencyOnFailure and failParentOnFailure options can not be used together', - ); - }); - }); - - describe('when 
removeDependencyOnFailure and ignoreDependencyOnFailure options are provided', () => { - it('throws an error', async () => { - const data = { foo: 'bar' }; - const opts = { - removeDependencyOnFailure: true, - ignoreDependencyOnFailure: true, - }; - await expect(Job.create(queue, 'test', data, opts)).to.be.rejectedWith( - 'RemoveDependencyOnFailure and ignoreDependencyOnFailure options can not be used together', - ); - }); - }); - - describe('when failParentOnFailure and ignoreDependencyOnFailure options are provided', () => { - it('throws an error', async () => { - const data = { foo: 'bar' }; - const opts = { - ignoreDependencyOnFailure: true, - failParentOnFailure: true, - }; - await expect(Job.create(queue, 'test', data, opts)).to.be.rejectedWith( - 'FailParentOnFailure and ignoreDependencyOnFailure options can not be used together', - ); - }); - }); - describe('when priority option is provided as float', () => { it('throws an error', async () => { const data = { foo: 'bar' }; @@ -1472,7 +1433,7 @@ describe('Job', function () { await queue.pause(); await delayedJob.promote(); - const pausedJobsCount = await queue.getJobCountByTypes('paused'); + const pausedJobsCount = await queue.getWaitingCount(); expect(pausedJobsCount).to.be.equal(2); await queue.resume(); @@ -1494,7 +1455,7 @@ describe('Job', function () { await queue.pause(); await delayedJob.promote(); - const pausedJobsCount = await queue.getJobCountByTypes('paused'); + const pausedJobsCount = await queue.getWaitingCount(); expect(pausedJobsCount).to.be.equal(1); await queue.resume(); diff --git a/tests/test_migrations.ts b/tests/test_migrations.ts new file mode 100644 index 0000000000..7006c1ac16 --- /dev/null +++ b/tests/test_migrations.ts @@ -0,0 +1,90 @@ +import { expect } from 'chai'; +import { default as IORedis } from 'ioredis'; +import { describe, beforeEach, it, before, after as afterAll } from 'mocha'; +import * as sinon from 'sinon'; +import { v4 } from 'uuid'; +import { Queue } from 
'../src/classes'; +import { removeAllQueueData } from '../src/utils'; + +describe('migrations', function () { + const redisHost = process.env.REDIS_HOST || 'localhost'; + const prefix = process.env.BULLMQ_TEST_PREFIX || 'bull'; + + const sandbox = sinon.createSandbox(); + + let queue: Queue; + let queueName: string; + + let connection; + before(async function () { + connection = new IORedis(redisHost, { maxRetriesPerRequest: null }); + }); + + beforeEach(async function () { + queueName = `test-${v4()}`; + queue = new Queue(queueName, { connection, prefix }); + await queue.waitUntilReady(); + }); + + afterEach(async function () { + sandbox.restore(); + await queue.close(); + await removeAllQueueData(new IORedis(redisHost), queueName); + }); + + afterAll(async function () { + await connection.quit(); + }); + + describe('execute migrations', () => { + describe('removeLegacyMarkers', () => { + it('removes old markers', async () => { + const client = await queue.client; + const completedKey = `${prefix}:${queueName}:completed`; + const failedKey = `${prefix}:${queueName}:failed`; + const waitingKey = `${prefix}:${queueName}:wait`; + await client.zadd(completedKey, 1, '0:2'); + await client.zadd(completedKey, 1, '0:2'); + await client.zadd(failedKey, 2, '0:1'); + await client.rpush(waitingKey, '0:0'); + + await queue.runMigrations(); + + const keys = await client.keys(`${prefix}:${queueName}:*`); + + // meta key, migrations + expect(keys.length).to.be.eql(2); + + const completedCount = await client.zcard(completedKey); + expect(completedCount).to.be.eql(0); + + const failedCount = await client.zcard(failedKey); + expect(failedCount).to.be.eql(0); + + const waitingCount = await client.llen(waitingKey); + expect(waitingCount).to.be.eql(0); + }); + }); + + describe('migratePausedKey', () => { + it('moves jobs from paused to wait', async () => { + const client = await queue.client; + await client.lpush( + `${prefix}:${queueName}:paused`, + '1', + '2', + '3', + '4', + '5', + 
'6', + ); + + await queue.runMigrations(); + + const jobs = await client.lrange(`${prefix}:${queueName}:wait`, 0, -1); + + expect(jobs).to.be.eql(['6', '5', '4', '3', '2', '1']); + }); + }); + }); +}); diff --git a/tests/test_obliterate.ts b/tests/test_obliterate.ts index 48967bd5df..89b8223b5a 100644 --- a/tests/test_obliterate.ts +++ b/tests/test_obliterate.ts @@ -74,6 +74,7 @@ describe('Obliterate', function () { await queue.obliterate(); const client = await queue.client; const keys = await client.keys(`${prefix}:${queue.name}:*`); + expect(keys.length).to.be.eql(0); await worker.close(); @@ -375,6 +376,7 @@ describe('Obliterate', function () { await queue.obliterate({ force: true }); const client = await queue.client; const keys = await client.keys(`${prefix}:${queue.name}:*`); + // only migration key should be kept expect(keys.length).to.be.eql(0); await worker.close(); diff --git a/tests/test_pause.ts b/tests/test_pause.ts index 9ad1f6e5c5..38e809da18 100644 --- a/tests/test_pause.ts +++ b/tests/test_pause.ts @@ -58,9 +58,8 @@ describe('Pause', function () { if (processed) { throw new Error('should not process delayed jobs in paused queue.'); } - const counts2 = await queue.getJobCounts('waiting', 'paused', 'delayed'); - expect(counts2).to.have.property('waiting', 0); - expect(counts2).to.have.property('paused', 1); + const counts2 = await queue.getJobCounts('waiting', 'delayed'); + expect(counts2).to.have.property('waiting', 1); expect(counts2).to.have.property('delayed', 0); await worker.close(); @@ -380,7 +379,7 @@ describe('Pause', function () { const waitingEvent = new Promise(resolve => { queueEvents.on('waiting', async ({ prev }) => { if (prev === 'failed') { - const count = await queue.getJobCountByTypes('paused'); + const count = await queue.getJobCountByTypes('wait'); expect(count).to.be.equal(1); await queue.resume(); resolve(); diff --git a/tests/test_queue.ts b/tests/test_queue.ts index 80bd41a92d..51c9ed9c51 100644 --- a/tests/test_queue.ts +++ 
b/tests/test_queue.ts @@ -62,6 +62,15 @@ describe('queues', function () { ).to.be.rejectedWith('Custom Ids cannot be integers'); }); }); + + describe('when jobId contains :', () => { + it('throws error', async () => { + const opts = { jobId: 'job:id' }; + await expect(queue.add('test', {}, opts)).to.be.rejectedWith( + "JobId cannot be '0' or contain :", + ); + }); + }); }); describe('when empty name contains :', () => { @@ -113,7 +122,14 @@ describe('queues', function () { for (const key of keys) { const type = key.split(':')[2]; - expect(['marker', 'events', 'meta', 'pc', 'id']).to.include(type); + expect([ + 'marker', + 'migrations', + 'events', + 'meta', + 'pc', + 'id', + ]).to.include(type); } }).timeout(10000); @@ -398,7 +414,7 @@ describe('queues', function () { describe('when queue is paused', () => { it('clean queue including paused jobs', async () => { const maxJobs = 50; - const added = []; + const added: Promise[] = []; await queue.pause(); for (let i = 1; i <= maxJobs; i++) { @@ -408,8 +424,8 @@ describe('queues', function () { await Promise.all(added); const count = await queue.count(); expect(count).to.be.eql(maxJobs); - const count2 = await queue.getJobCounts('paused'); - expect(count2.paused).to.be.eql(maxJobs); + const count2 = await queue.getJobCounts('wait'); + expect(count2.wait).to.be.eql(maxJobs); await queue.drain(); const countAfterEmpty = await queue.count(); expect(countAfterEmpty).to.be.eql(0); @@ -611,7 +627,7 @@ describe('queues', function () { }); describe('when queue is paused', () => { - it('moves retried jobs to paused', async () => { + it('moves retried jobs to wait', async () => { await queue.waitUntilReady(); const jobCount = 8; @@ -654,8 +670,8 @@ describe('queues', function () { await queue.pause(); await queue.retryJobs({ count: 2 }); - const pausedCount = await queue.getJobCounts('paused'); - expect(pausedCount.paused).to.be.equal(jobCount); + const pausedCount = await queue.getJobCounts('wait'); + 
expect(pausedCount.wait).to.be.equal(jobCount); await worker.close(); }); diff --git a/tests/test_rate_limiter.ts b/tests/test_rate_limiter.ts index 28e4c5c9ad..231882a0c2 100644 --- a/tests/test_rate_limiter.ts +++ b/tests/test_rate_limiter.ts @@ -7,6 +7,7 @@ import { FlowProducer, Queue, QueueEvents, + RateLimitError, Worker, UnrecoverableError, } from '../src/classes'; @@ -28,6 +29,7 @@ describe('Rate Limiter', function () { queueName = `test-${v4()}`; queue = new Queue(queueName, { connection, prefix }); queueEvents = new QueueEvents(queueName, { connection, prefix }); + await queue.waitUntilReady(); await queueEvents.waitUntilReady(); }); @@ -137,7 +139,7 @@ describe('Rate Limiter', function () { }); describe('when queue is paused between rate limit', () => { - it('should add active jobs to paused', async function () { + it('should add active jobs to wait', async function () { this.timeout(20000); const numJobs = 4; @@ -184,10 +186,9 @@ describe('Rate Limiter', function () { await delay(500); - const counts = await queue.getJobCounts('paused', 'completed', 'wait'); - expect(counts).to.have.property('paused', numJobs - 1); + const counts = await queue.getJobCounts('completed', 'wait'); expect(counts).to.have.property('completed', 1); - expect(counts).to.have.property('wait', 0); + expect(counts).to.have.property('wait', numJobs - 1); await worker1.close(); await worker2.close(); @@ -370,11 +371,11 @@ describe('Rate Limiter', function () { queueName, async job => { if (job.attemptsStarted === 1) { - await worker.rateLimit(dynamicLimit); + await queue.rateLimit(dynamicLimit); const currentTtl = await queue.getRateLimitTtl(); expect(currentTtl).to.be.lessThanOrEqual(250); expect(currentTtl).to.be.greaterThan(100); - throw Worker.RateLimitError(); + throw new RateLimitError(); } }, { @@ -439,10 +440,10 @@ describe('Rate Limiter', function () { async job => { if (job.attemptsStarted === 1) { delay(50); - await worker.rateLimit(dynamicLimit); + await 
queue.rateLimit(dynamicLimit); const currentTtl = await queue.getRateLimitTtl(); expect(currentTtl).to.be.lessThanOrEqual(dynamicLimit); - throw Worker.RateLimitError(); + throw new RateLimitError(); } }, { @@ -616,11 +617,11 @@ describe('Rate Limiter', function () { const worker = new Worker( queueName, async job => { - await worker.rateLimit(dynamicLimit); + await queue.rateLimit(dynamicLimit); if (job.attemptsStarted >= job.opts.attempts!) { throw new UnrecoverableError('Unrecoverable'); } - throw Worker.RateLimitError(); + throw new RateLimitError(); }, { connection, @@ -678,8 +679,8 @@ describe('Rate Limiter', function () { priority -= 1; extraCount -= 1; } - await worker.rateLimit(dynamicLimit); - throw Worker.RateLimitError(); + await queue.rateLimit(dynamicLimit); + throw new RateLimitError(); } }, { @@ -692,6 +693,8 @@ describe('Rate Limiter', function () { }, ); + await worker.waitUntilReady(); + const result = new Promise((resolve, reject) => { queueEvents.on( 'completed', @@ -731,8 +734,8 @@ describe('Rate Limiter', function () { const worker = new Worker( queueName, async () => { - await worker.rateLimit(dynamicLimit); - throw Worker.RateLimitError(); + await queue.rateLimit(dynamicLimit); + throw new RateLimitError(); }, { connection, @@ -769,8 +772,8 @@ describe('Rate Limiter', function () { const worker = new Worker( queueName, async () => { - await worker.rateLimit(dynamicLimit); - throw Worker.RateLimitError(); + await queue.rateLimit(dynamicLimit); + throw new RateLimitError(); }, { connection, @@ -797,7 +800,7 @@ describe('Rate Limiter', function () { }); describe('when queue is paused', () => { - it('moves job to paused', async function () { + it('moves job to wait', async function () { const dynamicLimit = 250; const duration = 100; @@ -807,8 +810,8 @@ describe('Rate Limiter', function () { if (job.attemptsMade === 0) { await queue.pause(); await delay(150); - await worker.rateLimit(dynamicLimit); - throw Worker.RateLimitError(); + await 
queue.rateLimit(dynamicLimit); + throw new RateLimitError(); } }, { @@ -839,7 +842,7 @@ describe('Rate Limiter', function () { await result; - const pausedCount = await queue.getJobCountByTypes('paused'); + const pausedCount = await queue.getJobCountByTypes('wait'); expect(pausedCount).to.equal(1); await worker.close(); @@ -867,7 +870,7 @@ describe('Rate Limiter', function () { }, }); - await worker.rateLimit(dynamicLimit); + await queue.rateLimit(dynamicLimit); await queue.removeRateLimitKey(); const result = new Promise((resolve, reject) => { diff --git a/tests/test_repeat.ts b/tests/test_repeat.ts deleted file mode 100644 index ed6ee6e301..0000000000 --- a/tests/test_repeat.ts +++ /dev/null @@ -1,1970 +0,0 @@ -import { expect } from 'chai'; -import { default as IORedis } from 'ioredis'; -import { beforeEach, describe, it, before, after as afterAll } from 'mocha'; -import * as sinon from 'sinon'; -import { v4 } from 'uuid'; -import { rrulestr } from 'rrule'; -import { - Job, - Queue, - QueueEvents, - Repeat, - getNextMillis, - Worker, -} from '../src/classes'; -import { JobsOptions } from '../src/types'; -import { removeAllQueueData } from '../src/utils'; -import { - createRepeatableJobKey, - extractRepeatableJobChecksumFromRedisKey, - getRepeatableJobKeyPrefix, - getRepeatJobIdCheckum, -} from './utils/repeat_utils'; - -const moment = require('moment'); - -const ONE_SECOND = 1000; -const ONE_MINUTE = 60 * ONE_SECOND; -const ONE_HOUR = 60 * ONE_MINUTE; -const ONE_DAY = 24 * ONE_HOUR; - -const NoopProc = async (job: Job) => {}; - -describe('repeat', function () { - const redisHost = process.env.REDIS_HOST || 'localhost'; - const prefix = process.env.BULLMQ_TEST_PREFIX || 'bull'; - this.timeout(10000); - let repeat: Repeat; - let queue: Queue; - let queueEvents: QueueEvents; - let queueName: string; - - let connection; - before(async function () { - connection = new IORedis(redisHost, { maxRetriesPerRequest: null }); - }); - - beforeEach(async function () { - 
this.clock = sinon.useFakeTimers(); - queueName = `test-${v4()}`; - queue = new Queue(queueName, { connection, prefix }); - repeat = new Repeat(queueName, { connection, prefix }); - queueEvents = new QueueEvents(queueName, { connection, prefix }); - await queue.waitUntilReady(); - await queueEvents.waitUntilReady(); - }); - - afterEach(async function () { - this.clock.restore(); - await queue.close(); - await repeat.close(); - await queueEvents.close(); - await removeAllQueueData(new IORedis(redisHost), queueName); - }); - - afterAll(async function () { - await connection.quit(); - }); - - describe('when exponential backoff is applied', () => { - it('should retry a job respecting exponential backoff strategy', async function () { - let delay = 10000; - const date = new Date('2017-02-07 9:24:00'); - this.clock.setSystemTime(date); - const worker = new Worker( - queueName, - async () => { - throw Error('error'); - }, - { autorun: false, connection, prefix }, - ); - const delayStub = sinon.stub(worker, 'delay').callsFake(async () => { - console.log('delay'); - }); - await worker.waitUntilReady(); - - const failing = new Promise(resolve => { - worker.on('failed', async job => { - this.clock.tick(delay + 10); - delay = delay * 2; - - if (job!.attemptsMade === 10) { - resolve(); - } - }); - }); - - await queue.add( - 'test', - { foo: 'bar' }, - { - attempts: 10, - backoff: { - type: 'exponential', - delay, - }, - }, - ); - - worker.run(); - - await failing; - - await worker.close(); - delayStub.restore(); - }); - }); - - describe('when endDate is not greater than current timestamp', () => { - it('throws an error', async function () { - await expect( - queue.add( - 'test', - { foo: 'bar' }, - { - repeat: { - endDate: Date.now() - 1000, - every: 100, - }, - }, - ), - ).to.be.rejectedWith('End date must be greater than current timestamp'); - }); - }); - - it('it should stop repeating after endDate', async function () { - const every = 100; - const date = new 
Date('2017-02-07 9:24:00'); - this.clock.setSystemTime(date); - const worker = new Worker( - queueName, - async () => { - this.clock.tick(every); - }, - { autorun: false, connection, prefix }, - ); - const delayStub = sinon.stub(worker, 'delay').callsFake(async () => {}); - await worker.waitUntilReady(); - - let processed = 0; - const completing = new Promise(resolve => { - worker.on('completed', async () => { - processed++; - if (processed === 10) { - resolve(); - } - }); - }); - - const job = await queue.add( - 'test', - { foo: 'bar' }, - { - repeat: { - endDate: Date.now() + 1000, - every: 100, - }, - }, - ); - - expect(job.repeatJobKey).to.not.be.undefined; - - this.clock.tick(every + 1); - - worker.run(); - - await completing; - - const delayed = await queue.getDelayed(); - - expect(delayed).to.have.length(0); - expect(processed).to.be.equal(10); - - await worker.close(); - delayStub.restore(); - }); - - describe('when jobs have the same cron pattern and different name', function () { - it('should create multiple jobs', async function () { - const cron = '*/10 * * * * *'; - - await Promise.all([ - queue.add('test1', {}, { repeat: { pattern: cron } }), - queue.add('test2', {}, { repeat: { pattern: cron } }), - queue.add('test3', {}, { repeat: { pattern: cron } }), - ]); - - const count = await queue.count(); - expect(count).to.be.eql(3); - }); - }); - - describe('when jobs have same key and different every pattern', function () { - it('should create only one repeatable job', async function () { - await Promise.all([ - queue.add('test1', {}, { repeat: { every: 1000, key: 'test' } }), - queue.add('test2', {}, { repeat: { every: 2000, key: 'test' } }), - queue.add('test3', {}, { repeat: { every: 3000, key: 'test' } }), - ]); - - const repeatableJobs = await queue.getRepeatableJobs(); - expect(repeatableJobs.length).to.be.eql(1); - }); - }); - - it('should get repeatable jobs with different cron pattern', async function () { - const crons = [ - '10 * * * * *', - '2 
10 * * * *', - '1 * * 5 * *', - '2 * * 4 * *', - ]; - - await Promise.all([ - queue.add('first', {}, { repeat: { pattern: crons[0], endDate: 12345 } }), - queue.add( - 'second', - {}, - { repeat: { pattern: crons[1], endDate: 610000 } }, - ), - queue.add( - 'third', - {}, - { repeat: { pattern: crons[2], tz: 'Africa/Abidjan' } }, - ), - queue.add( - 'fourth', - {}, - { repeat: { pattern: crons[3], tz: 'Africa/Accra' } }, - ), - queue.add( - 'fifth', - {}, - { repeat: { every: 5000, tz: 'Europa/Copenhaguen' } }, - ), - ]); - const count = await repeat.getRepeatableCount(); - expect(count).to.be.eql(5); - - let jobs = await repeat.getRepeatableJobs(0, -1, true); - jobs = await jobs.sort(function (a, b) { - return crons.indexOf(a.pattern!) - crons.indexOf(b.pattern!); - }); - expect(jobs) - .to.be.and.an('array') - .and.have.length(5) - .and.to.deep.include({ - key: '81e7865a899dddf47c3ad19649304bac', - name: 'first', - endDate: 12345, - tz: null, - pattern: '10 * * * * *', - every: null, - next: 10000, - }) - .and.to.deep.include({ - key: '47f7425312b6adf8db58ebd37c7ad8be', - name: 'second', - endDate: 610000, - tz: null, - pattern: '2 10 * * * *', - every: null, - next: 602000, - }) - .and.to.deep.include({ - key: 'f1e05411209310794fb4b34ec2a8df6b', - name: 'fourth', - endDate: null, - tz: 'Africa/Accra', - pattern: '2 * * 4 * *', - every: null, - next: 259202000, - }) - .and.to.deep.include({ - key: 'd58b8d085ba529d423d59e220a813f82', - name: 'third', - endDate: null, - tz: 'Africa/Abidjan', - pattern: '1 * * 5 * *', - every: null, - next: 345601000, - }) - .and.to.deep.include({ - key: 'e891826d68ad4ffbd7243b7f98d88614', - name: 'fifth', - endDate: null, - tz: 'Europa/Copenhaguen', - pattern: null, - every: '5000', - next: 5000, - }); - }); - - it('should repeat every 2 seconds', async function () { - this.timeout(10000); - - const nextTick = 2 * ONE_SECOND + 100; - - const worker = new Worker( - queueName, - async () => { - this.clock.tick(nextTick); - }, - { 
autorun: false, connection, prefix }, - ); - const delayStub = sinon.stub(worker, 'delay').callsFake(async () => {}); - - const date = new Date('2017-02-07 9:24:00'); - this.clock.setSystemTime(date); - - await queue.add( - 'test', - { foo: 'bar' }, - { repeat: { pattern: '*/2 * * * * *' } }, - ); - - this.clock.tick(nextTick); - - let prev: any; - let counter = 0; - - const completing = new Promise(resolve => { - worker.on('completed', async job => { - if (prev) { - expect(prev.timestamp).to.be.lt(job.timestamp); - expect(job.timestamp - prev.timestamp).to.be.gte(2000); - } - prev = job; - counter++; - if (counter == 5) { - resolve(); - } - }); - }); - - worker.run(); - - await completing; - await worker.close(); - delayStub.restore(); - }); - - it('should repeat every 2 seconds with startDate in future', async function () { - this.timeout(10000); - - const date = new Date('2017-02-07 9:24:00'); - this.clock.setSystemTime(date); - const nextTick = 2 * ONE_SECOND + 500; - const delay = 5 * ONE_SECOND + 500; - - const worker = new Worker( - queueName, - async () => { - this.clock.tick(nextTick); - }, - { autorun: false, connection, prefix }, - ); - const delayStub = sinon.stub(worker, 'delay').callsFake(async () => {}); - - await queue.add( - 'test', - { foo: 'bar' }, - { - repeat: { - pattern: '*/2 * * * * *', - startDate: new Date('2017-02-07 9:24:05'), - }, - }, - ); - - this.clock.tick(nextTick + delay); - - let prev: Job; - let counter = 0; - - const completing = new Promise((resolve, reject) => { - worker.on('completed', async job => { - if (prev) { - expect(prev.timestamp).to.be.lt(job.timestamp); - expect(job.timestamp - prev.timestamp).to.be.gte(2000); - } - prev = job; - counter++; - if (counter == 5) { - resolve(); - } - }); - }); - - worker.run(); - - await completing; - - await worker.close(); - delayStub.restore(); - }); - - it('should repeat every 2 seconds with startDate in past', async function () { - this.timeout(10000); - - const date = new 
Date('2017-02-07 9:24:00'); - this.clock.setSystemTime(date); - const nextTick = 2 * ONE_SECOND + 500; - const delay = 5 * ONE_SECOND + 500; - - const worker = new Worker( - queueName, - async () => { - this.clock.tick(nextTick); - }, - { autorun: false, connection, prefix }, - ); - const delayStub = sinon.stub(worker, 'delay').callsFake(async () => {}); - - await queue.add( - 'repeat', - { foo: 'bar' }, - { - repeat: { - pattern: '*/2 * * * * *', - startDate: new Date('2017-02-07 9:22:00'), - }, - }, - ); - - this.clock.tick(nextTick + delay); - - let prev: Job; - let counter = 0; - - const completing = new Promise((resolve, reject) => { - worker.on('completed', async job => { - if (prev) { - expect(prev.timestamp).to.be.lt(job.timestamp); - expect(job.timestamp - prev.timestamp).to.be.gte(2000); - } - prev = job; - counter++; - if (counter == 5) { - resolve(); - } - }); - }); - - worker.run(); - - await completing; - await worker.close(); - delayStub.restore(); - }); - - describe('when using removeOnComplete', function () { - it('should remove repeated job', async function () { - this.timeout(10000); - const queueName2 = `test-${v4()}`; - const queue2 = new Queue(queueName2, { - connection, - prefix, - defaultJobOptions: { - removeOnComplete: true, - }, - }); - - const date = new Date('2017-02-07 9:24:00'); - this.clock.setSystemTime(date); - const nextTick = 2 * ONE_SECOND + 500; - const delay = 5 * ONE_SECOND + 500; - - const worker = new Worker( - queueName, - async () => { - this.clock.tick(nextTick); - }, - { autorun: false, connection, prefix }, - ); - const delayStub = sinon.stub(worker, 'delay').callsFake(async () => {}); - - await queue.add( - 'test', - { foo: 'bar' }, - { - repeat: { - pattern: '*/2 * * * * *', - startDate: new Date('2017-02-07 9:24:05'), - }, - }, - ); - - this.clock.tick(nextTick + delay); - - let prev: Job; - let counter = 0; - - const completing = new Promise((resolve, reject) => { - worker.on('completed', async job => { - if (prev) 
{ - expect(prev.timestamp).to.be.lt(job.timestamp); - expect(job.timestamp - prev.timestamp).to.be.gte(2000); - } - prev = job; - counter++; - if (counter == 5) { - const counts = await queue2.getJobCounts('completed'); - expect(counts.completed).to.be.equal(0); - resolve(); - } - }); - }); - - worker.run(); - - await completing; - - await queue2.close(); - await worker.close(); - await removeAllQueueData(new IORedis(redisHost), queueName2); - delayStub.restore(); - }); - }); - - describe('when custom cron strategy is provided', function () { - it('should repeat every 2 seconds', async function () { - this.timeout(15000); - const settings = { - repeatStrategy: (millis, opts) => { - const currentDate = - opts.startDate && new Date(opts.startDate) > new Date(millis) - ? new Date(opts.startDate) - : new Date(millis); - const rrule = rrulestr(opts.pattern); - if (rrule.origOptions.count && !rrule.origOptions.dtstart) { - throw new Error('DTSTART must be defined to use COUNT with rrule'); - } - - const next_occurrence = rrule.after(currentDate, false); - return next_occurrence?.getTime(); - }, - }; - const currentQueue = new Queue(queueName, { - connection, - prefix, - settings, - }); - - const nextTick = 2 * ONE_SECOND + 100; - - const worker = new Worker( - queueName, - async () => { - this.clock.tick(nextTick); - }, - { connection, prefix, settings }, - ); - const delayStub = sinon.stub(worker, 'delay').callsFake(async () => {}); - - const date = new Date('2017-02-07 9:24:00'); - this.clock.setSystemTime(date); - - await currentQueue.add( - 'test', - { foo: 'bar' }, - { - repeat: { - pattern: 'RRULE:FREQ=SECONDLY;INTERVAL=2;WKST=MO', - }, - }, - ); - - this.clock.tick(nextTick); - - let prev: any; - let counter = 0; - - const completing = new Promise(resolve => { - worker.on('completed', async job => { - if (prev) { - expect(prev.timestamp).to.be.lt(job.timestamp); - expect(job.timestamp - prev.timestamp).to.be.gte(2000); - } - prev = job; - counter++; - if (counter 
== 5) { - resolve(); - } - }); - }); - - await completing; - await currentQueue.close(); - await worker.close(); - delayStub.restore(); - }); - - describe('when differentiating strategy by job name', function () { - it('should repeat every 2 seconds', async function () { - this.timeout(10000); - const settings = { - repeatStrategy: (millis, opts, name) => { - if (name === 'rrule') { - const currentDate = - opts.startDate && new Date(opts.startDate) > new Date(millis) - ? new Date(opts.startDate) - : new Date(millis); - const rrule = rrulestr(opts.pattern); - if (rrule.origOptions.count && !rrule.origOptions.dtstart) { - throw new Error( - 'DTSTART must be defined to use COUNT with rrule', - ); - } - - const next_occurrence = rrule.after(currentDate, false); - return next_occurrence?.getTime(); - } else { - return getNextMillis(millis, opts); - } - }, - }; - const currentQueue = new Queue(queueName, { - connection, - prefix, - settings, - }); - - const nextTick = 2 * ONE_SECOND + 100; - - const worker = new Worker( - queueName, - async job => { - this.clock.tick(nextTick); - - if (job.opts.repeat!.count == 5) { - const removed = await queue.removeRepeatable('rrule', repeat); - expect(removed).to.be.true; - } - }, - { connection, prefix, settings }, - ); - const delayStub = sinon.stub(worker, 'delay').callsFake(async () => {}); - - const date = new Date('2017-02-07 9:24:00'); - this.clock.setSystemTime(date); - - const repeat = { - pattern: 'RRULE:FREQ=SECONDLY;INTERVAL=2;WKST=MO', - }; - await currentQueue.add( - 'rrule', - { foo: 'bar' }, - { - repeat, - }, - ); - - this.clock.tick(nextTick); - - let prev: any; - let counter = 0; - - const completing = new Promise((resolve, reject) => { - worker.on('completed', async job => { - try { - if (prev) { - expect(prev.timestamp).to.be.lt(job.timestamp); - expect(job.timestamp - prev.timestamp).to.be.gte(2000); - } - prev = job; - counter++; - if (counter == 5) { - resolve(); - } - } catch (error) { - reject(error); - } - 
}); - }); - - await completing; - - let prev2: any; - let counter2 = 0; - - const completing2 = new Promise((resolve, reject) => { - worker.on('completed', async job => { - try { - if (prev2) { - expect(prev2.timestamp).to.be.lt(job.timestamp); - expect(job.timestamp - prev2.timestamp).to.be.gte(2000); - } - prev2 = job; - counter2++; - if (counter2 == 5) { - resolve(); - } - } catch (error) { - reject(error); - } - }); - }); - - await queue.add( - 'test', - { foo: 'bar' }, - { - repeat: { - pattern: '*/2 * * * * *', - startDate: new Date('2017-02-07 9:24:05'), - }, - }, - ); - - this.clock.tick(nextTick); - - await completing2; - - await currentQueue.close(); - await worker.close(); - delayStub.restore(); - }); - }); - }); - - it('should have repeatable job key with sha256 hashing when sha256 hash algorithm is provided', async function () { - this.timeout(15000); - const settings = { - repeatKeyHashAlgorithm: 'sha256', - }; - const currentQueue = new Queue(queueName, { connection, prefix, settings }); - - const worker = new Worker(queueName, null, { - connection, - prefix, - settings, - }); - const delayStub = sinon.stub(worker, 'delay').callsFake(async () => {}); - const jobName = 'jobName'; - const jobId = 'jobId'; - const endDate = ''; - const tz = ''; - const every = 50; - const suffix = every; - await currentQueue.add( - jobName, - { foo: 'bar' }, - { - repeat: { - jobId, - every, - }, - }, - ); - - const keyPrefix = getRepeatableJobKeyPrefix(prefix, queueName); - const client = await worker.client; - - const jobsRedisKeys = await client.keys(`${keyPrefix}*`); - expect(jobsRedisKeys.length).to.be.equal(2); - - const actualHashedRepeatableJobKey = - extractRepeatableJobChecksumFromRedisKey( - jobsRedisKeys[0].length > jobsRedisKeys[1].length - ? 
jobsRedisKeys[1] - : jobsRedisKeys[0], - ); - const expectedRawKey = createRepeatableJobKey( - jobName, - jobId, - endDate, - tz, - suffix, - ); - const expectedRepeatJobIdCheckum = getRepeatJobIdCheckum( - expectedRawKey, - settings.repeatKeyHashAlgorithm, - ); - - expect(actualHashedRepeatableJobKey).to.be.equal( - expectedRepeatJobIdCheckum, - ); - - await currentQueue.close(); - await worker.close(); - delayStub.restore(); - }); - - it('should repeat every 2 seconds and start immediately', async function () { - const date = new Date('2017-02-07 9:24:00'); - this.clock.setSystemTime(date); - const nextTick = 2 * ONE_SECOND; - - const worker = new Worker( - queueName, - async () => { - this.clock.tick(nextTick); - }, - { connection, prefix }, - ); - const delayStub = sinon.stub(worker, 'delay').callsFake(async () => {}); - - let prev: Job; - let counter = 0; - - const completing = new Promise(resolve => { - worker.on('completed', async job => { - if (prev && counter === 1) { - expect(prev.timestamp).to.be.lt(job.timestamp); - expect(job.processedOn! - prev.timestamp).to.be.gte(100); - } else if (prev) { - expect(prev.timestamp).to.be.lt(job.timestamp); - expect(job.processedOn! 
- prev.timestamp).to.be.gte(2000); - } - prev = job; - counter++; - if (counter === 5) { - resolve(); - } - }); - }); - - await queue.add( - 'repeat', - { foo: 'bar' }, - { - repeat: { - every: 2000, - immediately: true, - }, - }, - ); - - this.clock.tick(100); - - await completing; - await worker.close(); - delayStub.restore(); - }); - - it('should repeat once a day for 5 days and start immediately using endDate', async function () { - this.timeout(8000); - - const date = new Date('2017-05-05 01:01:00'); - this.clock.setSystemTime(date); - - const nextTick = ONE_DAY + 10 * ONE_SECOND; - const delay = 5 * ONE_SECOND + 500; - - const worker = new Worker( - queueName, - async () => { - this.clock.tick(nextTick); - }, - { - autorun: false, - connection, - prefix, - skipStalledCheck: true, - skipLockRenewal: true, - }, - ); - const delayStub = sinon.stub(worker, 'delay').callsFake(async () => { - console.log('delay'); - }); - - let prev: Job; - let counter = 0; - const completing = new Promise((resolve, reject) => { - worker.on('completed', async job => { - if (counter === 1) { - expect(prev.timestamp).to.be.lt(job.timestamp); - expect(job.processedOn! - prev.timestamp).to.be.gte(delay); - } else if (prev) { - expect(prev.timestamp).to.be.lt(job.timestamp); - expect(job.processedOn! 
- prev.timestamp).to.be.gte(ONE_DAY); - } - prev = job; - - counter++; - if (counter == 5) { - resolve(); - } - }); - }); - - await queue.add( - 'repeat', - { foo: 'bar' }, - { - repeat: { - pattern: '0 1 * * *', - immediately: true, - endDate: new Date('2017-05-10 13:13:00'), - }, - }, - ); - this.clock.tick(delay); - - worker.run(); - - await completing; - await worker.close(); - delayStub.restore(); - }); - - it('should repeat once a day for 5 days and start immediately', async function () { - this.timeout(8000); - - const date = new Date('2017-05-05 01:01:00'); - this.clock.setSystemTime(date); - - const nextTick = ONE_DAY + 10 * ONE_SECOND; - const delay = 5 * ONE_SECOND + 500; - - let counter = 0; - const worker = new Worker( - queueName, - async () => { - if (counter === 0) { - this.clock.tick(6 * ONE_HOUR); - } else { - this.clock.tick(nextTick); - } - }, - { - autorun: false, - connection, - prefix, - skipStalledCheck: true, - skipLockRenewal: true, - }, - ); - const delayStub = sinon.stub(worker, 'delay').callsFake(async () => { - console.log('delay'); - }); - - let prev: Job; - const completing = new Promise((resolve, reject) => { - worker.on('completed', async job => { - if (counter === 1) { - expect(prev.timestamp).to.be.lt(job.timestamp); - expect(job.processedOn! - prev.timestamp).to.be.gte(delay); - } else if (prev) { - expect(prev.timestamp).to.be.lt(job.timestamp); - expect(job.processedOn! 
- prev.timestamp).to.be.gte(ONE_DAY); - } - prev = job; - - counter++; - if (counter == 5) { - resolve(); - } - }); - }); - - await queue.add( - 'repeat', - { foo: 'bar' }, - { - repeat: { - pattern: '0 0 7 * * *', - immediately: true, - }, - }, - ); - this.clock.tick(delay); - - worker.run(); - - await completing; - await worker.close(); - delayStub.restore(); - }); - - it('should repeat once a day for 5 days', async function () { - this.timeout(8000); - - const date = new Date('2017-05-05 13:12:00'); - this.clock.setSystemTime(date); - - const nextTick = ONE_DAY + 10 * ONE_SECOND; - const delay = 5 * ONE_SECOND + 500; - - const worker = new Worker( - queueName, - async () => { - this.clock.tick(nextTick); - }, - { - autorun: false, - connection, - prefix, - skipStalledCheck: true, - skipLockRenewal: true, - }, - ); - const delayStub = sinon.stub(worker, 'delay').callsFake(async () => { - console.log('delay'); - }); - - let prev: Job; - let counter = 0; - const completing = new Promise((resolve, reject) => { - worker.on('completed', async job => { - try { - if (prev) { - expect(prev.timestamp).to.be.lt(job.timestamp); - expect(job.processedOn! 
- prev.timestamp).to.be.gte(ONE_DAY); - } - prev = job; - - counter++; - if (counter == 5) { - resolve(); - } - } catch (error) { - console.log(error); - } - }); - }); - - await queue.add( - 'repeat', - { foo: 'bar' }, - { - repeat: { - pattern: '0 1 * * *', - endDate: new Date('2017-05-10 01:00:00'), - }, - }, - ); - - this.clock.tick(nextTick + delay); - - worker.run(); - - await completing; - await worker.close(); - delayStub.restore(); - }); - - describe('when utc option is provided', function () { - it('repeats once a day for 5 days', async function () { - this.timeout(8000); - - const date = new Date('2017-05-05 13:12:00'); - this.clock.setSystemTime(date); - - const nextTick = ONE_DAY + 10 * ONE_SECOND; - const delay = 5 * ONE_SECOND + 500; - - const worker = new Worker( - queueName, - async () => { - this.clock.tick(nextTick); - }, - { autorun: false, connection, prefix }, - ); - const delayStub = sinon.stub(worker, 'delay').callsFake(async () => { - console.log('delay'); - }); - - let prev: Job; - let counter = 0; - const completing = new Promise((resolve, reject) => { - worker.on('completed', async job => { - try { - if (prev) { - expect(prev.timestamp).to.be.lt(job.timestamp); - expect(job.processedOn! 
- prev.timestamp).to.be.gte(ONE_DAY); - } - prev = job; - - counter++; - if (counter == 5) { - resolve(); - } - } catch (err) { - reject(err); - } - }); - }); - - await queue.add( - 'repeat', - { foo: 'bar' }, - { - repeat: { - pattern: '0 1 * * *', - endDate: new Date('2017-05-10 13:13:00'), - tz: 'Europe/Athens', - utc: true, - }, - }, - ); - this.clock.tick(nextTick + delay); - - worker.run(); - - await completing; - await worker.close(); - delayStub.restore(); - }); - }); - - it('should repeat 7:th day every month at 9:25', async function () { - this.timeout(15000); - - const date = new Date('2017-02-02 7:21:42'); - this.clock.setSystemTime(date); - - const nextTick = () => { - const now = moment(); - const nextMonth = moment().add(1, 'months'); - this.clock.tick(nextMonth - now); - }; - - const worker = new Worker( - queueName, - async () => { - nextTick(); - }, - { autorun: false, connection, prefix }, - ); - const delayStub = sinon.stub(worker, 'delay').callsFake(async () => {}); - - let counter = 25; - let prev: Job; - const completing = new Promise((resolve, reject) => { - worker.on('completed', async job => { - try { - if (prev) { - expect(prev.timestamp).to.be.lt(job.timestamp); - const diff = moment(job.processedOn!).diff( - moment(prev.timestamp), - 'months', - true, - ); - expect(diff).to.be.gte(1); - } - prev = job; - - counter--; - if (counter == 0) { - resolve(); - } - } catch (error) { - console.log(error); - reject(error); - } - }); - }); - - worker.run(); - - await queue.add( - 'repeat', - { foo: 'bar' }, - { repeat: { pattern: '* 25 9 7 * *' } }, - ); - nextTick(); - - await completing; - await worker.close(); - delayStub.restore(); - }); - - describe('when 2 jobs with the same options are added', function () { - it('creates only one job', async function () { - const options = { - repeat: { - pattern: '0 1 * * *', - }, - }; - - const p1 = queue.add('test', { foo: 'bar' }, options); - const p2 = queue.add('test', { foo: 'bar' }, options); - - 
const jobs = await Promise.all([p1, p2]); - const configs = await repeat.getRepeatableJobs(0, -1, true); - - const count = await queue.count(); - - expect(count).to.be.equal(1); - expect(configs).to.have.length(1); - expect(jobs.length).to.be.eql(2); - expect(jobs[0].id).to.be.eql(jobs[1].id); - }); - }); - - describe('when repeatable job is promoted', function () { - it('keeps one repeatable and one delayed after being processed', async function () { - const options = { - repeat: { - pattern: '0 * 1 * *', - }, - }; - - const worker = new Worker(queueName, async () => {}, { - connection, - prefix, - }); - - const completing = new Promise(resolve => { - worker.on('completed', () => { - resolve(); - }); - }); - - const repeatableJob = await queue.add('test', { foo: 'bar' }, options); - const delayedCount = await queue.getDelayedCount(); - expect(delayedCount).to.be.equal(1); - - await repeatableJob.promote(); - await completing; - - const delayedCount2 = await queue.getDelayedCount(); - expect(delayedCount2).to.be.equal(1); - - const configs = await repeat.getRepeatableJobs(0, -1, true); - - expect(delayedCount).to.be.equal(1); - - const count = await queue.count(); - - expect(count).to.be.equal(1); - expect(configs).to.have.length(1); - await worker.close(); - }); - }); - - it('should allow removing a named repeatable job', async function () { - const numJobs = 3; - const date = new Date('2017-02-07 9:24:00'); - let prev: Job; - let counter = 0; - - this.clock.setSystemTime(date); - - const nextTick = ONE_SECOND + 1; - const repeat = { pattern: '*/1 * * * * *' }; - let processor; - - const processing = new Promise((resolve, reject) => { - processor = async () => { - counter++; - if (counter == numJobs) { - const removed = await queue.removeRepeatable('remove', repeat); - expect(removed).to.be.true; - this.clock.tick(nextTick); - const delayed = await queue.getDelayed(); - expect(delayed).to.be.empty; - resolve(); - } else if (counter > numJobs) { - 
reject(Error(`should not repeat more than ${numJobs} times`)); - } - }; - }); - - const worker = new Worker(queueName, processor, { connection, prefix }); - const delayStub = sinon.stub(worker, 'delay').callsFake(async () => {}); - - await queue.add('remove', { foo: 'bar' }, { repeat }); - this.clock.tick(nextTick); - - worker.on('completed', job => { - this.clock.tick(nextTick); - if (prev) { - expect(prev.timestamp).to.be.lt(job.timestamp); - expect(job.timestamp - prev.timestamp).to.be.gte(ONE_SECOND); - } - prev = job; - }); - - await processing; - await worker.close(); - delayStub.restore(); - }); - - it('should be able to remove repeatable jobs by key', async () => { - const client = await queue.client; - const repeat = { pattern: '*/2 * * * * *' }; - - const createdJob = await queue.add('remove', { foo: 'bar' }, { repeat }); - const delayedCount1 = await queue.getJobCountByTypes('delayed'); - expect(delayedCount1).to.be.equal(1); - const job = await queue.getJob(createdJob.id!); - const repeatableJobs = await queue.getRepeatableJobs(); - expect(repeatableJobs).to.have.length(1); - const existBeforeRemoval = await client.exists( - `${prefix}:${queue.name}:repeat:${createdJob.repeatJobKey!}`, - ); - expect(existBeforeRemoval).to.be.equal(1); - const removed = await queue.removeRepeatableByKey(createdJob.repeatJobKey!); - const delayedCount = await queue.getJobCountByTypes('delayed'); - expect(delayedCount).to.be.equal(0); - const existAfterRemoval = await client.exists( - `${prefix}:${queue.name}:repeat:${createdJob.repeatJobKey!}`, - ); - expect(existAfterRemoval).to.be.equal(0); - expect(job!.repeatJobKey).to.not.be.undefined; - expect(removed).to.be.true; - const repeatableJobsAfterRemove = await queue.getRepeatableJobs(); - expect(repeatableJobsAfterRemove).to.have.length(0); - }); - - describe('when legacy repeatable format is present', function () { - it('should be able to remove legacy repeatable jobs', async () => { - const client = await queue.client; 
- await client.hmset( - `${prefix}:${queue.name}:repeat:839d4be40c8b2f30fca6f860d0cf76f7:1735711200000`, - 'priority', - 0, - 'delay', - 14524061394, - 'data', - '{}', - 'timestamp', - 1721187138606, - 'rjk', - 'remove::::* 1 * 1 *', - 'name', - 'remove', - ); - await client.zadd( - `${prefix}:${queue.name}:repeat`, - 1735711200000, - 'remove::::* 1 * 1 *', - ); - await client.zadd( - `${prefix}:${queue.name}:delayed`, - 1735711200000, - 'repeat:839d4be40c8b2f30fca6f860d0cf76f7:1735711200000', - ); - - const repeat = { pattern: '* 1 * 1 *' }; - - const repeatableJobs = await queue.getRepeatableJobs(); - expect(repeatableJobs).to.have.length(1); - const removed = await queue.removeRepeatable('remove', repeat); - - const delayedCount = await queue.getJobCountByTypes('delayed'); - expect(delayedCount).to.be.equal(0); - expect(removed).to.be.true; - const repeatableJobsAfterRemove = await queue.getRepeatableJobs(); - expect(repeatableJobsAfterRemove).to.have.length(0); - }); - - it('should be able to remove legacy repeatable jobs by key', async () => { - const client = await queue.client; - await client.hmset( - `${prefix}:${queue.name}:repeat:839d4be40c8b2f30fca6f860d0cf76f7:1735711200000`, - 'priority', - 0, - 'delay', - 14524061394, - 'data', - '{}', - 'timestamp', - 1721187138606, - 'rjk', - 'remove::::* 1 * 1 *', - 'name', - 'remove', - ); - await client.zadd( - `${prefix}:${queue.name}:repeat`, - 1735711200000, - 'remove::::* 1 * 1 *', - ); - await client.zadd( - `${prefix}:${queue.name}:delayed`, - 1735711200000, - 'repeat:839d4be40c8b2f30fca6f860d0cf76f7:1735711200000', - ); - - const repeatableJobs = await queue.getRepeatableJobs(); - expect(repeatableJobs).to.have.length(1); - const removed = await queue.removeRepeatableByKey('remove::::* 1 * 1 *'); - - const delayedCount = await queue.getJobCountByTypes('delayed'); - expect(delayedCount).to.be.equal(0); - expect(removed).to.be.true; - const repeatableJobsAfterRemove = await queue.getRepeatableJobs(); - 
expect(repeatableJobsAfterRemove).to.have.length(0); - }); - - describe('when re-adding repeatable job now with new format', function () { - it('should keep legacy repeatable job and be able to remove it', async function () { - this.clock.setSystemTime(1721187138606); - const client = await queue.client; - await client.hmset( - `${prefix}:${queue.name}:repeat:839d4be40c8b2f30fca6f860d0cf76f7:1735711200000`, - 'priority', - 0, - 'delay', - 14524061394, - 'data', - '{}', - 'timestamp', - 1721187138606, - 'rjk', - 'remove::::* 1 * 1 *', - 'name', - 'remove', - ); - await client.zadd( - `${prefix}:${queue.name}:repeat`, - 1735711200000, - 'remove::::* 1 * 1 *', - ); - await client.zadd( - `${prefix}:${queue.name}:delayed`, - 1735711200000, - 'repeat:839d4be40c8b2f30fca6f860d0cf76f7:1735711200000', - ); - - const repeat = { pattern: '* 1 * 1 *' }; - - const repeatableJobs = await queue.getRepeatableJobs(); - expect(repeatableJobs).to.have.length(1); - expect(repeatableJobs[0].key).to.be.equal('remove::::* 1 * 1 *'); - const removed = await queue.removeRepeatable('remove', repeat); - - const delayedCount = await queue.getJobCountByTypes('delayed'); - expect(delayedCount).to.be.equal(0); - expect(removed).to.be.true; - const repeatableJobsAfterRemove = await queue.getRepeatableJobs(); - expect(repeatableJobsAfterRemove).to.have.length(0); - }); - - it('should keep legacy repeatable job and delayed referece', async function () { - this.clock.setSystemTime(1721187138606); - - const client = await queue.client; - await client.zadd( - `${prefix}:${queue.name}:repeat`, - 1735693200000, - 'remove::::* 1 * 1 *', - ); - - await queue.add('remove', {}, { repeat: { pattern: '* 1 * 1 *' } }); - const repeatableJobs = await queue.getRepeatableJobs(); - expect(repeatableJobs).to.have.length(1); - expect(repeatableJobs[0].key).to.be.equal('remove::::* 1 * 1 *'); - - const delayedCount = await queue.getJobCountByTypes('delayed'); - expect(delayedCount).to.be.equal(1); - }); - }); - }); 
- - describe('when repeatable job does not exist', function () { - it('returns false', async () => { - const repeat = { pattern: '*/2 * * * * *' }; - - await queue.add('remove', { foo: 'bar' }, { repeat }); - const repeatableJobs = await queue.getRepeatableJobs(); - expect(repeatableJobs).to.have.length(1); - const removed = await queue.removeRepeatableByKey(repeatableJobs[0].key); - expect(removed).to.be.true; - const removed2 = await queue.removeRepeatableByKey(repeatableJobs[0].key); - expect(removed2).to.be.false; - }); - }); - - it('should allow removing a customId repeatable job', async function () { - const numJobs = 4; - const date = new Date('2017-02-07 9:24:00'); - let prev: Job; - let counter = 0; - let processor; - const jobId = 'xxxx'; - - this.clock.setSystemTime(date); - - const nextTick = 2 * ONE_SECOND + 10; - const repeat = { pattern: '*/2 * * * * *' }; - - await queue.add('test', { foo: 'bar' }, { repeat, jobId }); - - this.clock.tick(nextTick); - - const processing = new Promise((resolve, reject) => { - processor = async () => { - counter++; - if (counter == numJobs) { - try { - await queue.removeRepeatable('test', repeat, jobId); - this.clock.tick(nextTick); - const delayed = await queue.getDelayed(); - expect(delayed).to.be.empty; - resolve(); - } catch (err) { - reject(err); - } - } else if (counter > numJobs) { - reject(Error(`should not repeat more than ${numJobs} times`)); - } - }; - }); - - const worker = new Worker(queueName, processor, { connection, prefix }); - const delayStub = sinon.stub(worker, 'delay').callsFake(async () => {}); - await worker.waitUntilReady(); - - worker.on('completed', job => { - this.clock.tick(nextTick); - if (prev) { - expect(prev.timestamp).to.be.lt(job.timestamp); - expect(job.timestamp - prev.timestamp).to.be.gte(2000); - } - prev = job; - }); - - await processing; - await worker.close(); - delayStub.restore(); - }); - - describe('when custom key is provided', function () { - it('should allow removing a 
repeatable job by custom key', async function () { - const numJobs = 4; - const date = new Date('2017-02-07 9:24:00'); - let prev: Job; - let counter = 0; - let processor; - const key = 'xxxx'; - - this.clock.setSystemTime(date); - - const nextTick = 2 * ONE_SECOND + 10; - const repeat = { pattern: '*/2 * * * * *', key }; - - await queue.add('test', { foo: 'bar' }, { repeat }); - - this.clock.tick(nextTick); - - const processing = new Promise((resolve, reject) => { - processor = async () => { - counter++; - if (counter == numJobs) { - try { - await queue.removeRepeatable('test', repeat); - this.clock.tick(nextTick); - const delayed = await queue.getDelayed(); - expect(delayed).to.be.empty; - resolve(); - } catch (err) { - reject(err); - } - } else if (counter > numJobs) { - reject(Error(`should not repeat more than ${numJobs} times`)); - } - }; - }); - - const worker = new Worker(queueName, processor, { connection, prefix }); - const delayStub = sinon.stub(worker, 'delay').callsFake(async () => {}); - await worker.waitUntilReady(); - - worker.on('completed', job => { - this.clock.tick(nextTick); - if (prev) { - expect(prev.timestamp).to.be.lt(job.timestamp); - expect(job.timestamp - prev.timestamp).to.be.gte(2000); - } - prev = job; - }); - - await processing; - await worker.close(); - delayStub.restore(); - }); - - it('should keep only one delayed job if adding a new repeatable job with the same key', async function () { - const date = new Date('2017-02-07 9:24:00'); - const key = 'mykey'; - - this.clock.setSystemTime(date); - - const nextTick = 2 * ONE_SECOND; - - await queue.add( - 'test', - { foo: 'bar' }, - { - repeat: { - every: 10_000, - key, - }, - }, - ); - - this.clock.tick(nextTick); - - let jobs = await queue.getRepeatableJobs(); - expect(jobs).to.have.length(1); - - let delayedJobs = await queue.getDelayed(); - expect(delayedJobs).to.have.length(1); - - await queue.add( - 'test2', - { qux: 'baz' }, - { - repeat: { - every: 35_160, - key, - }, - }, - ); 
- - jobs = await queue.getRepeatableJobs(); - expect(jobs).to.have.length(1); - - delayedJobs = await queue.getDelayed(); - expect(delayedJobs).to.have.length(1); - }); - - it('should keep only one delayed job if adding a new repeatable job with the same key', async function () { - const date = new Date('2017-02-07 9:24:00'); - const key = 'mykey'; - - this.clock.setSystemTime(date); - - const nextTick = 2 * ONE_SECOND; - - await queue.add( - 'test', - { foo: 'bar' }, - { - repeat: { - every: 10_000, - key, - }, - }, - ); - - this.clock.tick(nextTick); - - let jobs = await queue.getRepeatableJobs(); - expect(jobs).to.have.length(1); - - let delayedJobs = await queue.getDelayed(); - expect(delayedJobs).to.have.length(1); - - await queue.add( - 'test2', - { qux: 'baz' }, - { - repeat: { - every: 35_160, - key, - }, - }, - ); - - jobs = await queue.getRepeatableJobs(); - expect(jobs).to.have.length(1); - - delayedJobs = await queue.getDelayed(); - expect(delayedJobs).to.have.length(1); - }); - }); - - // This test is flaky and too complex we need something simpler that tests the same thing - it.skip('should not re-add a repeatable job after it has been removed', async function () { - const repeat = await queue.repeat; - - let worker: Worker; - const jobId = 'xxxx'; - const date = new Date('2017-02-07 9:24:00'); - const nextTick = 2 * ONE_SECOND + 100; - const addNextRepeatableJob = repeat.updateRepeatableJob; - this.clock.setSystemTime(date); - - const repeatOpts = { pattern: '*/2 * * * * *' }; - - const afterRemoved = new Promise(async resolve => { - worker = new Worker( - queueName, - async () => { - const repeatWorker = await worker.repeat; - (repeatWorker.updateRepeatableJob) = async ( - ...args: [string, unknown, JobsOptions, boolean?] 
- ) => { - // In order to simulate race condition - // Make removeRepeatables happen any time after a moveToX is called - await queue.removeRepeatable('test', repeatOpts, jobId); - - // addNextRepeatableJob will now re-add the removed repeatable - const result = await addNextRepeatableJob.apply(repeat, args); - resolve(); - return result; - }; - }, - { connection, prefix }, - ); - - worker.on('completed', () => { - this.clock.tick(nextTick); - }); - }); - - await queue.add('test', { foo: 'bar' }, { repeat: repeatOpts, jobId }); - - this.clock.tick(nextTick); - - await afterRemoved; - - const jobs = await queue.getRepeatableJobs(); - // Repeatable job was recreated - expect(jobs.length).to.eql(0); - - await worker!.close(); - }); - - it('should allow adding a repeatable job after removing it', async function () { - const repeat = { - pattern: '*/5 * * * *', - }; - - const worker = new Worker(queueName, NoopProc, { connection, prefix }); - await worker.waitUntilReady(); - const delayStub = sinon.stub(worker, 'delay').callsFake(async () => {}); - - await queue.add( - 'myTestJob', - { - data: '2', - }, - { - repeat, - }, - ); - let delayed = await queue.getDelayed(); - expect(delayed.length).to.be.eql(1); - - await new Promise(resolve => { - queueEvents.on('removed', async ({ jobId, prev }) => { - expect(jobId).to.be.equal(delayed[0].id); - expect(prev).to.be.equal('delayed'); - resolve(); - }); - - queue.removeRepeatable('myTestJob', repeat); - }); - - delayed = await queue.getDelayed(); - expect(delayed.length).to.be.eql(0); - - await queue.add('myTestJob', { data: '2' }, { repeat: repeat }); - - delayed = await queue.getDelayed(); - expect(delayed.length).to.be.eql(1); - - // We need to force close in this case, as closing is too slow in Dragonfly. 
- await worker.close(true); - delayStub.restore(); - }).timeout(8000); - - it('should not allow to remove a delayed job if it belongs to a repeatable job', async function () { - const repeat = { - every: 1000, - }; - - await queue.add('myTestJob', { data: 'foo' }, { repeat }); - - // Get delayed jobs - const delayed = await queue.getDelayed(); - expect(delayed.length).to.be.eql(1); - - // Try to remove the delayed job - const job = delayed[0]; - await expect(job.remove()).to.be.rejectedWith( - `Job ${job.id} belongs to a job scheduler and cannot be removed directly. remove`, - ); - }); - - it('should not repeat more than 5 times', async function () { - const date = new Date('2017-02-07 9:24:00'); - this.clock.setSystemTime(date); - const nextTick = ONE_SECOND + 500; - - const worker = new Worker(queueName, NoopProc, { connection, prefix }); - const delayStub = sinon.stub(worker, 'delay').callsFake(async () => {}); - - await queue.add( - 'repeat', - { foo: 'bar' }, - { repeat: { limit: 5, pattern: '*/1 * * * * *' } }, - ); - this.clock.tick(nextTick); - - let counter = 0; - - const completing = new Promise((resolve, reject) => { - worker.on('completed', () => { - this.clock.tick(nextTick); - counter++; - if (counter == 5) { - resolve(); - } else if (counter > 5) { - reject(Error('should not repeat more than 5 times')); - } - }); - }); - - await completing; - await worker.close(); - delayStub.restore(); - }); - - it('should processes delayed jobs by priority', async function () { - let currentPriority = 1; - const nextTick = 1000; - - let processor; - this.clock.setSystemTime(new Date('2017-02-02 7:21:42')); - - const processing = new Promise((resolve, reject) => { - processor = async (job: Job) => { - try { - expect(job.id).to.be.ok; - expect(job.data.p).to.be.eql(currentPriority++); - } catch (err) { - reject(err); - } - - if (currentPriority > 3) { - resolve(); - } - }; - }); - - await Promise.all([ - queue.add('test', { p: 1 }, { priority: 1, delay: nextTick * 3 
}), - queue.add('test', { p: 2 }, { priority: 2, delay: nextTick * 2 }), - queue.add('test', { p: 3 }, { priority: 3, delay: nextTick }), - ]); - - this.clock.tick(nextTick * 3 + 100); - - const worker = new Worker(queueName, processor, { connection, prefix }); - await worker.waitUntilReady(); - - await processing; - - await worker.close(); - }); - - it('should use ".every" as a valid interval', async function () { - const interval = ONE_SECOND * 2; - const date = new Date('2017-02-07 9:24:00'); - - this.clock.setSystemTime(date); - - const nextTick = ONE_SECOND * 2 + 500; - - await queue.add('repeat m', { type: 'm' }, { repeat: { every: interval } }); - await queue.add('repeat s', { type: 's' }, { repeat: { every: interval } }); - this.clock.tick(nextTick); - - const worker = new Worker(queueName, async () => {}, { - connection, - prefix, - }); - const delayStub = sinon.stub(worker, 'delay').callsFake(async () => {}); - await worker.waitUntilReady(); - - let prevType: string; - let counter = 0; - - const completing = new Promise(resolve => { - worker.on('completed', job => { - this.clock.tick(nextTick); - if (prevType) { - expect(prevType).to.not.be.eql(job.data.type); - } - prevType = job.data.type; - counter++; - if (counter == 20) { - resolve(); - } - }); - }); - - await completing; - await worker.close(); - delayStub.restore(); - }); - - it('should throw an error when using .pattern and .every simultaneously', async function () { - await expect( - queue.add( - 'repeat', - { type: 'm' }, - { repeat: { every: 5000, pattern: '* /1 * * * * *' } }, - ), - ).to.be.rejectedWith( - 'Both .pattern and .every options are defined for this repeatable job', - ); - }); - - it('should emit a waiting event when adding a repeatable job to the waiting list', async function () { - const date = new Date('2017-02-07 9:24:00'); - this.clock.setSystemTime(date); - const nextTick = 1 * ONE_SECOND + 500; - - const worker = new Worker(queueName, async job => {}, { - connection, - 
prefix, - }); - const delayStub = sinon.stub(worker, 'delay').callsFake(async () => {}); - - const waiting = new Promise((resolve, reject) => { - queueEvents.on('waiting', function ({ jobId }) { - try { - expect(jobId).to.be.equal( - `repeat:16db7a9b166154f5c636abf3c8fe3364:${ - date.getTime() + 1 * ONE_SECOND - }`, - ); - resolve(); - } catch (err) { - reject(err); - } - }); - }); - - await queue.add( - 'test', - { foo: 'bar' }, - { repeat: { pattern: '*/1 * * * * *' } }, - ); - this.clock.tick(nextTick); - - await waiting; - await worker.close(); - delayStub.restore(); - }); - - it('should have the right count value', async function () { - await queue.add('test', { foo: 'bar' }, { repeat: { every: 1000 } }); - this.clock.tick(ONE_SECOND + 100); - - let processor; - const processing = new Promise((resolve, reject) => { - processor = async (job: Job) => { - if (job.opts.repeat!.count === 1) { - resolve(); - } else { - reject(new Error('repeatable job got the wrong repeat count')); - } - }; - }); - - const worker = new Worker(queueName, processor, { connection, prefix }); - - await processing; - await worker.close(); - }); -}); diff --git a/tests/test_sandboxed_process.ts b/tests/test_sandboxed_process.ts index c246a06458..b7d9bc65de 100644 --- a/tests/test_sandboxed_process.ts +++ b/tests/test_sandboxed_process.ts @@ -28,6 +28,7 @@ describe('Sandboxed process using child processes', () => { queueName = `test-${v4()}`; queue = new Queue(queueName, { connection, prefix }); queueEvents = new QueueEvents(queueName, { connection, prefix }); + await queue.waitUntilReady(); await queueEvents.waitUntilReady(); }); @@ -462,6 +463,7 @@ function sandboxProcessTests( resolve(); }); }); + await worker.waitUntilReady(); const inspect = stderr.inspect(); await queue.add('test', { foo: 'bar' }); @@ -696,6 +698,7 @@ function sandboxProcessTests( drainDelay: 1, useWorkerThreads, }); + await worker.waitUntilReady(); const completing = new Promise((resolve, reject) => { 
worker.on('completed', async (job: Job, value: any) => { diff --git a/tests/test_stalled_jobs.ts b/tests/test_stalled_jobs.ts index 2ccdf5c8a4..d184194465 100644 --- a/tests/test_stalled_jobs.ts +++ b/tests/test_stalled_jobs.ts @@ -378,7 +378,7 @@ describe('stalled jobs', function () { await queueEvents.close(); }); - describe('when failParentOnFailure is provided as true', function () { + describe('when onChildFailure is provided as fail', function () { it('should move parent to failed when child is moved to failed', async function () { this.timeout(6000); const concurrency = 4; @@ -421,7 +421,7 @@ describe('stalled jobs', function () { name: 'test', data: { foo: 'bar' }, queueName, - opts: { failParentOnFailure: true }, + opts: { onChildFailure: 'fail' }, }, ], }); @@ -467,7 +467,7 @@ describe('stalled jobs', function () { }); }); - describe('when ignoreDependencyOnFailure is provided as true', function () { + describe('when onChildFailure is provided as ignore', function () { it('should move parent to waiting when child is moved to failed and save child failedReason', async function () { this.timeout(6000); const concurrency = 4; @@ -510,7 +510,7 @@ describe('stalled jobs', function () { name: 'test', data: { foo: 'bar' }, queueName, - opts: { ignoreDependencyOnFailure: true }, + opts: { onChildFailure: 'ignore' }, }, ], }); @@ -561,7 +561,7 @@ describe('stalled jobs', function () { }); }); - describe('when removeDependencyOnFailure is provided as true', function () { + describe('when onChildFailure is provided as remove', function () { it('should move parent to waiting when child is moved to failed', async function () { this.timeout(6000); const concurrency = 4; @@ -604,7 +604,9 @@ describe('stalled jobs', function () { name: 'test', data: { foo: 'bar' }, queueName, - opts: { removeDependencyOnFailure: true }, + opts: { + onChildFailure: 'remove', + }, }, ], }); diff --git a/tests/test_worker.ts b/tests/test_worker.ts index c8f7e38f11..4ed21e6d6d 100644 --- 
a/tests/test_worker.ts +++ b/tests/test_worker.ts @@ -15,11 +15,7 @@ import { } from '../src/classes'; import { KeepJobs, MinimalJob } from '../src/interfaces'; import { JobsOptions } from '../src/types'; -import { - delay, - isRedisVersionLowerThan, - removeAllQueueData, -} from '../src/utils'; +import { delay, isVersionLowerThan, removeAllQueueData } from '../src/utils'; describe('workers', function () { const redisHost = process.env.REDIS_HOST || 'localhost'; @@ -40,6 +36,7 @@ describe('workers', function () { queueName = `test-${v4()}`; queue = new Queue(queueName, { connection, prefix }); queueEvents = new QueueEvents(queueName, { connection, prefix }); + await queue.waitUntilReady(); await queueEvents.waitUntilReady(); }); @@ -482,7 +479,7 @@ describe('workers', function () { // Add spy to worker.moveToActive const spy = sinon.spy(worker, 'moveToActive'); const bclientSpy = sinon.spy( - await worker.blockingConnection.client, + await (worker as any).blockingConnection.client, 'bzpopmin', ); @@ -492,8 +489,6 @@ describe('workers', function () { expect(job.data.foo).to.be.eql('bar'); } - expect(bclientSpy.callCount).to.be.equal(1); - await new Promise((resolve, reject) => { worker.on('completed', (_job: Job, _result: any) => { completedJobs++; @@ -533,7 +528,7 @@ describe('workers', function () { // Add spy to worker.moveToActive const spy = sinon.spy(worker, 'moveToActive'); const bclientSpy = sinon.spy( - await worker.blockingConnection.client, + await (worker as any).blockingConnection.client, 'bzpopmin', ); await worker.waitUntilReady(); @@ -832,7 +827,7 @@ describe('workers', function () { }); await worker.waitUntilReady(); const client = await worker.client; - if (isRedisVersionLowerThan(worker.redisVersion, '7.0.8')) { + if (isVersionLowerThan(worker.redisVersion, '7.0.8')) { await client.bzpopmin(`key`, 0.002); } else { await client.bzpopmin(`key`, 0.001); @@ -946,7 +941,7 @@ describe('workers', function () { }); await worker.waitUntilReady(); - if 
(isRedisVersionLowerThan(worker.redisVersion, '7.0.8')) { + if (isVersionLowerThan(worker.redisVersion, '7.0.8')) { expect(worker['getBlockTimeout'](0)).to.be.equal(0.002); } else { expect(worker['getBlockTimeout'](0)).to.be.equal(0.001); @@ -982,7 +977,7 @@ describe('workers', function () { }); await worker.waitUntilReady(); - if (isRedisVersionLowerThan(worker.redisVersion, '7.0.8')) { + if (isVersionLowerThan(worker.redisVersion, '7.0.8')) { expect( worker['getBlockTimeout'](Date.now() + 100), ).to.be.greaterThan(0.002); @@ -1818,7 +1813,7 @@ describe('workers', function () { }); describe('when queue is paused and retry a job', () => { - it('moves job to paused', async () => { + it('moves job to wait', async () => { const worker = new Worker( queueName, async () => { @@ -1848,7 +1843,7 @@ describe('workers', function () { await queue.pause(); await job.retry('completed'); - const pausedJobsCount = await queue.getJobCountByTypes('paused'); + const pausedJobsCount = await queue.getJobCountByTypes('wait'); expect(pausedJobsCount).to.be.equal(1); await worker.close(); @@ -3548,12 +3543,7 @@ describe('workers', function () { connection, prefix, settings: { - backoffStrategy: ( - attemptsMade: number, - type: string, - err: Error, - job: MinimalJob, - ) => { + backoffStrategy: (attemptsMade: number, type: string) => { switch (type) { case 'custom1': { return attemptsMade * 1000; @@ -4331,7 +4321,7 @@ describe('workers', function () { }, }); - if (isRedisVersionLowerThan(childrenWorker.redisVersion, '7.2.0')) { + if (isVersionLowerThan(childrenWorker.redisVersion, '7.2.0')) { expect(unprocessed1!.length).to.be.greaterThanOrEqual(50); expect(nextCursor1).to.not.be.equal(0); } else { @@ -4347,7 +4337,7 @@ describe('workers', function () { }, }); - if (isRedisVersionLowerThan(childrenWorker.redisVersion, '7.2.0')) { + if (isVersionLowerThan(childrenWorker.redisVersion, '7.2.0')) { expect(unprocessed2!.length).to.be.lessThanOrEqual(15); expect(nextCursor2).to.be.equal(0); 
} else {