const { expect } = require('expect')
const { FixedClusterPool, PoolEvents } = require('../../../lib')
const { WorkerFunctions } = require('../../test-types')
const { waitWorkerEvents } = require('../../test-utils')

describe('Fixed cluster pool test suite', () => {
  const numberOfWorkers = 6
  const pool = new FixedClusterPool(
    numberOfWorkers,
    './tests/worker-files/cluster/testWorker.js',
    {
      errorHandler: e => console.error(e)
    }
  )
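  // Pool with the tasks queue enabled: each worker is expected to execute at
  // most `tasksQueueOptions.concurrency` tasks at a time, the surplus is queued.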
  const queuePool = new FixedClusterPool(
    numberOfWorkers,
    './tests/worker-files/cluster/testWorker.js',
    {
      enableTasksQueue: true,
      tasksQueueOptions: {
        concurrency: 2
      },
      errorHandler: e => console.error(e)
    }
  )
  const emptyPool = new FixedClusterPool(
    numberOfWorkers,
    './tests/worker-files/cluster/emptyWorker.js',
    { exitHandler: () => console.log('empty pool worker exited') }
  )
  const echoPool = new FixedClusterPool(
    numberOfWorkers,
    './tests/worker-files/cluster/echoWorker.js'
  )
  const errorPool = new FixedClusterPool(
    numberOfWorkers,
    './tests/worker-files/cluster/errorWorker.js',
    {
      errorHandler: e => console.error(e)
    }
  )
  const asyncErrorPool = new FixedClusterPool(
    numberOfWorkers,
    './tests/worker-files/cluster/asyncErrorWorker.js',
    {
      errorHandler: e => console.error(e)
    }
  )
  const asyncPool = new FixedClusterPool(
    numberOfWorkers,
    './tests/worker-files/cluster/asyncWorker.js'
  )

  after('Destroy all pools', async () => {
    // We need to clean up the resources after our test
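    // `pool` is intentionally not destroyed here: the 'Shutdown test' below takes care of it.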
    await echoPool.destroy()
    await asyncPool.destroy()
    await errorPool.destroy()
    await asyncErrorPool.destroy()
    await emptyPool.destroy()
    await queuePool.destroy()
  })

  it('Verify that the function is executed in a worker cluster', async () => {
    let result = await pool.execute({
      function: WorkerFunctions.fibonacci
    })
    expect(result).toBe(75025)
    result = await pool.execute({
      function: WorkerFunctions.factorial
    })
    expect(result).toBe(9.33262154439441e157)
  })

  it('Verify that it is possible to invoke the execute() method without input', async () => {
    const result = await pool.execute()
    expect(result).toBe(false)
  })

| 80 | it("Verify that 'busy' event is emitted", async () => { |
| 81 | let poolBusy = 0 |
| 82 | pool.emitter.on(PoolEvents.busy, () => ++poolBusy) |
| 83 | for (let i = 0; i < numberOfWorkers * 2; i++) { |
| 84 | pool.execute() |
| 85 | } |
| 86 | // The `busy` event is triggered when the number of submitted tasks at once reach the number of fixed pool workers. |
| 87 | // So in total numberOfWorkers + 1 times for a loop submitting up to numberOfWorkers * 2 tasks to the fixed pool. |
| 88 | expect(poolBusy).toBe(numberOfWorkers + 1) |
| 89 | }) |

  it('Verify that tasks queuing is working', async () => {
    const promises = new Set()
    const maxMultiplier = 2
    for (let i = 0; i < numberOfWorkers * maxMultiplier; i++) {
      promises.add(queuePool.execute())
    }
    expect(promises.size).toBe(numberOfWorkers * maxMultiplier)
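    // While all the tasks are in flight, each worker should be at or below the
    // configured concurrency and should have tasks waiting in its queue.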
    for (const workerNode of queuePool.workerNodes) {
      expect(workerNode.usage.tasks.executing).toBeLessThanOrEqual(
        queuePool.opts.tasksQueueOptions.concurrency
      )
      expect(workerNode.usage.tasks.executed).toBe(0)
      expect(workerNode.usage.tasks.queued).toBeGreaterThan(0)
      expect(workerNode.usage.tasks.maxQueued).toBeGreaterThan(0)
    }
    expect(queuePool.info.executingTasks).toBe(numberOfWorkers)
    expect(queuePool.info.queuedTasks).toBe(
      numberOfWorkers * maxMultiplier - numberOfWorkers
    )
    expect(queuePool.info.maxQueuedTasks).toBe(
      numberOfWorkers * maxMultiplier - numberOfWorkers
    )
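    // Once every submitted task has settled, the queues should be drained and
    // the executed counters should reflect the work done.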
    await Promise.all(promises)
    for (const workerNode of queuePool.workerNodes) {
      expect(workerNode.usage.tasks.executing).toBe(0)
      expect(workerNode.usage.tasks.executed).toBeGreaterThan(0)
      expect(workerNode.usage.tasks.executed).toBeLessThanOrEqual(maxMultiplier)
      expect(workerNode.usage.tasks.queued).toBe(0)
      expect(workerNode.usage.tasks.maxQueued).toBe(1)
    }
  })

  it('Verify that it is possible to have a worker that returns undefined', async () => {
    const result = await emptyPool.execute()
    expect(result).toBeUndefined()
  })

  it('Verify that data are sent to the worker correctly', async () => {
    const data = { f: 10 }
    const result = await echoPool.execute(data)
    expect(result).toStrictEqual(data)
  })

  it('Verify that error handling is working properly:sync', async () => {
    const data = { f: 10 }
    let taskError
    errorPool.emitter.on(PoolEvents.taskError, e => {
      taskError = e
    })
    let inError
    try {
      await errorPool.execute(data)
    } catch (e) {
      inError = e
    }
    expect(inError).toBeDefined()
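    // Cluster workers transfer task errors as a plain message string rather
    // than an Error instance, hence the typeof check below.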
    expect(typeof inError).toBe('string')
    expect(inError).toBe('Error Message from ClusterWorker')
    expect(taskError).toStrictEqual({
      workerId: expect.any(Number),
      message: 'Error Message from ClusterWorker',
      data
    })
    expect(
      errorPool.workerNodes.some(
        workerNode => workerNode.usage.tasks.failed === 1
      )
    ).toBe(true)
  })

  it('Verify that error handling is working properly:async', async () => {
    const data = { f: 10 }
    let taskError
    asyncErrorPool.emitter.on(PoolEvents.taskError, e => {
      taskError = e
    })
    let inError
    try {
      await asyncErrorPool.execute(data)
    } catch (e) {
      inError = e
    }
    expect(inError).toBeDefined()
    expect(typeof inError).toBe('string')
    expect(inError).toBe('Error Message from ClusterWorker:async')
    expect(taskError).toStrictEqual({
      workerId: expect.any(Number),
      message: 'Error Message from ClusterWorker:async',
      data
    })
    expect(
      asyncErrorPool.workerNodes.some(
        workerNode => workerNode.usage.tasks.failed === 1
      )
    ).toBe(true)
  })

  it('Verify that async function is working properly', async () => {
    const data = { f: 10 }
    const startTime = performance.now()
    const result = await asyncPool.execute(data)
    const usedTime = performance.now() - startTime
    expect(result).toStrictEqual(data)
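    // The async worker is expected to resolve after a ~2000 ms delay, hence the timing lower bound below.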
    expect(usedTime).toBeGreaterThanOrEqual(2000)
  })

  it('Shutdown test', async () => {
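    // waitWorkerEvents resolves with the number of 'exit' events received once all pool workers have exited.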
    const exitPromise = waitWorkerEvents(pool, 'exit', numberOfWorkers)
    await pool.destroy()
    const numberOfExitEvents = await exitPromise
    expect(numberOfExitEvents).toBe(numberOfWorkers)
  })

  it('Verify that cluster pool options are checked', async () => {
    const workerFilePath = './tests/worker-files/cluster/testWorker.js'
    let pool1 = new FixedClusterPool(numberOfWorkers, workerFilePath)
    expect(pool1.opts.env).toBeUndefined()
    expect(pool1.opts.settings).toBeUndefined()
    await pool1.destroy()
    pool1 = new FixedClusterPool(numberOfWorkers, workerFilePath, {
      env: { TEST: 'test' },
      settings: { args: ['--use', 'http'], silent: true }
    })
    expect(pool1.opts.env).toStrictEqual({ TEST: 'test' })
    expect(pool1.opts.settings).toStrictEqual({
      args: ['--use', 'http'],
      silent: true
    })
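    // The pool presumably forwards these settings to cluster worker setup with
    // `exec` pointing at the worker file, which is the shape mirrored below.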
    expect({ ...pool1.opts.settings, exec: workerFilePath }).toStrictEqual({
      args: ['--use', 'http'],
      silent: true,
      exec: workerFilePath
    })
    await pool1.destroy()
  })

  it('Should work even without opts in input', async () => {
    const pool1 = new FixedClusterPool(
      numberOfWorkers,
      './tests/worker-files/cluster/testWorker.js'
    )
    const res = await pool1.execute()
    expect(res).toBe(false)
    // We need to clean up the resources after our test
    await pool1.destroy()
  })

  it('Verify that a pool with zero workers fails', async () => {
    expect(
      () =>
        new FixedClusterPool(0, './tests/worker-files/cluster/testWorker.js')
    ).toThrowError('Cannot instantiate a fixed pool with no worker')
  })
})