}
}
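+ /**
+ * Whether the pool is starting, i.e. at least one worker node has not yet started.
+ */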
+ private get starting (): boolean {
+ return this.workerNodes.some(workerNode => !workerNode.info.started)
+ }
+
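+ /**
+ * Whether the pool has started, i.e. at least one worker node has started.
+ */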
+ private get started (): boolean {
+ return this.workerNodes.some(workerNode => workerNode.info.started)
+ }
+
/** @inheritDoc */
public get info (): PoolInfo {
return {
workerNodes: this.workerNodes.length,
idleWorkerNodes: this.workerNodes.reduce(
(accumulator, workerNode) =>
- workerNode.workerUsage.tasks.executing === 0
+ workerNode.usage.tasks.executing === 0
? accumulator + 1
: accumulator,
0
),
busyWorkerNodes: this.workerNodes.reduce(
(accumulator, workerNode) =>
- workerNode.workerUsage.tasks.executing > 0
- ? accumulator + 1
- : accumulator,
+ workerNode.usage.tasks.executing > 0 ? accumulator + 1 : accumulator,
0
),
executedTasks: this.workerNodes.reduce(
(accumulator, workerNode) =>
- accumulator + workerNode.workerUsage.tasks.executed,
+ accumulator + workerNode.usage.tasks.executed,
0
),
executingTasks: this.workerNodes.reduce(
(accumulator, workerNode) =>
- accumulator + workerNode.workerUsage.tasks.executing,
+ accumulator + workerNode.usage.tasks.executing,
0
),
queuedTasks: this.workerNodes.reduce(
(accumulator, workerNode) =>
- accumulator + workerNode.workerUsage.tasks.queued,
+ accumulator + workerNode.usage.tasks.queued,
0
),
maxQueuedTasks: this.workerNodes.reduce(
(accumulator, workerNode) =>
- accumulator + workerNode.workerUsage.tasks.maxQueued,
+ accumulator + workerNode.usage.tasks.maxQueued,
0
),
failedTasks: this.workerNodes.reduce(
(accumulator, workerNode) =>
- accumulator + workerNode.workerUsage.tasks.failed,
+ accumulator + workerNode.usage.tasks.failed,
0
)
}
*/
protected abstract get maxSize (): number
+ /**
+ * Gets the worker given its worker id.
+ *
+ * @param workerId - The worker id.
+ * @returns The worker if found in the pool worker nodes, `undefined` otherwise.
+ */
+ private getWorkerById (workerId: number): Worker | undefined {
+ return this.workerNodes.find(workerNode => workerNode.info.id === workerId)
+ ?.worker
+ }
+
/**
* Gets the given worker its worker node key.
*
* @param worker - The worker.
- * @returns The worker node key if the worker is found in the pool worker nodes, `-1` otherwise.
+ * @returns The worker node key if found in the pool worker nodes, `-1` otherwise.
*/
private getWorkerNodeKey (worker: Worker): number {
return this.workerNodes.findIndex(
protected internalBusy (): boolean {
return (
this.workerNodes.findIndex(workerNode => {
- return workerNode.workerUsage.tasks.executing === 0
+ return workerNode.usage.tasks.executing === 0
}) === -1
)
}
if (
this.opts.enableTasksQueue === true &&
(this.busy ||
- this.workerNodes[workerNodeKey].workerUsage.tasks.executing >=
+ this.workerNodes[workerNodeKey].usage.tasks.executing >=
((this.opts.tasksQueueOptions as TasksQueueOptions)
.concurrency as number))
) {
workerNodeKey: number,
task: Task<Data>
): void {
- const workerUsage = this.workerNodes[workerNodeKey].workerUsage
+ const workerUsage = this.workerNodes[workerNodeKey].usage
++workerUsage.tasks.executing
this.updateWaitTimeWorkerUsage(workerUsage, task)
}
worker: Worker,
message: MessageValue<Response>
): void {
- const workerUsage =
- this.workerNodes[this.getWorkerNodeKey(worker)].workerUsage
+ const workerUsage = this.workerNodes[this.getWorkerNodeKey(worker)].usage
this.updateTaskStatisticsWorkerUsage(workerUsage, message)
this.updateRunTimeWorkerUsage(workerUsage, message)
this.updateEluWorkerUsage(workerUsage, message)
if (this.emitter != null) {
this.emitter.emit(PoolEvents.error, error)
}
- if (this.opts.restartWorkerOnError === true) {
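+ // Do not restart the worker while the pool is still starting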
+ if (this.opts.restartWorkerOnError === true && !this.starting) {
this.createAndSetupWorker()
}
})
isKillBehavior(KillBehaviors.HARD, message.kill) ||
(message.kill != null &&
((this.opts.enableTasksQueue === false &&
- this.workerNodes[workerNodeKey].workerUsage.tasks.executing ===
- 0) ||
+ this.workerNodes[workerNodeKey].usage.tasks.executing === 0) ||
(this.opts.enableTasksQueue === true &&
- this.workerNodes[workerNodeKey].workerUsage.tasks.executing ===
- 0 &&
+ this.workerNodes[workerNodeKey].usage.tasks.executing === 0 &&
this.tasksQueueSize(workerNodeKey) === 0)))
) {
// Kill message received from the worker: no new tasks are submitted to that worker for a while ( > maxInactiveTime)
*/
protected workerListener (): (message: MessageValue<Response>) => void {
return message => {
- if (message.id != null) {
+ if (message.workerId != null && message.started != null) {
+ // Worker started message received
+ this.workerNodes[
+ this.getWorkerNodeKey(this.getWorkerById(message.workerId) as Worker)
+ ].info.started = message.started
+ } else if (message.id != null) {
// Task execution response received
const promiseResponse = this.promiseResponseMap.get(message.id)
if (promiseResponse != null) {
workerNode: WorkerNode<Worker, Data>,
workerUsage: WorkerUsage
): void {
- workerNode.workerUsage = workerUsage
+ workerNode.usage = workerUsage
}
/**
private pushWorkerNode (worker: Worker): number {
this.workerNodes.push({
worker,
- workerUsage: this.getWorkerUsage(),
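+ // worker.threadId is set for worker_threads workers, worker.id for cluster workers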
+ info: { id: worker.threadId ?? worker.id, started: false },
+ usage: this.getWorkerUsage(),
tasksQueue: new Queue<Task<Data>>()
})
const workerNodeKey = this.getWorkerNodeKey(worker)
// *
// * @param workerNodeKey - The worker node key.
// * @param worker - The worker.
+ // * @param workerInfo - The worker info.
// * @param workerUsage - The worker usage.
// * @param tasksQueue - The worker task queue.
// */
// private setWorkerNode (
// workerNodeKey: number,
// worker: Worker,
+ // workerInfo: WorkerInfo,
// workerUsage: WorkerUsage,
// tasksQueue: Queue<Task<Data>>
// ): void {
// this.workerNodes[workerNodeKey] = {
// worker,
- // workerUsage,
+ // info: workerInfo,
+ // usage: workerUsage,
// tasksQueue
// }
// }
*/
protected getWorkerTaskRunTime (workerNodeKey: number): number {
return this.taskStatisticsRequirements.runTime.median
- ? this.pool.workerNodes[workerNodeKey].workerUsage.runTime.median
- : this.pool.workerNodes[workerNodeKey].workerUsage.runTime.average
+ ? this.pool.workerNodes[workerNodeKey].usage.runTime.median
+ : this.pool.workerNodes[workerNodeKey].usage.runTime.average
}
/**
*/
protected getWorkerTaskWaitTime (workerNodeKey: number): number {
return this.taskStatisticsRequirements.waitTime.median
- ? this.pool.workerNodes[workerNodeKey].workerUsage.waitTime.median
- : this.pool.workerNodes[workerNodeKey].workerUsage.waitTime.average
+ ? this.pool.workerNodes[workerNodeKey].usage.waitTime.median
+ : this.pool.workerNodes[workerNodeKey].usage.waitTime.average
}
/**
*/
protected getWorkerTaskElu (workerNodeKey: number): number {
return this.taskStatisticsRequirements.elu.median
- ? this.pool.workerNodes[workerNodeKey].workerUsage.elu.active.median
- : this.pool.workerNodes[workerNodeKey].workerUsage.elu.active.average
+ ? this.pool.workerNodes[workerNodeKey].usage.elu.active.median
+ : this.pool.workerNodes[workerNodeKey].usage.elu.active.average
}
protected computeDefaultWorkerWeight (): number {
// */
// private findFirstFreeWorkerNodeKey (): number {
// return this.pool.workerNodes.findIndex(workerNode => {
- // return workerNode.workerUsage.tasks.executing === 0
+ // return workerNode.usage.tasks.executing === 0
// })
// }
// private findLastFreeWorkerNodeKey (): number {
// // It requires node >= 18.0.0:
// // return this.workerNodes.findLastIndex(workerNode => {
- // // return workerNode.workerUsage.tasks.executing === 0
+ // // return workerNode.usage.tasks.executing === 0
// // })
// for (
// let workerNodeKey = this.pool.workerNodes.length - 1;
// workerNodeKey >= 0;
// workerNodeKey--
// ) {
- // if (
- // this.pool.workerNodes[workerNodeKey].workerUsage.tasks.executing === 0
- // ) {
+ // if (this.pool.workerNodes[workerNodeKey].usage.tasks.executing === 0) {
// return workerNodeKey
// }
// }
let minTime = Infinity
for (const [workerNodeKey, workerNode] of this.pool.workerNodes.entries()) {
const workerTime =
- workerNode.workerUsage.runTime.aggregate +
- workerNode.workerUsage.waitTime.aggregate
+ workerNode.usage.runTime.aggregate + workerNode.usage.waitTime.aggregate
if (workerTime === 0) {
this.nextWorkerNodeId = workerNodeKey
break
public choose (): number {
let minWorkerElu = Infinity
for (const [workerNodeKey, workerNode] of this.pool.workerNodes.entries()) {
- const workerUsage = workerNode.workerUsage
+ const workerUsage = workerNode.usage
const workerElu = workerUsage.elu?.active.aggregate ?? 0
if (workerElu === 0) {
this.nextWorkerNodeId = workerNodeKey
public choose (): number {
let minNumberOfTasks = Infinity
for (const [workerNodeKey, workerNode] of this.pool.workerNodes.entries()) {
- const workerTaskStatistics = workerNode.workerUsage.tasks
+ const workerTaskStatistics = workerNode.usage.tasks
const workerTasks =
workerTaskStatistics.executed +
workerTaskStatistics.executing +
failed: number
}
+/**
+ * Worker information.
+ *
+ * @internal
+ */
+export interface WorkerInfo {
+ /**
+ * Worker id.
+ */
+ id: number | undefined
+ /**
+ * Started flag.
+ */
+ started: boolean
+}
+
/**
* Worker usage statistics.
*
* Worker interface.
*/
export interface IWorker {
+ /**
+ * Worker id (only set for cluster workers).
+ */
+ id?: number
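+ /**
+ * Worker thread id (only set for `worker_threads` workers).
+ */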
+ threadId?: number
/**
* Register an event listener.
*
* Worker node worker.
*/
readonly worker: Worker
+ /**
+ * Worker node worker info.
+ */
+ info: WorkerInfo
/**
* Worker node worker usage statistics.
*/
- workerUsage: WorkerUsage
+ usage: WorkerUsage
/**
* Worker node tasks queue.
*/
*/
export interface MessageValue<Data = unknown, ErrorData = unknown>
extends Task<Data> {
+ /**
+ * Worker id.
+ */
+ readonly workerId?: number
/**
* Kill code.
*/
*/
readonly taskPerformance?: TaskPerformance
/**
- * Whether to compute the given statistics or not.
+ * Whether the worker computes the given statistics or not.
*/
readonly statistics?: WorkerStatistics
+ /**
+ * Whether the worker has started or not.
+ */
+ readonly started?: boolean
}
/**
Data = unknown,
Response = unknown
> extends AsyncResource {
+ /**
+ * Worker id.
+ */
+ protected abstract get id (): number
/**
* Task function(s) processed by the worker when the pool's `execution` function is invoked.
*/
this.sendToMainWorker({
data: res,
taskPerformance,
+ workerId: this.id,
id: message.id
})
} catch (e) {
message: err,
data: message.data
},
+ workerId: this.id,
id: message.id
})
} finally {
this.sendToMainWorker({
data: res,
taskPerformance,
+ workerId: this.id,
id: message.id
})
return null
message: err,
data: message.data
},
+ workerId: this.id,
id: message.id
})
})
cluster.worker as Worker,
opts
)
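+ // Notify the main process that this worker has started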
+ if (!this.isMain) {
+ this.sendToMainWorker({ workerId: this.id, started: true })
+ }
+ }
+
+ /** @inheritDoc */
+ protected get id (): number {
+ return this.getMainWorker().id
}
/** @inheritDoc */
protected sendToMainWorker (message: MessageValue<Response>): void {
+ console.log('sending message to main worker(cluster)', message)
this.getMainWorker().send(message)
}
-import { type MessagePort, isMainThread, parentPort } from 'node:worker_threads'
+import {
+ type MessagePort,
+ isMainThread,
+ parentPort,
+ threadId
+} from 'node:worker_threads'
import type { MessageValue } from '../utility-types'
import { AbstractWorker } from './abstract-worker'
import type { WorkerOptions } from './worker-options'
parentPort as MessagePort,
opts
)
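+ // Notify the main thread that this worker has started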
+ if (!this.isMain) {
+ this.sendToMainWorker({ workerId: this.id, started: true })
+ }
+ }
+
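+ /** @inheritDoc */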
+ protected get id (): number {
+ return threadId
}
/** @inheritDoc */
protected sendToMainWorker (message: MessageValue<Response>): void {
+ console.log('sending message to main worker(thread)', message)
this.getMainWorker().postMessage(message)
}
}
maxQueuedTasks: 0,
failedTasks: 0
})
+ for (const workerNode of pool.workerNodes) {
+ console.log('thread:workerNode.info', workerNode.info)
+ }
await pool.destroy()
pool = new DynamicClusterPool(
numberOfWorkers,
numberOfWorkers * 2,
- './tests/worker-files/thread/testWorker.js'
+ './tests/worker-files/cluster/testWorker.js'
)
expect(pool.info).toStrictEqual({
type: PoolTypes.dynamic,
maxQueuedTasks: 0,
failedTasks: 0
})
+ for (const workerNode of pool.workerNodes) {
+ console.log('cluster:workerNode.info', workerNode.info)
+ }
await pool.destroy()
})
it('Simulate worker not found', async () => {
const pool = new StubPoolWithRemoveAllWorker(
numberOfWorkers,
- './tests/worker-files/cluster/testWorker.js',
+ './tests/worker-files/thread/testWorker.js',
{
errorHandler: e => console.error(e)
}
'./tests/worker-files/cluster/testWorker.js'
)
for (const workerNode of pool.workerNodes) {
- expect(workerNode.workerUsage).toStrictEqual({
+ expect(workerNode.usage).toStrictEqual({
tasks: {
executed: 0,
executing: 0,
promises.add(pool.execute())
}
for (const workerNode of pool.workerNodes) {
- expect(workerNode.workerUsage).toStrictEqual({
+ expect(workerNode.usage).toStrictEqual({
tasks: {
executed: 0,
executing: maxMultiplier,
}
await Promise.all(promises)
for (const workerNode of pool.workerNodes) {
- expect(workerNode.workerUsage).toStrictEqual({
+ expect(workerNode.usage).toStrictEqual({
tasks: {
executed: maxMultiplier,
executing: 0,
}
await Promise.all(promises)
for (const workerNode of pool.workerNodes) {
- expect(workerNode.workerUsage).toStrictEqual({
+ expect(workerNode.usage).toStrictEqual({
tasks: {
executed: expect.any(Number),
executing: 0,
utilization: 0
}
})
- expect(workerNode.workerUsage.tasks.executed).toBeGreaterThan(0)
- expect(workerNode.workerUsage.tasks.executed).toBeLessThanOrEqual(
- maxMultiplier
- )
+ expect(workerNode.usage.tasks.executed).toBeGreaterThan(0)
+ expect(workerNode.usage.tasks.executed).toBeLessThanOrEqual(maxMultiplier)
}
pool.setWorkerChoiceStrategy(WorkerChoiceStrategies.FAIR_SHARE)
for (const workerNode of pool.workerNodes) {
- expect(workerNode.workerUsage).toStrictEqual({
+ expect(workerNode.usage).toStrictEqual({
tasks: {
executed: 0,
executing: 0,
utilization: 0
}
})
- expect(workerNode.workerUsage.runTime.history.length).toBe(0)
- expect(workerNode.workerUsage.waitTime.history.length).toBe(0)
+ expect(workerNode.usage.runTime.history.length).toBe(0)
+ expect(workerNode.usage.waitTime.history.length).toBe(0)
}
await pool.destroy()
})
}
expect(promises.size).toBe(numberOfWorkers * maxMultiplier)
for (const workerNode of queuePool.workerNodes) {
- expect(workerNode.workerUsage.tasks.executing).toBeLessThanOrEqual(
+ expect(workerNode.usage.tasks.executing).toBeLessThanOrEqual(
queuePool.opts.tasksQueueOptions.concurrency
)
- expect(workerNode.workerUsage.tasks.executed).toBe(0)
- expect(workerNode.workerUsage.tasks.queued).toBeGreaterThan(0)
- expect(workerNode.workerUsage.tasks.maxQueued).toBeGreaterThan(0)
+ expect(workerNode.usage.tasks.executed).toBe(0)
+ expect(workerNode.usage.tasks.queued).toBeGreaterThan(0)
+ expect(workerNode.usage.tasks.maxQueued).toBeGreaterThan(0)
}
expect(queuePool.info.executingTasks).toBe(numberOfWorkers)
expect(queuePool.info.queuedTasks).toBe(
)
await Promise.all(promises)
for (const workerNode of queuePool.workerNodes) {
- expect(workerNode.workerUsage.tasks.executing).toBe(0)
- expect(workerNode.workerUsage.tasks.executed).toBeGreaterThan(0)
- expect(workerNode.workerUsage.tasks.executed).toBeLessThanOrEqual(
- maxMultiplier
- )
- expect(workerNode.workerUsage.tasks.queued).toBe(0)
- expect(workerNode.workerUsage.tasks.maxQueued).toBe(1)
+ expect(workerNode.usage.tasks.executing).toBe(0)
+ expect(workerNode.usage.tasks.executed).toBeGreaterThan(0)
+ expect(workerNode.usage.tasks.executed).toBeLessThanOrEqual(maxMultiplier)
+ expect(workerNode.usage.tasks.queued).toBe(0)
+ expect(workerNode.usage.tasks.maxQueued).toBe(1)
}
})
})
expect(
errorPool.workerNodes.some(
- workerNode => workerNode.workerUsage.tasks.failed === 1
+ workerNode => workerNode.usage.tasks.failed === 1
)
).toBe(true)
})
})
expect(
asyncErrorPool.workerNodes.some(
- workerNode => workerNode.workerUsage.tasks.failed === 1
+ workerNode => workerNode.usage.tasks.failed === 1
)
).toBe(true)
})
}
await Promise.all(promises)
for (const workerNode of pool.workerNodes) {
- expect(workerNode.workerUsage).toStrictEqual({
+ expect(workerNode.usage).toStrictEqual({
tasks: {
executed: maxMultiplier,
executing: 0,
}
await Promise.all(promises)
for (const workerNode of pool.workerNodes) {
- expect(workerNode.workerUsage).toStrictEqual({
+ expect(workerNode.usage).toStrictEqual({
tasks: {
executed: maxMultiplier,
executing: 0,
}
await Promise.all(promises)
for (const workerNode of pool.workerNodes) {
- expect(workerNode.workerUsage).toStrictEqual({
+ expect(workerNode.usage).toStrictEqual({
tasks: {
executed: expect.any(Number),
executing: 0,
utilization: 0
}
})
- expect(workerNode.workerUsage.tasks.executed).toBeGreaterThanOrEqual(0)
- expect(workerNode.workerUsage.tasks.executed).toBeLessThanOrEqual(
+ expect(workerNode.usage.tasks.executed).toBeGreaterThanOrEqual(0)
+ expect(workerNode.usage.tasks.executed).toBeLessThanOrEqual(
max * maxMultiplier
)
}
}
await Promise.all(promises)
for (const workerNode of pool.workerNodes) {
- expect(workerNode.workerUsage).toStrictEqual({
+ expect(workerNode.usage).toStrictEqual({
tasks: {
executed: expect.any(Number),
executing: 0,
utilization: 0
}
})
- expect(workerNode.workerUsage.tasks.executed).toBeGreaterThanOrEqual(0)
- expect(workerNode.workerUsage.tasks.executed).toBeLessThanOrEqual(
+ expect(workerNode.usage.tasks.executed).toBeGreaterThanOrEqual(0)
+ expect(workerNode.usage.tasks.executed).toBeLessThanOrEqual(
max * maxMultiplier
)
}
}
await Promise.all(promises)
for (const workerNode of pool.workerNodes) {
- expect(workerNode.workerUsage).toStrictEqual({
+ expect(workerNode.usage).toStrictEqual({
tasks: {
executed: expect.any(Number),
executing: 0,
utilization: 0
}
})
- expect(workerNode.workerUsage.tasks.executed).toBeGreaterThanOrEqual(0)
- expect(workerNode.workerUsage.tasks.executed).toBeLessThanOrEqual(
+ expect(workerNode.usage.tasks.executed).toBeGreaterThanOrEqual(0)
+ expect(workerNode.usage.tasks.executed).toBeLessThanOrEqual(
max * maxMultiplier
)
- expect(workerNode.workerUsage.runTime.aggregate).toBeGreaterThanOrEqual(0)
- expect(workerNode.workerUsage.waitTime.aggregate).toBeGreaterThanOrEqual(
- 0
- )
+ expect(workerNode.usage.runTime.aggregate).toBeGreaterThanOrEqual(0)
+ expect(workerNode.usage.waitTime.aggregate).toBeGreaterThanOrEqual(0)
}
// We need to clean up the resources after our test
await pool.destroy()
}
await Promise.all(promises)
for (const workerNode of pool.workerNodes) {
- expect(workerNode.workerUsage).toStrictEqual({
+ expect(workerNode.usage).toStrictEqual({
tasks: {
executed: expect.any(Number),
executing: 0,
utilization: 0
}
})
- expect(workerNode.workerUsage.tasks.executed).toBeGreaterThanOrEqual(0)
- expect(workerNode.workerUsage.tasks.executed).toBeLessThanOrEqual(
+ expect(workerNode.usage.tasks.executed).toBeGreaterThanOrEqual(0)
+ expect(workerNode.usage.tasks.executed).toBeLessThanOrEqual(
max * maxMultiplier
)
- expect(workerNode.workerUsage.runTime.aggregate).toBeGreaterThanOrEqual(0)
- expect(workerNode.workerUsage.waitTime.aggregate).toBeGreaterThanOrEqual(
- 0
- )
+ expect(workerNode.usage.runTime.aggregate).toBeGreaterThanOrEqual(0)
+ expect(workerNode.usage.waitTime.aggregate).toBeGreaterThanOrEqual(0)
}
// We need to clean up the resources after our test
await pool.destroy()
}
await Promise.all(promises)
for (const workerNode of pool.workerNodes) {
- expect(workerNode.workerUsage).toStrictEqual({
+ expect(workerNode.usage).toStrictEqual({
tasks: {
executed: expect.any(Number),
executing: 0,
utilization: expect.any(Number)
}
})
- expect(workerNode.workerUsage.tasks.executed).toBeGreaterThanOrEqual(0)
- expect(workerNode.workerUsage.tasks.executed).toBeLessThanOrEqual(
+ expect(workerNode.usage.tasks.executed).toBeGreaterThanOrEqual(0)
+ expect(workerNode.usage.tasks.executed).toBeLessThanOrEqual(
max * maxMultiplier
)
- expect(workerNode.workerUsage.elu.utilization).toBeGreaterThanOrEqual(0)
- expect(workerNode.workerUsage.elu.utilization).toBeLessThanOrEqual(1)
+ expect(workerNode.usage.elu.utilization).toBeGreaterThanOrEqual(0)
+ expect(workerNode.usage.elu.utilization).toBeLessThanOrEqual(1)
}
// We need to clean up the resources after our test
await pool.destroy()
}
await Promise.all(promises)
for (const workerNode of pool.workerNodes) {
- expect(workerNode.workerUsage).toStrictEqual({
+ expect(workerNode.usage).toStrictEqual({
tasks: {
executed: expect.any(Number),
executing: 0,
utilization: expect.any(Number)
}
})
- expect(workerNode.workerUsage.tasks.executed).toBeGreaterThanOrEqual(0)
- expect(workerNode.workerUsage.tasks.executed).toBeLessThanOrEqual(
+ expect(workerNode.usage.tasks.executed).toBeGreaterThanOrEqual(0)
+ expect(workerNode.usage.tasks.executed).toBeLessThanOrEqual(
max * maxMultiplier
)
- expect(workerNode.workerUsage.elu.utilization).toBeGreaterThanOrEqual(0)
- expect(workerNode.workerUsage.elu.utilization).toBeLessThanOrEqual(1)
+ expect(workerNode.usage.elu.utilization).toBeGreaterThanOrEqual(0)
+ expect(workerNode.usage.elu.utilization).toBeLessThanOrEqual(1)
}
// We need to clean up the resources after our test
await pool.destroy()
}
await Promise.all(promises)
for (const workerNode of pool.workerNodes) {
- expect(workerNode.workerUsage).toStrictEqual({
+ expect(workerNode.usage).toStrictEqual({
tasks: {
executed: expect.any(Number),
executing: 0,
utilization: expect.any(Number)
}
})
- expect(workerNode.workerUsage.tasks.executed).toBeGreaterThanOrEqual(0)
- expect(workerNode.workerUsage.tasks.executed).toBeLessThanOrEqual(
+ expect(workerNode.usage.tasks.executed).toBeGreaterThanOrEqual(0)
+ expect(workerNode.usage.tasks.executed).toBeLessThanOrEqual(
max * maxMultiplier
)
- expect(workerNode.workerUsage.runTime.aggregate).toBeGreaterThanOrEqual(0)
- expect(workerNode.workerUsage.runTime.average).toBeGreaterThanOrEqual(0)
- expect(workerNode.workerUsage.elu.utilization).toBeGreaterThanOrEqual(0)
- expect(workerNode.workerUsage.elu.utilization).toBeLessThanOrEqual(1)
+ expect(workerNode.usage.runTime.aggregate).toBeGreaterThanOrEqual(0)
+ expect(workerNode.usage.runTime.average).toBeGreaterThanOrEqual(0)
+ expect(workerNode.usage.elu.utilization).toBeGreaterThanOrEqual(0)
+ expect(workerNode.usage.elu.utilization).toBeLessThanOrEqual(1)
}
expect(
pool.workerChoiceStrategyContext.workerChoiceStrategies.get(
}
await Promise.all(promises)
for (const workerNode of pool.workerNodes) {
- expect(workerNode.workerUsage).toStrictEqual({
+ expect(workerNode.usage).toStrictEqual({
tasks: {
executed: expect.any(Number),
executing: 0,
utilization: expect.any(Number)
}
})
- expect(workerNode.workerUsage.tasks.executed).toBeGreaterThanOrEqual(0)
- expect(workerNode.workerUsage.tasks.executed).toBeLessThanOrEqual(
+ expect(workerNode.usage.tasks.executed).toBeGreaterThanOrEqual(0)
+ expect(workerNode.usage.tasks.executed).toBeLessThanOrEqual(
max * maxMultiplier
)
- expect(workerNode.workerUsage.runTime.aggregate).toBeGreaterThanOrEqual(0)
- expect(workerNode.workerUsage.runTime.average).toBeGreaterThanOrEqual(0)
- expect(workerNode.workerUsage.elu.utilization).toBeGreaterThanOrEqual(0)
- expect(workerNode.workerUsage.elu.utilization).toBeLessThanOrEqual(1)
+ expect(workerNode.usage.runTime.aggregate).toBeGreaterThanOrEqual(0)
+ expect(workerNode.usage.runTime.average).toBeGreaterThanOrEqual(0)
+ expect(workerNode.usage.elu.utilization).toBeGreaterThanOrEqual(0)
+ expect(workerNode.usage.elu.utilization).toBeLessThanOrEqual(1)
}
expect(
pool.workerChoiceStrategyContext.workerChoiceStrategies.get(
}
await Promise.all(promises)
for (const workerNode of pool.workerNodes) {
- expect(workerNode.workerUsage).toStrictEqual({
+ expect(workerNode.usage).toStrictEqual({
tasks: {
executed: expect.any(Number),
executing: 0,
utilization: expect.any(Number)
}
})
- expect(workerNode.workerUsage.tasks.executed).toBeGreaterThanOrEqual(0)
- expect(workerNode.workerUsage.tasks.executed).toBeLessThanOrEqual(
+ expect(workerNode.usage.tasks.executed).toBeGreaterThanOrEqual(0)
+ expect(workerNode.usage.tasks.executed).toBeLessThanOrEqual(
max * maxMultiplier
)
- expect(workerNode.workerUsage.runTime.aggregate).toBeGreaterThanOrEqual(0)
- expect(workerNode.workerUsage.runTime.median).toBeGreaterThanOrEqual(0)
- expect(workerNode.workerUsage.elu.utilization).toBeGreaterThanOrEqual(0)
- expect(workerNode.workerUsage.elu.utilization).toBeLessThanOrEqual(1)
+ expect(workerNode.usage.runTime.aggregate).toBeGreaterThanOrEqual(0)
+ expect(workerNode.usage.runTime.median).toBeGreaterThanOrEqual(0)
+ expect(workerNode.usage.elu.utilization).toBeGreaterThanOrEqual(0)
+ expect(workerNode.usage.elu.utilization).toBeLessThanOrEqual(1)
}
expect(
pool.workerChoiceStrategyContext.workerChoiceStrategies.get(
}
await Promise.all(promises)
for (const workerNode of pool.workerNodes) {
- expect(workerNode.workerUsage).toStrictEqual({
+ expect(workerNode.usage).toStrictEqual({
tasks: {
executed: expect.any(Number),
executing: 0,
utilization: 0
}
})
- expect(workerNode.workerUsage.tasks.executed).toBeGreaterThanOrEqual(0)
- expect(workerNode.workerUsage.tasks.executed).toBeLessThanOrEqual(
+ expect(workerNode.usage.tasks.executed).toBeGreaterThanOrEqual(0)
+ expect(workerNode.usage.tasks.executed).toBeLessThanOrEqual(
max * maxMultiplier
)
- expect(workerNode.workerUsage.runTime.aggregate).toBeGreaterThanOrEqual(0)
- expect(workerNode.workerUsage.runTime.average).toBeGreaterThanOrEqual(0)
+ expect(workerNode.usage.runTime.aggregate).toBeGreaterThanOrEqual(0)
+ expect(workerNode.usage.runTime.average).toBeGreaterThanOrEqual(0)
}
expect(
pool.workerChoiceStrategyContext.workerChoiceStrategies.get(
}
await Promise.all(promises)
for (const workerNode of pool.workerNodes) {
- expect(workerNode.workerUsage).toStrictEqual({
+ expect(workerNode.usage).toStrictEqual({
tasks: {
executed: expect.any(Number),
executing: 0,
utilization: 0
}
})
- expect(workerNode.workerUsage.tasks.executed).toBeGreaterThanOrEqual(0)
- expect(workerNode.workerUsage.tasks.executed).toBeLessThanOrEqual(
+ expect(workerNode.usage.tasks.executed).toBeGreaterThanOrEqual(0)
+ expect(workerNode.usage.tasks.executed).toBeLessThanOrEqual(
max * maxMultiplier
)
- expect(workerNode.workerUsage.runTime.aggregate).toBeGreaterThan(0)
- expect(workerNode.workerUsage.runTime.average).toBeGreaterThan(0)
+ expect(workerNode.usage.runTime.aggregate).toBeGreaterThan(0)
+ expect(workerNode.usage.runTime.average).toBeGreaterThan(0)
}
expect(
pool.workerChoiceStrategyContext.workerChoiceStrategies.get(
}
await Promise.all(promises)
for (const workerNode of pool.workerNodes) {
- expect(workerNode.workerUsage).toStrictEqual({
+ expect(workerNode.usage).toStrictEqual({
tasks: {
executed: expect.any(Number),
executing: 0,
utilization: 0
}
})
- expect(workerNode.workerUsage.tasks.executed).toBeGreaterThanOrEqual(0)
- expect(workerNode.workerUsage.tasks.executed).toBeLessThanOrEqual(
+ expect(workerNode.usage.tasks.executed).toBeGreaterThanOrEqual(0)
+ expect(workerNode.usage.tasks.executed).toBeLessThanOrEqual(
max * maxMultiplier
)
- expect(workerNode.workerUsage.runTime.aggregate).toBeGreaterThan(0)
- expect(workerNode.workerUsage.runTime.median).toBeGreaterThan(0)
+ expect(workerNode.usage.runTime.aggregate).toBeGreaterThan(0)
+ expect(workerNode.usage.runTime.median).toBeGreaterThan(0)
}
expect(
pool.workerChoiceStrategyContext.workerChoiceStrategies.get(
}
await Promise.all(promises)
for (const workerNode of pool.workerNodes) {
- expect(workerNode.workerUsage).toStrictEqual({
+ expect(workerNode.usage).toStrictEqual({
tasks: {
executed: maxMultiplier,
executing: 0,
}
await Promise.all(promises)
for (const workerNode of pool.workerNodes) {
- expect(workerNode.workerUsage).toStrictEqual({
+ expect(workerNode.usage).toStrictEqual({
tasks: {
executed: maxMultiplier,
executing: 0,
}
expect(promises.size).toBe(numberOfThreads * maxMultiplier)
for (const workerNode of queuePool.workerNodes) {
- expect(workerNode.workerUsage.tasks.executing).toBeLessThanOrEqual(
+ expect(workerNode.usage.tasks.executing).toBeLessThanOrEqual(
queuePool.opts.tasksQueueOptions.concurrency
)
- expect(workerNode.workerUsage.tasks.executed).toBe(0)
- expect(workerNode.workerUsage.tasks.queued).toBeGreaterThan(0)
- expect(workerNode.workerUsage.tasks.maxQueued).toBeGreaterThan(0)
+ expect(workerNode.usage.tasks.executed).toBe(0)
+ expect(workerNode.usage.tasks.queued).toBeGreaterThan(0)
+ expect(workerNode.usage.tasks.maxQueued).toBeGreaterThan(0)
}
expect(queuePool.info.executingTasks).toBe(numberOfThreads)
expect(queuePool.info.queuedTasks).toBe(
)
await Promise.all(promises)
for (const workerNode of queuePool.workerNodes) {
- expect(workerNode.workerUsage.tasks.executing).toBe(0)
- expect(workerNode.workerUsage.tasks.executed).toBeGreaterThan(0)
- expect(workerNode.workerUsage.tasks.executed).toBeLessThanOrEqual(
- maxMultiplier
- )
- expect(workerNode.workerUsage.tasks.queued).toBe(0)
- expect(workerNode.workerUsage.tasks.maxQueued).toBe(1)
+ expect(workerNode.usage.tasks.executing).toBe(0)
+ expect(workerNode.usage.tasks.executed).toBeGreaterThan(0)
+ expect(workerNode.usage.tasks.executed).toBeLessThanOrEqual(maxMultiplier)
+ expect(workerNode.usage.tasks.queued).toBe(0)
+ expect(workerNode.usage.tasks.maxQueued).toBe(1)
}
})
})
expect(
errorPool.workerNodes.some(
- workerNode => workerNode.workerUsage.tasks.failed === 1
+ workerNode => workerNode.usage.tasks.failed === 1
)
).toBe(true)
})
})
expect(
asyncErrorPool.workerNodes.some(
- workerNode => workerNode.workerUsage.tasks.failed === 1
+ workerNode => workerNode.usage.tasks.failed === 1
)
).toBe(true)
})
})
it('Verify that thread pool options are checked', async () => {
- const workerFilePath = './tests/worker-files/cluster/testWorker.js'
+ const workerFilePath = './tests/worker-files/thread/testWorker.js'
let pool1 = new FixedThreadPool(numberOfThreads, workerFilePath)
expect(pool1.opts.workerOptions).toBeUndefined()
await pool1.destroy()