### Added
- Support multiple task functions per worker.
+- Add custom worker weights support to worker choice strategies options.
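A minimal usage sketch for the custom worker weights option above, assuming the `FixedThreadPool` constructor and the `workerChoiceStrategyOptions` pool option exposed by this library (the import path, worker file path and weight values are illustrative):

```ts
import { FixedThreadPool, WorkerChoiceStrategies } from 'poolifier'

// Two worker threads weighted at 200 ms and 400 ms of maximum average task runtime.
const pool = new FixedThreadPool(2, './worker.js', {
  workerChoiceStrategy: WorkerChoiceStrategies.WEIGHTED_ROUND_ROBIN,
  workerChoiceStrategyOptions: {
    // One weight per worker node key is required.
    weights: { 0: 200, 1: 400 }
  }
})
```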
### Changed
/** @inheritDoc */
public abstract get type (): PoolType
+ /** @inheritDoc */
+ public abstract get size (): number
+
/**
* Number of tasks running in the pool.
*/
return PoolType.DYNAMIC
}
+ /** @inheritDoc */
+ public get size (): number {
+ return this.max
+ }
+
/** @inheritDoc */
protected get full (): boolean {
return this.workerNodes.length === this.max
return PoolType.FIXED
}
+ /** @inheritDoc */
+ public get size (): number {
+ return this.numberOfWorkers
+ }
+
/** @inheritDoc */
protected get full (): boolean {
return this.workerNodes.length === this.numberOfWorkers
* If it is `'dynamic'`, it provides the `max` property.
*/
readonly type: PoolType
+ /**
+ * Pool maximum size.
+ */
+ readonly size: number
/**
* Pool worker nodes.
*/
import { DEFAULT_WORKER_CHOICE_STRATEGY_OPTIONS } from '../../utils'
-import { type IPool, PoolType } from '../pool'
+import type { IPool } from '../pool'
import type { IWorker } from '../worker'
import type {
IWorkerChoiceStrategy,
*/
private toggleFindLastFreeWorkerNodeKey: boolean = false
/** @inheritDoc */
- protected readonly isDynamicPool: boolean
- /** @inheritDoc */
public readonly requiredStatistics: RequiredStatistics = {
runTime: false,
avgRunTime: false,
protected readonly pool: IPool<Worker, Data, Response>,
protected opts: WorkerChoiceStrategyOptions = DEFAULT_WORKER_CHOICE_STRATEGY_OPTIONS
) {
- this.isDynamicPool = this.pool.type === PoolType.DYNAMIC
this.choose = this.choose.bind(this)
}
this.requiredStatistics.avgRunTime = true
this.requiredStatistics.medRunTime = opts.medRunTime as boolean
}
+ if (
+ opts.weights != null &&
+ Object.keys(opts.weights).length < this.pool.size
+ ) {
+ throw new Error(
+ 'Worker choice strategy options must have a weight for each worker node.'
+ )
+ }
}
/** @inheritDoc */
}
/**
- * Worker last virtual task execution timestamp.
+ * Workers' virtual task execution timestamps.
*/
- private readonly workerLastVirtualTaskTimestamp: Map<
- number,
- WorkerVirtualTaskTimestamp
- > = new Map<number, WorkerVirtualTaskTimestamp>()
+ private workersVirtualTaskTimestamp: WorkerVirtualTaskTimestamp[] = []
/** @inheritDoc */
public constructor (
/** @inheritDoc */
public reset (): boolean {
- this.workerLastVirtualTaskTimestamp.clear()
+ this.workersVirtualTaskTimestamp = []
return true
}
public choose (): number {
let minWorkerVirtualTaskEndTimestamp = Infinity
let chosenWorkerNodeKey!: number
- for (const [index] of this.pool.workerNodes.entries()) {
- this.computeWorkerLastVirtualTaskTimestamp(index)
+ for (const [workerNodeKey] of this.pool.workerNodes.entries()) {
+ this.computeWorkerVirtualTaskTimestamp(workerNodeKey)
const workerLastVirtualTaskEndTimestamp =
- this.workerLastVirtualTaskTimestamp.get(index)?.end ?? 0
+ this.workersVirtualTaskTimestamp[workerNodeKey]?.end ?? 0
if (
workerLastVirtualTaskEndTimestamp < minWorkerVirtualTaskEndTimestamp
) {
minWorkerVirtualTaskEndTimestamp = workerLastVirtualTaskEndTimestamp
- chosenWorkerNodeKey = index
+ chosenWorkerNodeKey = workerNodeKey
}
}
return chosenWorkerNodeKey
/** @inheritDoc */
public remove (workerNodeKey: number): boolean {
- const deleted = this.workerLastVirtualTaskTimestamp.delete(workerNodeKey)
- for (const [key, value] of this.workerLastVirtualTaskTimestamp) {
- if (key > workerNodeKey) {
- this.workerLastVirtualTaskTimestamp.set(key - 1, value)
- }
- }
- return deleted
+ this.workersVirtualTaskTimestamp.splice(workerNodeKey, 1)
+ return true
}
/**
- * Computes worker last virtual task timestamp.
+ * Computes worker virtual task timestamp.
*
* @param workerNodeKey - The worker node key.
*/
- private computeWorkerLastVirtualTaskTimestamp (workerNodeKey: number): void {
+ private computeWorkerVirtualTaskTimestamp (workerNodeKey: number): void {
const workerVirtualTaskStartTimestamp = Math.max(
performance.now(),
- this.workerLastVirtualTaskTimestamp.get(workerNodeKey)?.end ?? -Infinity
+ this.workersVirtualTaskTimestamp[workerNodeKey]?.end ?? -Infinity
)
const workerVirtualTaskTRunTime = this.requiredStatistics.medRunTime
? this.pool.workerNodes[workerNodeKey].tasksUsage.medRunTime
: this.pool.workerNodes[workerNodeKey].tasksUsage.avgRunTime
- this.workerLastVirtualTaskTimestamp.set(workerNodeKey, {
+ this.workersVirtualTaskTimestamp[workerNodeKey] = {
start: workerVirtualTaskStartTimestamp,
end: workerVirtualTaskStartTimestamp + (workerVirtualTaskTRunTime ?? 0)
- })
+ }
}
}
}
let minRunTime = Infinity
let lessBusyWorkerNodeKey!: number
- for (const [index, workerNode] of this.pool.workerNodes.entries()) {
+ for (const [workerNodeKey, workerNode] of this.pool.workerNodes.entries()) {
const workerRunTime = workerNode.tasksUsage.runTime
if (workerRunTime === 0) {
- return index
+ return workerNodeKey
} else if (workerRunTime < minRunTime) {
minRunTime = workerRunTime
- lessBusyWorkerNodeKey = index
+ lessBusyWorkerNodeKey = workerNodeKey
}
}
return lessBusyWorkerNodeKey
}
let minNumberOfTasks = Infinity
let lessUsedWorkerNodeKey!: number
- for (const [index, workerNode] of this.pool.workerNodes.entries()) {
+ for (const [workerNodeKey, workerNode] of this.pool.workerNodes.entries()) {
const tasksUsage = workerNode.tasksUsage
const workerTasks = tasksUsage.run + tasksUsage.running
if (workerTasks === 0) {
- return index
+ return workerNodeKey
} else if (workerTasks < minNumberOfTasks) {
minNumberOfTasks = workerTasks
- lessUsedWorkerNodeKey = index
+ lessUsedWorkerNodeKey = workerNodeKey
}
}
return lessUsedWorkerNodeKey
* @defaultValue false
*/
medRunTime?: boolean
+ /**
+ * Worker weights to use with the weighted round robin worker selection strategy.
+ * A weight is the maximum average or median task runtime for a worker, in milliseconds.
+ *
+ * @defaultValue Worker weights computed automatically from the CPU performance.
+ */
+ weights?: Record<number, number>
}
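// Illustrative sketch, not part of this change set: a worker choice strategy
// options value providing one weight per worker node key, each weight being
// the expected maximum average or median task runtime in milliseconds.
// The pool size of 3 and the weight values are assumptions for illustration.
const exampleWeightedOptions: WorkerChoiceStrategyOptions = {
  medRunTime: true,
  weights: { 0: 100, 1: 100, 2: 250 }
}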
/**
WorkerChoiceStrategyOptions
} from './selection-strategies-types'
-/**
- * Virtual task runtime.
- */
-interface TaskRunTime {
- weight: number
- runTime: number
-}
-
/**
* Selects the next worker with a weighted round robin scheduling algorithm.
* Loosely modeled after the weighted round robin queueing algorithm: https://en.wikipedia.org/wiki/Weighted_round_robin.
*/
private readonly defaultWorkerWeight: number
/**
- * Workers' virtual task runtime.
+ * Worker virtual task runtime.
*/
- private readonly workersTaskRunTime: Map<number, TaskRunTime> = new Map<
- number,
- TaskRunTime
- >()
+ private workerVirtualTaskRunTime: number = 0
/** @inheritDoc */
public constructor (
) {
super(pool, opts)
this.checkOptions(this.opts)
- this.defaultWorkerWeight = this.computeWorkerWeight()
- this.initWorkersTaskRunTime()
+ this.defaultWorkerWeight = this.computeDefaultWorkerWeight()
}
/** @inheritDoc */
public reset (): boolean {
this.currentWorkerNodeId = 0
- this.workersTaskRunTime.clear()
- this.initWorkersTaskRunTime()
+ this.workerVirtualTaskRunTime = 0
return true
}
/** @inheritDoc */
public choose (): number {
const chosenWorkerNodeKey = this.currentWorkerNodeId
- if (
- this.isDynamicPool &&
- !this.workersTaskRunTime.has(chosenWorkerNodeKey)
- ) {
- this.initWorkerTaskRunTime(chosenWorkerNodeKey)
- }
- const workerTaskRunTime =
- this.workersTaskRunTime.get(chosenWorkerNodeKey)?.runTime ?? 0
+ const workerTaskRunTime = this.workerVirtualTaskRunTime ?? 0
const workerTaskWeight =
- this.workersTaskRunTime.get(chosenWorkerNodeKey)?.weight ??
- this.defaultWorkerWeight
+ this.opts.weights?.[chosenWorkerNodeKey] ?? this.defaultWorkerWeight
if (workerTaskRunTime < workerTaskWeight) {
- this.setWorkerTaskRunTime(
- chosenWorkerNodeKey,
- workerTaskWeight,
+ this.workerVirtualTaskRunTime =
workerTaskRunTime +
- (this.getWorkerVirtualTaskRunTime(chosenWorkerNodeKey) ?? 0)
- )
+ (this.getWorkerVirtualTaskRunTime(chosenWorkerNodeKey) ?? 0)
} else {
this.currentWorkerNodeId =
this.currentWorkerNodeId === this.pool.workerNodes.length - 1
? 0
: this.currentWorkerNodeId + 1
- this.setWorkerTaskRunTime(this.currentWorkerNodeId, workerTaskWeight, 0)
+ this.workerVirtualTaskRunTime = 0
}
return chosenWorkerNodeKey
}
? this.pool.workerNodes.length - 1
: this.currentWorkerNodeId
}
+ this.workerVirtualTaskRunTime = 0
}
- const deleted = this.workersTaskRunTime.delete(workerNodeKey)
- for (const [key, value] of this.workersTaskRunTime) {
- if (key > workerNodeKey) {
- this.workersTaskRunTime.set(key - 1, value)
- }
- }
- return deleted
- }
-
- private initWorkersTaskRunTime (): void {
- for (const [index] of this.pool.workerNodes.entries()) {
- this.initWorkerTaskRunTime(index)
- }
- }
-
- private initWorkerTaskRunTime (workerNodeKey: number): void {
- this.setWorkerTaskRunTime(workerNodeKey, this.defaultWorkerWeight, 0)
- }
-
- private setWorkerTaskRunTime (
- workerNodeKey: number,
- weight: number,
- runTime: number
- ): void {
- this.workersTaskRunTime.set(workerNodeKey, {
- weight,
- runTime
- })
+ return true
}
private getWorkerVirtualTaskRunTime (workerNodeKey: number): number {
: this.pool.workerNodes[workerNodeKey].tasksUsage.avgRunTime
}
- private computeWorkerWeight (): number {
+ private computeDefaultWorkerWeight (): number {
let cpusCycleTimeWeight = 0
for (const cpu of cpus()) {
// CPU estimated cycle time
return this.workerNodes.length === this.max
}
+ /** @inheritDoc */
+ public get size (): number {
+ return this.max
+ }
+
/** @inheritDoc */
protected get busy (): boolean {
return this.full && this.internalBusy()
return PoolType.FIXED
}
+ /** @inheritDoc */
+ public get size (): number {
+ return this.numberOfWorkers
+ }
+
/** @inheritDoc */
protected get full (): boolean {
return this.workerNodes.length === this.numberOfWorkers
).nextWorkerNodeId
).toBe(0)
} else if (workerChoiceStrategy === WorkerChoiceStrategies.FAIR_SHARE) {
- for (const workerNodeKey of pool.workerChoiceStrategyContext.workerChoiceStrategies
- .get(workerChoiceStrategy)
- .workerLastVirtualTaskTimestamp.keys()) {
- expect(
- pool.workerChoiceStrategyContext.workerChoiceStrategies
- .get(workerChoiceStrategy)
- .workerLastVirtualTaskTimestamp.get(workerNodeKey).start
- ).toBe(0)
- expect(
- pool.workerChoiceStrategyContext.workerChoiceStrategies
- .get(workerChoiceStrategy)
- .workerLastVirtualTaskTimestamp.get(workerNodeKey).end
- ).toBe(0)
- }
+ expect(
+ pool.workerChoiceStrategyContext.workerChoiceStrategies.get(
+ workerChoiceStrategy
+ ).workersVirtualTaskTimestamp
+ ).toBeInstanceOf(Array)
+ expect(
+ pool.workerChoiceStrategyContext.workerChoiceStrategies.get(
+ workerChoiceStrategy
+ ).workersVirtualTaskTimestamp.length
+ ).toBe(0)
} else if (
workerChoiceStrategy === WorkerChoiceStrategies.WEIGHTED_ROUND_ROBIN
) {
workerChoiceStrategy
).defaultWorkerWeight
).toBeGreaterThan(0)
- for (const workerNodeKey of pool.workerChoiceStrategyContext.workerChoiceStrategies
- .get(workerChoiceStrategy)
- .workersTaskRunTime.keys()) {
- expect(
- pool.workerChoiceStrategyContext.workerChoiceStrategies
- .get(workerChoiceStrategy)
- .workersTaskRunTime.get(workerNodeKey).weight
- ).toBeGreaterThan(0)
- expect(
- pool.workerChoiceStrategyContext.workerChoiceStrategies
- .get(workerChoiceStrategy)
- .workersTaskRunTime.get(workerNodeKey).runTime
- ).toBe(0)
- }
+ expect(
+ pool.workerChoiceStrategyContext.workerChoiceStrategies.get(
+ workerChoiceStrategy
+ ).workerVirtualTaskRunTime
+ ).toBe(0)
}
}
await pool.destroy()
expect(
pool.workerChoiceStrategyContext.workerChoiceStrategies.get(
pool.workerChoiceStrategyContext.workerChoiceStrategy
- ).workerLastVirtualTaskTimestamp.size
+ ).workersVirtualTaskTimestamp.length
).toBe(pool.workerNodes.length)
// We need to clean up the resources after our test
await pool.destroy()
promises.push(pool.execute())
}
await Promise.all(promises)
- // if (process.platform !== 'win32') {
- // expect(
- // pool.workerChoiceStrategyContext.workerChoiceStrategies.get(
- // pool.workerChoiceStrategyContext.workerChoiceStrategy
- // ).workerLastVirtualTaskTimestamp.size
- // ).toBe(pool.workerNodes.length)
- // }
+ if (process.platform !== 'win32') {
+ expect(
+ pool.workerChoiceStrategyContext.workerChoiceStrategies.get(
+ pool.workerChoiceStrategyContext.workerChoiceStrategy
+ ).workersVirtualTaskTimestamp.length
+ ).toBe(pool.workerNodes.length)
+ }
// We need to clean up the resources after our test
await pool.destroy()
})
expect(workerNode.tasksUsage.medRunTime).toBeDefined()
expect(workerNode.tasksUsage.medRunTime).toBeGreaterThan(0)
}
- // if (process.platform !== 'win32') {
- // expect(
- // pool.workerChoiceStrategyContext.workerChoiceStrategies.get(
- // pool.workerChoiceStrategyContext.workerChoiceStrategy
- // ).workerLastVirtualTaskTimestamp.size
- // ).toBe(pool.workerNodes.length)
- // }
+ if (process.platform !== 'win32') {
+ expect(
+ pool.workerChoiceStrategyContext.workerChoiceStrategies.get(
+ pool.workerChoiceStrategyContext.workerChoiceStrategy
+ ).workersVirtualTaskTimestamp.length
+ ).toBe(pool.workerNodes.length)
+ }
// We need to clean up the resources after our test
await pool.destroy()
})
expect(
pool.workerChoiceStrategyContext.workerChoiceStrategies.get(
workerChoiceStrategy
- ).workerLastVirtualTaskTimestamp
- ).toBeDefined()
+ ).workersVirtualTaskTimestamp
+ ).toBeInstanceOf(Array)
+ expect(
+ pool.workerChoiceStrategyContext.workerChoiceStrategies.get(
+ workerChoiceStrategy
+ ).workersVirtualTaskTimestamp.length
+ ).toBe(0)
+ pool.workerChoiceStrategyContext.workerChoiceStrategies.get(
+ workerChoiceStrategy
+ ).workersVirtualTaskTimestamp[0] = 0
+ expect(
+ pool.workerChoiceStrategyContext.workerChoiceStrategies.get(
+ workerChoiceStrategy
+ ).workersVirtualTaskTimestamp.length
+ ).toBe(1)
pool.setWorkerChoiceStrategy(workerChoiceStrategy)
- for (const workerNodeKey of pool.workerChoiceStrategyContext.workerChoiceStrategies
- .get(pool.workerChoiceStrategyContext.workerChoiceStrategy)
- .workerLastVirtualTaskTimestamp.keys()) {
- expect(
- pool.workerChoiceStrategyContext.workerChoiceStrategies
- .get(pool.workerChoiceStrategyContext.workerChoiceStrategy)
- .workerLastVirtualTaskTimestamp.get(workerNodeKey).start
- ).toBe(0)
- expect(
- pool.workerChoiceStrategyContext.workerChoiceStrategies
- .get(pool.workerChoiceStrategyContext.workerChoiceStrategy)
- .workerLastVirtualTaskTimestamp.get(workerNodeKey).end
- ).toBe(0)
- }
+ expect(
+ pool.workerChoiceStrategyContext.workerChoiceStrategies.get(
+ workerChoiceStrategy
+ ).workersVirtualTaskTimestamp
+ ).toBeInstanceOf(Array)
+ expect(
+ pool.workerChoiceStrategyContext.workerChoiceStrategies.get(
+ workerChoiceStrategy
+ ).workersVirtualTaskTimestamp.length
+ ).toBe(0)
await pool.destroy()
pool = new DynamicThreadPool(
min,
expect(
pool.workerChoiceStrategyContext.workerChoiceStrategies.get(
workerChoiceStrategy
- ).workerLastVirtualTaskTimestamp
- ).toBeDefined()
+ ).workersVirtualTaskTimestamp
+ ).toBeInstanceOf(Array)
+ pool.workerChoiceStrategyContext.workerChoiceStrategies.get(
+ workerChoiceStrategy
+ ).workersVirtualTaskTimestamp[0] = 0
+ expect(
+ pool.workerChoiceStrategyContext.workerChoiceStrategies.get(
+ workerChoiceStrategy
+ ).workersVirtualTaskTimestamp.length
+ ).toBe(1)
pool.setWorkerChoiceStrategy(workerChoiceStrategy)
- for (const workerNodeKey of pool.workerChoiceStrategyContext.workerChoiceStrategies
- .get(pool.workerChoiceStrategyContext.workerChoiceStrategy)
- .workerLastVirtualTaskTimestamp.keys()) {
- expect(
- pool.workerChoiceStrategyContext.workerChoiceStrategies
- .get(pool.workerChoiceStrategyContext.workerChoiceStrategy)
- .workerLastVirtualTaskTimestamp.get(workerNodeKey).start
- ).toBe(0)
- expect(
- pool.workerChoiceStrategyContext.workerChoiceStrategies
- .get(pool.workerChoiceStrategyContext.workerChoiceStrategy)
- .workerLastVirtualTaskTimestamp.get(workerNodeKey).end
- ).toBe(0)
- }
+ expect(
+ pool.workerChoiceStrategyContext.workerChoiceStrategies.get(
+ workerChoiceStrategy
+ ).workersVirtualTaskTimestamp
+ ).toBeInstanceOf(Array)
+ expect(
+ pool.workerChoiceStrategyContext.workerChoiceStrategies.get(
+ workerChoiceStrategy
+ ).workersVirtualTaskTimestamp.length
+ ).toBe(0)
// We need to clean up the resources after our test
await pool.destroy()
})
expect(
pool.workerChoiceStrategyContext.workerChoiceStrategies.get(
pool.workerChoiceStrategyContext.workerChoiceStrategy
- ).workersTaskRunTime.size
- ).toBe(pool.workerNodes.length)
+ ).defaultWorkerWeight
+ ).toBeGreaterThan(0)
+ expect(
+ pool.workerChoiceStrategyContext.workerChoiceStrategies.get(
+ pool.workerChoiceStrategyContext.workerChoiceStrategy
+ ).workerVirtualTaskRunTime
+ ).toBeGreaterThanOrEqual(0)
// We need to clean up the resources after our test
await pool.destroy()
})
expect(
pool.workerChoiceStrategyContext.workerChoiceStrategies.get(
pool.workerChoiceStrategyContext.workerChoiceStrategy
- ).workersTaskRunTime.size
- ).toBe(pool.workerNodes.length)
+ ).defaultWorkerWeight
+ ).toBeGreaterThan(0)
+ expect(
+ pool.workerChoiceStrategyContext.workerChoiceStrategies.get(
+ pool.workerChoiceStrategyContext.workerChoiceStrategy
+ ).workerVirtualTaskRunTime
+ ).toBeGreaterThanOrEqual(0)
}
// We need to clean up the resources after our test
await pool.destroy()
expect(workerNode.tasksUsage.medRunTime).toBeDefined()
expect(workerNode.tasksUsage.medRunTime).toBeGreaterThan(0)
}
- // if (process.platform !== 'win32') {
- // expect(
- // pool.workerChoiceStrategyContext.workerChoiceStrategies.get(
- // pool.workerChoiceStrategyContext.workerChoiceStrategy
- // ).workersTaskRunTime.size
- // ).toBe(pool.workerNodes.length)
- // }
+ expect(
+ pool.workerChoiceStrategyContext.workerChoiceStrategies.get(
+ pool.workerChoiceStrategyContext.workerChoiceStrategy
+ ).defaultWorkerWeight
+ ).toBeGreaterThan(0)
+ expect(
+ pool.workerChoiceStrategyContext.workerChoiceStrategies.get(
+ pool.workerChoiceStrategyContext.workerChoiceStrategy
+ ).workerVirtualTaskRunTime
+ ).toBeGreaterThanOrEqual(0)
// We need to clean up the resources after our test
await pool.destroy()
})
expect(
pool.workerChoiceStrategyContext.workerChoiceStrategies.get(
workerChoiceStrategy
- ).workersTaskRunTime
+ ).workerVirtualTaskRunTime
).toBeDefined()
pool.setWorkerChoiceStrategy(workerChoiceStrategy)
expect(
pool.workerChoiceStrategyContext.workerChoiceStrategy
).defaultWorkerWeight
).toBeGreaterThan(0)
- for (const workerNodeKey of pool.workerChoiceStrategyContext.workerChoiceStrategies
- .get(pool.workerChoiceStrategyContext.workerChoiceStrategy)
- .workersTaskRunTime.keys()) {
- expect(
- pool.workerChoiceStrategyContext.workerChoiceStrategies
- .get(pool.workerChoiceStrategyContext.workerChoiceStrategy)
- .workersTaskRunTime.get(workerNodeKey).runTime
- ).toBe(0)
- }
+ expect(
+ pool.workerChoiceStrategyContext.workerChoiceStrategies.get(
+ workerChoiceStrategy
+ ).workerVirtualTaskRunTime
+ ).toBe(0)
await pool.destroy()
pool = new DynamicThreadPool(
min,
expect(
pool.workerChoiceStrategyContext.workerChoiceStrategies.get(
workerChoiceStrategy
- ).workersTaskRunTime
+ ).workerVirtualTaskRunTime
).toBeDefined()
pool.setWorkerChoiceStrategy(workerChoiceStrategy)
expect(
pool.workerChoiceStrategyContext.workerChoiceStrategy
).defaultWorkerWeight
).toBeGreaterThan(0)
- for (const workerNodeKey of pool.workerChoiceStrategyContext.workerChoiceStrategies
- .get(pool.workerChoiceStrategyContext.workerChoiceStrategy)
- .workersTaskRunTime.keys()) {
- expect(
- pool.workerChoiceStrategyContext.workerChoiceStrategies
- .get(pool.workerChoiceStrategyContext.workerChoiceStrategy)
- .workersTaskRunTime.get(workerNodeKey).runTime
- ).toBe(0)
- }
+ expect(
+ pool.workerChoiceStrategyContext.workerChoiceStrategies.get(
+ workerChoiceStrategy
+ ).workerVirtualTaskRunTime
+ ).toBe(0)
// We need to clean up the resources after our test
await pool.destroy()
})
it('Verify that reset() resets internals', () => {
const strategy = new WeightedRoundRobinWorkerChoiceStrategy(pool)
- strategy.currentWorkerId = TestUtils.generateRandomInteger()
- const workersTaskRunTimeClearStub = sinon
- .stub(strategy.workersTaskRunTime, 'clear')
- .returns()
- const initWorkersTaskRunTimeStub = sinon
- .stub(strategy, 'initWorkersTaskRunTime')
- .returns()
+ strategy.currentWorkerNodeId = TestUtils.generateRandomInteger(
+ Number.MAX_SAFE_INTEGER,
+ 1
+ )
+ strategy.workerVirtualTaskRunTime = TestUtils.generateRandomInteger(
+ Number.MAX_SAFE_INTEGER,
+ 1
+ )
const resetResult = strategy.reset()
expect(resetResult).toBe(true)
expect(strategy.currentWorkerNodeId).toBe(0)
- expect(workersTaskRunTimeClearStub.calledOnce).toBe(true)
- expect(initWorkersTaskRunTimeStub.calledOnce).toBe(true)
+ expect(strategy.workerVirtualTaskRunTime).toBe(0)
})
})
const workerChoiceStrategyContext = new WorkerChoiceStrategyContext(
fixedPool
)
- expect(
- workerChoiceStrategyContext.workerChoiceStrategies.get(
- workerChoiceStrategy
- ).isDynamicPool
- ).toBe(false)
expect(
workerChoiceStrategyContext.workerChoiceStrategies.get(
workerChoiceStrategy
const workerChoiceStrategyContext = new WorkerChoiceStrategyContext(
dynamicPool
)
- expect(
- workerChoiceStrategyContext.workerChoiceStrategies.get(
- workerChoiceStrategy
- ).isDynamicPool
- ).toBe(true)
expect(
workerChoiceStrategyContext.workerChoiceStrategies.get(
workerChoiceStrategy
const workerChoiceStrategyContext = new WorkerChoiceStrategyContext(
fixedPool
)
- expect(
- workerChoiceStrategyContext.workerChoiceStrategies.get(
- workerChoiceStrategy
- ).isDynamicPool
- ).toBe(false)
workerChoiceStrategyContext.setWorkerChoiceStrategy(workerChoiceStrategy)
expect(
workerChoiceStrategyContext.workerChoiceStrategies.get(
const workerChoiceStrategyContext = new WorkerChoiceStrategyContext(
dynamicPool
)
- expect(
- workerChoiceStrategyContext.workerChoiceStrategies.get(
- workerChoiceStrategy
- ).isDynamicPool
- ).toBe(true)
workerChoiceStrategyContext.setWorkerChoiceStrategy(workerChoiceStrategy)
expect(
workerChoiceStrategyContext.workerChoiceStrategies.get(
const workerChoiceStrategyContext = new WorkerChoiceStrategyContext(
fixedPool
)
- expect(
- workerChoiceStrategyContext.workerChoiceStrategies.get(
- workerChoiceStrategy
- ).isDynamicPool
- ).toBe(false)
workerChoiceStrategyContext.setWorkerChoiceStrategy(workerChoiceStrategy)
expect(
workerChoiceStrategyContext.workerChoiceStrategies.get(
const workerChoiceStrategyContext = new WorkerChoiceStrategyContext(
dynamicPool
)
- expect(
- workerChoiceStrategyContext.workerChoiceStrategies.get(
- workerChoiceStrategy
- ).isDynamicPool
- ).toBe(true)
workerChoiceStrategyContext.setWorkerChoiceStrategy(workerChoiceStrategy)
expect(
workerChoiceStrategyContext.workerChoiceStrategies.get(
const workerChoiceStrategyContext = new WorkerChoiceStrategyContext(
fixedPool
)
- expect(
- workerChoiceStrategyContext.workerChoiceStrategies.get(
- workerChoiceStrategy
- ).isDynamicPool
- ).toBe(false)
workerChoiceStrategyContext.setWorkerChoiceStrategy(workerChoiceStrategy)
expect(
workerChoiceStrategyContext.workerChoiceStrategies.get(
const workerChoiceStrategyContext = new WorkerChoiceStrategyContext(
dynamicPool
)
- expect(
- workerChoiceStrategyContext.workerChoiceStrategies.get(
- workerChoiceStrategy
- ).isDynamicPool
- ).toBe(true)
workerChoiceStrategyContext.setWorkerChoiceStrategy(workerChoiceStrategy)
expect(
workerChoiceStrategyContext.workerChoiceStrategies.get(
const workerChoiceStrategyContext = new WorkerChoiceStrategyContext(
fixedPool
)
- expect(
- workerChoiceStrategyContext.workerChoiceStrategies.get(
- workerChoiceStrategy
- ).isDynamicPool
- ).toBe(false)
workerChoiceStrategyContext.setWorkerChoiceStrategy(workerChoiceStrategy)
expect(
workerChoiceStrategyContext.workerChoiceStrategies.get(
const workerChoiceStrategyContext = new WorkerChoiceStrategyContext(
dynamicPool
)
- expect(
- workerChoiceStrategyContext.workerChoiceStrategies.get(
- workerChoiceStrategy
- ).isDynamicPool
- ).toBe(true)
workerChoiceStrategyContext.setWorkerChoiceStrategy(workerChoiceStrategy)
expect(
workerChoiceStrategyContext.workerChoiceStrategies.get(