"max_training_drawdown_pct": 0.02,
"max_trade_duration_candles": 96, // Timeout exit value used with force_actions
"force_actions": false, // Utilize minimal_roi, stoploss, and max_trade_duration_candles as TP/SL/Timeout in the environment
- "n_envs": 32, // Number of DummyVecEnv environments
+ "n_envs": 32, // Number of DummyVecEnv or SubprocVecEnv environments
+ "multiprocessing": false, // Use SubprocVecEnv if n_envs>1 (otherwise DummyVecEnv)
"frame_stacking": 2, // Number of VecFrameStack stacks (set > 1 to use)
"lr_schedule": false, // Enable learning rate linear schedule
"cr_schedule": false, // Enable clip range linear schedule
if total_timesteps is not None and not np.isclose(total_timesteps, 0.0):
try:
progress_done = float(self.num_timesteps) / float(total_timesteps)
- progress_done = (
- 0.0
- if progress_done < 0
- else (1.0 if progress_done > 1.0 else progress_done)
- )
+ progress_done = np.clip(progress_done, 0.0, 1.0)
except Exception:
progress_done = 0.0
else: