Piment Noir Git Repositories - freqai-strategies.git/commitdiff
fix(qav3): ensure optuna settings can't hog CPU resources
author Jérôme Benoit <jerome.benoit@piment-noir.org>
Mon, 24 Feb 2025 11:48:48 +0000 (12:48 +0100)
committer Jérôme Benoit <jerome.benoit@piment-noir.org>
Mon, 24 Feb 2025 11:48:48 +0000 (12:48 +0100)
Signed-off-by: Jérôme Benoit <jerome.benoit@piment-noir.org>
ReforceXY/user_data/config-template.json
quickadapter/user_data/freqaimodels/LightGBMRegressorQuickAdapterV35.py
quickadapter/user_data/freqaimodels/XGBoostRegressorQuickAdapterV35.py

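The cap works by clamping the configured Optuna n_jobs against a quarter of the available system threads, so hyperopt parallelism can no longer saturate the host. A minimal standalone sketch of that clamp (the capped_optuna_n_jobs helper is hypothetical; the models use self.max_system_threads, whose derivation is outside this diff):

def capped_optuna_n_jobs(configured_n_jobs: int, system_threads: int) -> int:
    # Clamp Optuna parallelism to at most a quarter of the system threads,
    # but never below a single worker.
    return min(configured_n_jobs, max(system_threads // 4, 1))

# Example: on a 16-thread host a configured n_jobs of 12 is reduced to 4,
# while a configured n_jobs of 2 is left untouched.
print(capped_optuna_n_jobs(12, 16))  # -> 4
print(capped_optuna_n_jobs(2, 16))   # -> 2
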
index 38438ccca853e5f40e5daba9f3e10a126fdc65cb..1de66d2df962cba48ad67dee49e04a7dc87220b8 100644 (file)
       "max_training_drawdown_pct": 0.02,
       "max_trade_duration_candles": 96, // Timeout exit value used with force_actions
       "force_actions": false, // Utilize minimal_roi, stoploss, and max_trade_duration_candles as TP/SL/Timeout in the environment
-      "n_envs": 1, // Number of DummyVecEnv environments
-      "frame_staking": 0, // Number of VecFrameStack stacks (set > 1 to use)
+      "n_envs": 32, // Number of DummyVecEnv environments
+      "frame_staking": 4, // Number of VecFrameStack stacks (set > 1 to use)
       "lr_schedule": false, // Enable learning rate linear schedule
       "cr_schedule": false, // Enable clip range linear schedule
       "max_no_improvement_evals": 0, // Maximum consecutive evaluations without a new best model
index 3fd426969f2b284e456a8f56cbc99316c010a5a7..ae470cecfe2182c6868dcd75c0f2080e11fd85cc 100644 (file)
@@ -325,6 +325,7 @@ class LightGBMRegressorQuickAdapterV35(BaseRegressionModel):
             storage=storage,
         )
         self.optuna_hp_enqueue_previous_best_trial(pair, study)
+        logger.info(f"Optuna {study_namespace} hyperopt started")
         start = time.time()
         try:
             study.optimize(
@@ -339,7 +340,10 @@ class LightGBMRegressorQuickAdapterV35(BaseRegressionModel):
                     self.model_training_parameters,
                 ),
                 n_trials=self.__optuna_config.get("n_trials", N_TRIALS),
-                n_jobs=self.__optuna_config.get("n_jobs", 1),
+                n_jobs=min(
+                    self.__optuna_config.get("n_jobs", 1),
+                    max(int(self.max_system_threads / 4), 1),
+                ),
                 timeout=self.__optuna_config.get("timeout", 3600),
                 gc_after_trial=True,
             )
@@ -397,6 +401,7 @@ class LightGBMRegressorQuickAdapterV35(BaseRegressionModel):
             storage=storage,
         )
         self.optuna_period_enqueue_previous_best_trial(pair, study)
+        logger.info(f"Optuna {study_namespace} hyperopt started")
         start = time.time()
         try:
             study.optimize(
@@ -414,7 +419,10 @@ class LightGBMRegressorQuickAdapterV35(BaseRegressionModel):
                     model_training_parameters,
                 ),
                 n_trials=self.__optuna_config.get("n_trials", N_TRIALS),
-                n_jobs=self.__optuna_config.get("n_jobs", 1),
+                n_jobs=min(
+                    self.__optuna_config.get("n_jobs", 1),
+                    max(int(self.max_system_threads / 4), 1),
+                ),
                 timeout=self.__optuna_config.get("timeout", 3600),
                 gc_after_trial=True,
             )
index bbc2b2848816862e901dd2bcdc77d820ca6f1b2c..d0fd9c4e9842c82ce9933855656d2678a0b5f206 100644 (file)
@@ -326,6 +326,7 @@ class XGBoostRegressorQuickAdapterV35(BaseRegressionModel):
             storage=storage,
         )
         self.optuna_hp_enqueue_previous_best_trial(pair, study)
+        logger.info(f"Optuna {study_namespace} hyperopt started")
         start = time.time()
         try:
             study.optimize(
@@ -340,7 +341,10 @@ class XGBoostRegressorQuickAdapterV35(BaseRegressionModel):
                     self.model_training_parameters,
                 ),
                 n_trials=self.__optuna_config.get("n_trials", N_TRIALS),
-                n_jobs=self.__optuna_config.get("n_jobs", 1),
+                n_jobs=min(
+                    self.__optuna_config.get("n_jobs", 1),
+                    max(int(self.max_system_threads / 4), 1),
+                ),
                 timeout=self.__optuna_config.get("timeout", 3600),
                 gc_after_trial=True,
             )
@@ -398,6 +402,7 @@ class XGBoostRegressorQuickAdapterV35(BaseRegressionModel):
             storage=storage,
         )
         self.optuna_period_enqueue_previous_best_trial(pair, study)
+        logger.info(f"Optuna {study_namespace} hyperopt started")
         start = time.time()
         try:
             study.optimize(
@@ -415,7 +420,10 @@ class XGBoostRegressorQuickAdapterV35(BaseRegressionModel):
                     model_training_parameters,
                 ),
                 n_trials=self.__optuna_config.get("n_trials", N_TRIALS),
-                n_jobs=self.__optuna_config.get("n_jobs", 1),
+                n_jobs=min(
+                    self.__optuna_config.get("n_jobs", 1),
+                    max(int(self.max_system_threads / 4), 1),
+                ),
                 timeout=self.__optuna_config.get("timeout", 3600),
                 gc_after_trial=True,
             )
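
For reference, these are the Optuna keys both QuickAdapter models read, with the defaults visible in this diff (n_jobs defaults to 1, timeout to 3600 seconds); the enclosing section name is an assumption, since the diff only shows the values being fetched from self.__optuna_config:

"optuna": {
    "n_trials": 100,   // example value; falls back to the model's N_TRIALS constant when omitted
    "n_jobs": 4,       // now additionally capped at max(max_system_threads / 4, 1)
    "timeout": 3600    // seconds
}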