Piment Noir Git Repositories - freqai-strategies.git/commitdiff
refactor(reforcexy): ensure HPO trial start is always logged
author: Jérôme Benoit <jerome.benoit@piment-noir.org>
Sat, 20 Sep 2025 20:07:27 +0000 (22:07 +0200)
committer: Jérôme Benoit <jerome.benoit@piment-noir.org>
Sat, 20 Sep 2025 20:07:27 +0000 (22:07 +0200)
Signed-off-by: Jérôme Benoit <jerome.benoit@piment-noir.org>
ReforceXY/user_data/freqaimodels/ReforceXY.py

index e1843a5ddbfa94879620b98c1988de6f31152932..67e6fca456e7303f642856a3ce8362de51bce58f 100644 (file)
@@ -937,6 +937,8 @@ class ReforceXY(BaseReinforcementLearningModel):
         """
         Defines a single trial for hyperparameter optimization using Optuna
         """
+        logger.info("------------ Hyperopt trial %d ------------", trial.number)
+
         if "PPO" in self.model_type:
             params = sample_params_ppo(trial, self.n_envs)
             if params.get("n_steps", 0) * self.n_envs > total_timesteps:
@@ -962,6 +964,8 @@ class ReforceXY(BaseReinforcementLearningModel):
         # Ensure that the sampled parameters take precedence
         params = deepmerge(self.get_model_params(), params)
 
+        logger.info("Trial %s params: %s", trial.number, params)
+
         nan_encountered = False
 
         if self.activate_tensorboard:
@@ -975,9 +979,6 @@ class ReforceXY(BaseReinforcementLearningModel):
         else:
             tensorboard_log_path = None
 
-        logger.info("------------ Hyperopt trial %d ------------", trial.number)
-        logger.info("Trial %s params: %s", trial.number, params)
-
         train_env, eval_env = self._get_train_and_eval_environments(
             train_df, test_df, dk, trial=trial
         )