]> Piment Noir Git Repositories - freqai-strategies.git/commitdiff
fix(reforcexy): load previous best params at hyperopt only when needed
authorJérôme Benoit <jerome.benoit@piment-noir.org>
Fri, 24 Oct 2025 19:43:10 +0000 (21:43 +0200)
committerJérôme Benoit <jerome.benoit@piment-noir.org>
Fri, 24 Oct 2025 19:43:10 +0000 (21:43 +0200)
Signed-off-by: Jérôme Benoit <jerome.benoit@piment-noir.org>
ReforceXY/user_data/freqaimodels/ReforceXY.py

index 4b3be9ab506800a0d9d2578c2fdfdbd71ec4bb4c..70abc6151a8c93a4adcc9049c3e89c4cebdac2e9 100644 (file)
@@ -295,21 +295,24 @@ class ReforceXY(BaseReinforcementLearningModel):
         model_reward_parameters = rl_cfg.setdefault("model_reward_parameters", {})
 
         gamma: Optional[float] = None
-        best_trial_params: Optional[Dict[str, Any]] = None
-        if self.hyperopt:
-            best_trial_params = self.load_best_trial_params(pair)
 
         if model_params and isinstance(model_params.get("gamma"), (int, float)):
             gamma = float(model_params.get("gamma"))
-        elif best_trial_params and isinstance(
-            best_trial_params.get("gamma"), (int, float)
-        ):
-            gamma = float(best_trial_params.get("gamma"))
-        elif hasattr(self.model, "gamma") and isinstance(
-            self.model.gamma, (int, float)
+        elif self.hyperopt:
+            best_trial_params = self.load_best_trial_params(pair)
+            if best_trial_params and isinstance(
+                best_trial_params.get("gamma"), (int, float)
+            ):
+                gamma = float(best_trial_params.get("gamma"))
+
+        if (
+            gamma is None
+            and hasattr(self.model, "gamma")
+            and isinstance(self.model.gamma, (int, float))
         ):
             gamma = float(self.model.gamma)
-        else:
+
+        if gamma is None:
             model_params_gamma = self.get_model_params().get("gamma")
             if isinstance(model_params_gamma, (int, float)):
                 gamma = float(model_params_gamma)