Piment Noir Git Repositories - freqai-strategies.git/commitdiff
refactor: remove now unneeded debug code and improve logging messages
authorJérôme Benoit <jerome.benoit@piment-noir.org>
Sat, 27 Dec 2025 12:57:34 +0000 (13:57 +0100)
committerJérôme Benoit <jerome.benoit@piment-noir.org>
Sat, 27 Dec 2025 12:57:34 +0000 (13:57 +0100)
Signed-off-by: Jérôme Benoit <jerome.benoit@piment-noir.org>
ReforceXY/reward_space_analysis/reward_space_analysis.py
quickadapter/user_data/freqaimodels/QuickAdapterRegressorV3.py
quickadapter/user_data/strategies/QuickAdapterV3.py
quickadapter/user_data/strategies/Utils.py

index 25eb84dc4ba7508070ac261cdbaac6509a8c75ec..3d1cf2982926a15458a171f21466c2cd261f539a 100644 (file)
@@ -310,8 +310,8 @@ def _warn_unknown_mode(
     """
     valid_sorted = sorted(valid_values)
     warnings.warn(
-        f"Param: unknown {mode_type} '{provided_value}'. "
-        f"Expected one of: {valid_sorted}; falling back to '{fallback_value}'.",
+        f"Param: unknown {mode_type} '{provided_value}', "
+        f"expected one of: {valid_sorted}; falling back to '{fallback_value}'",
         RewardDiagnosticsWarning,
         stacklevel=stacklevel,
     )
@@ -3626,7 +3626,7 @@ def write_complete_statistical_analysis(
     analysis_stats = None
     partial_deps = {}
     if skip_feature_analysis or len(df) < 4:
-        print("Skipping feature analysis: insufficient samples or flag set.")
+        print("Skipping feature analysis: insufficient samples or flag set")
         # Do NOT create feature_importance.csv when skipped (tests expect absence)
         # Create minimal partial dependence placeholders only if feature analysis was NOT explicitly skipped
         if not skip_feature_analysis and not skip_partial_dependence:
@@ -3659,7 +3659,7 @@ def write_complete_statistical_analysis(
                         f"{feature},partial_dependence\n", encoding="utf-8"
                     )
         except ImportError:
-            print("Skipping feature analysis: scikit-learn unavailable.")
+            print("Skipping feature analysis: scikit-learn unavailable")
             (output_dir / "feature_importance.csv").write_text(
                 "feature,importance_mean,importance_std\n", encoding="utf-8"
             )
@@ -4517,7 +4517,7 @@ def main() -> None:
     except Exception as e:
         print(f"Manifest generation failed: {e}")
 
-    print(f"Generated {len(df):,} synthetic samples.")
+    print(f"Generated {len(df):,} synthetic samples")
     print(sample_output_message)
     print(f"Artifacts saved to: {args.out_dir.resolve()}")
 
index 25b0a86e243d86a2cef0ae30d3b9418c00b7e999..9e1701c69c4434b0483721c1cc8943ac1bc38ff5 100644 (file)
@@ -49,8 +49,6 @@ SkimageThresholdMethod = Literal[
 ]
 ThresholdMethod = Union[SkimageThresholdMethod, CustomThresholdMethod]
 
-debug = False
-
 warnings.simplefilter(action="ignore", category=FutureWarning)
 
 logger = logging.getLogger(__name__)
@@ -300,7 +298,7 @@ class QuickAdapterRegressorV3(BaseRegressionModel):
             else:
                 logger.warning(
                     f"Invalid string value for label_frequency_candles {label_frequency_candles!r}, "
-                    f"only 'auto' is supported, using default {default_label_frequency_candles}"
+                    f"only 'auto' is supported, using default {default_label_frequency_candles!r}"
                 )
                 label_frequency_candles = default_label_frequency_candles
         elif isinstance(label_frequency_candles, (int, float)):
@@ -315,7 +313,7 @@ class QuickAdapterRegressorV3(BaseRegressionModel):
         else:
             logger.warning(
                 f"Invalid type for label_frequency_candles {type(label_frequency_candles).__name__!r}, "
-                f"expected int, float, or 'auto', using default {default_label_frequency_candles}"
+                f"expected int, float, or 'auto', using default {default_label_frequency_candles!r}"
             )
             label_frequency_candles = default_label_frequency_candles
 
@@ -875,12 +873,15 @@ class QuickAdapterRegressorV3(BaseRegressionModel):
     def set_optuna_label_candle(self, pair: str) -> None:
         if len(self._optuna_label_candle_pool) == 0:
             logger.warning(
-                f"[{pair}] Optuna label candle pool is empty, reinitializing it ("
-                f"{self._optuna_label_candle_pool=} ,"
-                f"{self._optuna_label_candle_pool_full=} ,"
-                f"{self._optuna_label_candle.values()=} ,"
-                f"{self._optuna_label_candles.values()=} ,"
-                f"{self._optuna_label_incremented_pairs=})"
+                f"[{pair}] Optuna label candle pool is empty, reinitializing"
+            )
+            logger.debug(
+                f"[{pair}] Optuna label candle pool state: "
+                f"pool={self._optuna_label_candle_pool}, "
+                f"pool_full={self._optuna_label_candle_pool_full}, "
+                f"candle={list(self._optuna_label_candle.values())}, "
+                f"candles={list(self._optuna_label_candles.values())}, "
+                f"incremented_pairs={self._optuna_label_incremented_pairs}"
             )
             self.init_optuna_label_candle_pool()
         optuna_label_candle_pool = copy.deepcopy(self._optuna_label_candle_pool)
@@ -971,6 +972,7 @@ class QuickAdapterRegressorV3(BaseRegressionModel):
                 objective=lambda trial: train_objective(
                     trial,
                     self.regressor,
+                    dk.pair,
                     X,
                     y,
                     train_weights,
@@ -2477,7 +2479,8 @@ class QuickAdapterRegressorV3(BaseRegressionModel):
             optuna.delete_study(study_name=study_name, storage=storage)
         except Exception as e:
             logger.warning(
-                f"Optuna study {study_name} deletion failed: {e!r}", exc_info=True
+                f"Optuna study deletion failed for study {study_name}: {e!r}",
+                exc_info=True,
             )
 
     @staticmethod
@@ -2514,6 +2517,7 @@ class QuickAdapterRegressorV3(BaseRegressionModel):
 def train_objective(
     trial: optuna.trial.Trial,
     regressor: Regressor,
+    pair: str,
     X: pd.DataFrame,
     y: pd.DataFrame,
     train_weights: NDArray[np.floating],
@@ -2527,17 +2531,10 @@ def train_objective(
 ) -> float:
     test_ok = True
     test_length = len(X_test)
-    if debug:
-        test_extrema = y_test.get(EXTREMA_COLUMN)
-        n_test_extrema: int = calculate_n_extrema(test_extrema)
-        min_test_extrema: int = calculate_min_extrema(
-            test_length, fit_live_predictions_candles
-        )
-        logger.info(f"{test_length=}, {n_test_extrema=}, {min_test_extrema=}")
     min_test_period_candles: int = fit_live_predictions_candles * 4
     if test_length < min_test_period_candles:
         logger.warning(
-            f"Insufficient test data: {test_length} < {min_test_period_candles}"
+            f"[{pair}] Optuna train | Insufficient test data: {test_length} < {min_test_period_candles}"
         )
         return np.inf
     max_test_period_candles: int = test_length
@@ -2555,28 +2552,20 @@ def train_objective(
         test_period_candles, fit_live_predictions_candles
     )
     if n_test_extrema < min_test_extrema:
-        if debug:
-            logger.warning(
-                f"Insufficient extrema in test data with {test_period_candles=}: {n_test_extrema=} < {min_test_extrema=}"
-            )
+        logger.debug(
+            f"[{pair}] Optuna train | Insufficient extrema in test data with {test_period_candles=}: {n_test_extrema=} < {min_test_extrema=}"
+        )
         test_ok = False
     test_weights = test_weights[-test_period_candles:]
 
     train_ok = True
     train_length = len(X)
-    if debug:
-        train_extrema = y.get(EXTREMA_COLUMN)
-        n_train_extrema: int = calculate_n_extrema(train_extrema)
-        min_train_extrema: int = calculate_min_extrema(
-            train_length, fit_live_predictions_candles
-        )
-        logger.info(f"{train_length=}, {n_train_extrema=}, {min_train_extrema=}")
     min_train_period_candles: int = min_test_period_candles * int(
         round(1 / test_size - 1)
     )
     if train_length < min_train_period_candles:
         logger.warning(
-            f"Insufficient train data: {train_length} < {min_train_period_candles}"
+            f"[{pair}] Optuna train | Insufficient train data: {train_length} < {min_train_period_candles}"
         )
         return np.inf
     max_train_period_candles: int = train_length
@@ -2594,10 +2583,9 @@ def train_objective(
         train_period_candles, fit_live_predictions_candles
     )
     if n_train_extrema < min_train_extrema:
-        if debug:
-            logger.warning(
-                f"Insufficient extrema in train data with {train_period_candles=}: {n_train_extrema=} < {min_train_extrema=}"
-            )
+        logger.debug(
+            f"[{pair}] Optuna train | Insufficient extrema in train data with {train_period_candles=}: {n_train_extrema=} < {min_train_extrema=}"
+        )
         train_ok = False
     train_weights = train_weights[-train_period_candles:]
 
index 33a0600878d67360621d1b98a6a9951fdfe3f102..48d94757b802eade60bc0b08b7475c4f29497fe3 100644 (file)
@@ -44,7 +44,6 @@ from Utils import (
     WEIGHT_STRATEGIES,
     alligator,
     bottom_change_percent,
-    calculate_n_extrema,
     calculate_quantile,
     ewo,
     format_number,
@@ -75,8 +74,6 @@ CandleDeviationCacheKey = tuple[
 ]
 CandleThresholdCacheKey = tuple[str, DfSignature, str, int, float, float]
 
-debug = False
-
 logger = logging.getLogger(__name__)
 
 
@@ -1165,11 +1162,6 @@ class QuickAdapterV3(IStrategy):
             self.extrema_smoothing["sigma"],
         )
 
-        if debug:
-            extrema = dataframe[EXTREMA_COLUMN]
-            logger.debug(f"{extrema.to_numpy()=}")
-            n_extrema: int = calculate_n_extrema(extrema)
-            logger.debug(f"{n_extrema=}")
         return dataframe
 
     def populate_indicators(
@@ -2449,7 +2441,7 @@ class QuickAdapterV3(IStrategy):
             pair=pair, timeframe=self.config.get("timeframe")
         )
         if df.empty:
-            logger.warning(
+            logger.info(
                 f"[{pair}] Denied {side} {QuickAdapterV3._ORDER_TYPES[0]}: dataframe is empty"
             )
             return False
index a33eb97632f4926b6fe3f1921f101580d3abe6fb..00f44fa1b39b8a85540706893e382341ec908f4d 100644 (file)
@@ -2359,7 +2359,7 @@ def validate_range(
             or (finite_only and not np.isfinite(value))
             or (non_negative and value < 0)
         ):
-            logger.warning(f"Invalid {name} {value!r}, using default {default_value}")
+            logger.warning(f"Invalid {name} {value!r}, using default {default_value!r}")
             return default_value
         return value
 
@@ -2373,7 +2373,7 @@ def validate_range(
     )
     if not ordering_ok:
         logger.warning(
-            f"Invalid {name} ordering ({min_name}={sanitized_min}, {max_name}={sanitized_max}), using defaults ({default_min}, {default_max})"
+            f"Invalid {name} ordering ({min_name}={sanitized_min!r}, {max_name}={sanitized_max!r}), using defaults ({default_min!r}, {default_max!r})"
         )
         sanitized_min, sanitized_max = default_min, default_max