Piment Noir Git Repositories - freqai-strategies.git/commitdiff
refactor(qav3): harmonize logging messages
authorJérôme Benoit <jerome.benoit@piment-noir.org>
Sat, 27 Dec 2025 12:00:40 +0000 (13:00 +0100)
committerJérôme Benoit <jerome.benoit@piment-noir.org>
Sat, 27 Dec 2025 12:00:40 +0000 (13:00 +0100)
Signed-off-by: Jérôme Benoit <jerome.benoit@piment-noir.org>
quickadapter/user_data/freqaimodels/QuickAdapterRegressorV3.py
quickadapter/user_data/strategies/QuickAdapterV3.py
quickadapter/user_data/strategies/Utils.py

index 3240a86666822578d589d2c7f11f8f02123ab50f..e909e01b6bc804a5869e445d21823bd0c2a18eb8 100644 (file)
@@ -299,7 +299,7 @@ class QuickAdapterRegressorV3(BaseRegressionModel):
                 label_frequency_candles = default_label_frequency_candles
             else:
                 logger.warning(
-                    f"Invalid string value for label_frequency_candles: '{label_frequency_candles}', "
+                    f"Invalid string value for label_frequency_candles {label_frequency_candles!r}, "
                     f"only 'auto' is supported, using default {default_label_frequency_candles}"
                 )
                 label_frequency_candles = default_label_frequency_candles
@@ -308,13 +308,13 @@ class QuickAdapterRegressorV3(BaseRegressionModel):
                 label_frequency_candles = int(label_frequency_candles)
             else:
                 logger.warning(
-                    f"Invalid numeric value for label_frequency_candles: {label_frequency_candles}, "
-                    f"must be between 2 and 10000, using default {default_label_frequency_candles}"
+                    f"Invalid numeric value for label_frequency_candles: {label_frequency_candles!r}, "
+                    f"must be between 2 and 10000, using default {default_label_frequency_candles!r}"
                 )
                 label_frequency_candles = default_label_frequency_candles
         else:
             logger.warning(
-                f"Invalid type for label_frequency_candles: {type(label_frequency_candles).__name__}, "
+                f"Invalid type for label_frequency_candles {type(label_frequency_candles).__name__!r}, "
                 f"expected int, float, or 'auto', using default {default_label_frequency_candles}"
             )
             label_frequency_candles = default_label_frequency_candles
@@ -875,7 +875,7 @@ class QuickAdapterRegressorV3(BaseRegressionModel):
     def set_optuna_label_candle(self, pair: str) -> None:
         if len(self._optuna_label_candle_pool) == 0:
             logger.warning(
-                f"Optuna {pair} label candle pool is empty, reinitializing it ("
+                f"[{pair}] Optuna label candle pool is empty, reinitializing it ("
                 f"{self._optuna_label_candle_pool=} ,"
                 f"{self._optuna_label_candle_pool_full=} ,"
                 f"{self._optuna_label_candle.values()=} ,"
@@ -1019,7 +1019,7 @@ class QuickAdapterRegressorV3(BaseRegressionModel):
                     test_weights = test_weights[-test_period_candles:]
             elif optuna_train_value >= optuna_hp_value:
                 logger.warning(
-                    f"Optuna {dk.pair} {QuickAdapterRegressorV3._OPTUNA_NAMESPACES[1]} RMSE {format_number(optuna_train_value)} is not better than {QuickAdapterRegressorV3._OPTUNA_NAMESPACES[0]} RMSE {format_number(optuna_hp_value)}, skipping training sets sizing optimization"
+                    f"[{dk.pair}] Optuna {QuickAdapterRegressorV3._OPTUNA_NAMESPACES[1]} RMSE {format_number(optuna_train_value)} is not better than {QuickAdapterRegressorV3._OPTUNA_NAMESPACES[0]} RMSE {format_number(optuna_hp_value)}, skipping training sets sizing optimization"
                 )
 
         eval_set, eval_weights = QuickAdapterRegressorV3.eval_set_and_weights(
@@ -1072,7 +1072,7 @@ class QuickAdapterRegressorV3(BaseRegressionModel):
                 callback()
             except Exception as e:
                 logger.error(
-                    f"Optuna {pair} {namespace} callback execution failed: {e!r}",
+                    f"[{pair}] Optuna {namespace} callback execution failed: {e!r}",
                     exc_info=True,
                 )
             finally:
@@ -1080,7 +1080,7 @@ class QuickAdapterRegressorV3(BaseRegressionModel):
                 self._optuna_label_candles[pair] = 0
         else:
             logger.info(
-                f"Optuna {pair} {namespace} callback throttled, still {optuna_label_remaining_candles} candles to go"
+                f"[{pair}] Optuna {namespace} callback throttled, still {optuna_label_remaining_candles} candles to go"
             )
         if len(self._optuna_label_incremented_pairs) >= len(self.pairs):
             self._optuna_label_incremented_pairs = []
@@ -1136,7 +1136,7 @@ class QuickAdapterRegressorV3(BaseRegressionModel):
             )
             if candles_diff < 0:
                 logger.warning(
-                    f"{pair}: fit live predictions not warmed up yet, still {abs(candles_diff)} candles to go"
+                    f"[{pair}] Fit live predictions not warmed up yet, still {abs(candles_diff)} candles to go"
                 )
                 warmed_up = False
 
@@ -2215,7 +2215,7 @@ class QuickAdapterRegressorV3(BaseRegressionModel):
 
         objective_type = "single" if is_study_single_objective else "multi"
         logger.info(
-            f"Optuna {pair} {namespace} {objective_type} objective hyperopt started"
+            f"[{pair}] Optuna {namespace} {objective_type} objective hyperopt started"
         )
         start_time = time.time()
         try:
@@ -2229,7 +2229,7 @@ class QuickAdapterRegressorV3(BaseRegressionModel):
         except Exception as e:
             time_spent = time.time() - start_time
             logger.error(
-                f"Optuna {pair} {namespace} {objective_type} objective hyperopt failed ({time_spent:.2f} secs): {e!r}",
+                f"[{pair}] Optuna {namespace} {objective_type} objective hyperopt failed ({time_spent:.2f} secs): {e!r}",
                 exc_info=True,
             )
             return
@@ -2238,7 +2238,7 @@ class QuickAdapterRegressorV3(BaseRegressionModel):
         if is_study_single_objective:
             if not QuickAdapterRegressorV3.optuna_study_has_best_trial(study):
                 logger.error(
-                    f"Optuna {pair} {namespace} {objective_type} objective hyperopt failed ({time_spent:.2f} secs): no study best trial found"
+                    f"[{pair}] Optuna {namespace} {objective_type} objective hyperopt failed ({time_spent:.2f} secs): no study best trial found"
                 )
                 return
             self.set_optuna_value(pair, namespace, study.best_value)
@@ -2255,13 +2255,13 @@ class QuickAdapterRegressorV3(BaseRegressionModel):
                 )
             except Exception as e:
                 logger.error(
-                    f"Optuna {pair} {namespace} {objective_type} objective hyperopt failed ({time_spent:.2f} secs): {e!r}",
+                    f"[{pair}] Optuna {namespace} {objective_type} objective hyperopt failed ({time_spent:.2f} secs): {e!r}",
                     exc_info=True,
                 )
                 best_trial = None
             if not best_trial:
                 logger.error(
-                    f"Optuna {pair} {namespace} {objective_type} objective hyperopt failed ({time_spent:.2f} secs): no study best trial found"
+                    f"[{pair}] Optuna {namespace} {objective_type} objective hyperopt failed ({time_spent:.2f} secs): no study best trial found"
                 )
                 return
             self.set_optuna_values(pair, namespace, best_trial.values)
@@ -2272,7 +2272,7 @@ class QuickAdapterRegressorV3(BaseRegressionModel):
             }
             metric_log_msg = f" using {self.ft_params.get('label_metric', QuickAdapterRegressorV3._SCIPY_METRICS[2])} metric"
         logger.info(
-            f"Optuna {pair} {namespace} {objective_type} objective hyperopt done{metric_log_msg} ({time_spent:.2f} secs)"
+            f"[{pair}] Optuna {namespace} {objective_type} objective hyperopt done{metric_log_msg} ({time_spent:.2f} secs)"
         )
         for key, value in study_best_results.items():
             if isinstance(value, list):
@@ -2284,11 +2284,11 @@ class QuickAdapterRegressorV3(BaseRegressionModel):
             else:
                 formatted_value = repr(value)
             logger.info(
-                f"Optuna {pair} {namespace} {objective_type} objective hyperopt | {key:>20s} : {formatted_value}"
+                f"[{pair}] Optuna {namespace} {objective_type} objective hyperopt | {key:>20s} : {formatted_value}"
             )
         if not self.optuna_validate_params(pair, namespace, study):
             logger.warning(
-                f"Optuna {pair} {namespace} {objective_type} objective hyperopt best params found has invalid optimization target value(s)"
+                f"[{pair}] Optuna {namespace} {objective_type} objective hyperopt best params found has invalid optimization target value(s)"
             )
         self.optuna_save_best_params(pair, namespace)
         return study
@@ -2379,7 +2379,7 @@ class QuickAdapterRegressorV3(BaseRegressionModel):
             storage = self.optuna_create_storage(pair)
         except Exception as e:
             logger.error(
-                f"Optuna {pair} {namespace} storage creation failed for study {study_name}: {e!r}",
+                f"[{pair}] Optuna {namespace} storage creation failed for study {study_name}: {e!r}",
                 exc_info=True,
             )
             return None
@@ -2400,7 +2400,7 @@ class QuickAdapterRegressorV3(BaseRegressionModel):
             )
         except Exception as e:
             logger.error(
-                f"Optuna {pair} {namespace} study creation failed ({study_name}): {e!r}",
+                f"[{pair}] Optuna {namespace} study creation failed ({study_name}): {e!r}",
                 exc_info=True,
             )
             return None
@@ -2440,7 +2440,8 @@ class QuickAdapterRegressorV3(BaseRegressionModel):
             study.enqueue_trial(best_params)
         except Exception as e:
             logger.warning(
-                f"Optuna {pair} {namespace} failed to enqueue previous best params: {e!r}"
+                f"[{pair}] Optuna {namespace} failed to enqueue previous best params: {e!r}",
+                exc_info=True,
             )
 
     def optuna_save_best_params(self, pair: str, namespace: str) -> None:
@@ -2452,7 +2453,7 @@ class QuickAdapterRegressorV3(BaseRegressionModel):
                 json.dump(self.get_optuna_params(pair, namespace), write_file, indent=4)
         except Exception as e:
             logger.error(
-                f"Optuna {pair} {namespace} failed to save best params: {e!r}",
+                f"[{pair}] Optuna {namespace} failed to save best params: {e!r}",
                 exc_info=True,
             )
             raise
@@ -2475,7 +2476,9 @@ class QuickAdapterRegressorV3(BaseRegressionModel):
         try:
             optuna.delete_study(study_name=study_name, storage=storage)
         except Exception as e:
-            logger.warning(f"Optuna study deletion failed ({study_name}): {e!r}")
+            logger.warning(
+                f"Optuna study {study_name} deletion failed: {e!r}", exc_info=True
+            )
 
     @staticmethod
     def optuna_load_study(
index 76ea9d44bf2dd774c89526654a776148a90dc0ef..f7671790f3de879d1c68589a8a8a007f8bf9a17e 100644 (file)
@@ -741,7 +741,7 @@ class QuickAdapterV3(IStrategy):
         )
         if weighting_strategy not in set(WEIGHT_STRATEGIES):
             logger.warning(
-                f"Invalid extrema_weighting strategy '{weighting_strategy}', using default '{WEIGHT_STRATEGIES[0]}'"
+                f"Invalid extrema_weighting strategy {weighting_strategy!r}, using default {WEIGHT_STRATEGIES[0]!r}"
             )
             weighting_strategy = WEIGHT_STRATEGIES[0]
 
@@ -753,7 +753,7 @@ class QuickAdapterV3(IStrategy):
         )
         if weighting_standardization not in set(STANDARDIZATION_TYPES):
             logger.warning(
-                f"Invalid extrema_weighting standardization '{weighting_standardization}', using default '{STANDARDIZATION_TYPES[0]}'"
+                f"Invalid extrema_weighting standardization {weighting_standardization!r}, using default {STANDARDIZATION_TYPES[0]!r}"
             )
             weighting_standardization = STANDARDIZATION_TYPES[0]
 
@@ -770,7 +770,7 @@ class QuickAdapterV3(IStrategy):
             or weighting_robust_quantiles[0] >= weighting_robust_quantiles[1]
         ):
             logger.warning(
-                f"Invalid extrema_weighting robust_quantiles {weighting_robust_quantiles}, must be (q1, q3) with 0 <= q1 < q3 <= 1, using default {DEFAULTS_EXTREMA_WEIGHTING['robust_quantiles']}"
+                f"Invalid extrema_weighting robust_quantiles {weighting_robust_quantiles!r}, must be (q1, q3) with 0 <= q1 < q3 <= 1, using default {DEFAULTS_EXTREMA_WEIGHTING['robust_quantiles']!r}"
             )
             weighting_robust_quantiles = DEFAULTS_EXTREMA_WEIGHTING["robust_quantiles"]
         else:
@@ -788,7 +788,7 @@ class QuickAdapterV3(IStrategy):
             or weighting_mmad_scaling_factor <= 0
         ):
             logger.warning(
-                f"Invalid extrema_weighting mmad_scaling_factor {weighting_mmad_scaling_factor}, must be > 0, using default {DEFAULTS_EXTREMA_WEIGHTING['mmad_scaling_factor']}"
+                f"Invalid extrema_weighting mmad_scaling_factor {weighting_mmad_scaling_factor!r}, must be > 0, using default {DEFAULTS_EXTREMA_WEIGHTING['mmad_scaling_factor']!r}"
             )
             weighting_mmad_scaling_factor = DEFAULTS_EXTREMA_WEIGHTING[
                 "mmad_scaling_factor"
@@ -802,7 +802,7 @@ class QuickAdapterV3(IStrategy):
         )
         if weighting_normalization not in set(NORMALIZATION_TYPES):
             logger.warning(
-                f"Invalid extrema_weighting normalization '{weighting_normalization}', using default '{NORMALIZATION_TYPES[0]}'"
+                f"Invalid extrema_weighting normalization {weighting_normalization!r}, using default {NORMALIZATION_TYPES[0]!r}"
             )
             weighting_normalization = NORMALIZATION_TYPES[0]
 
@@ -837,7 +837,7 @@ class QuickAdapterV3(IStrategy):
             or weighting_minmax_range[0] >= weighting_minmax_range[1]
         ):
             logger.warning(
-                f"Invalid extrema_weighting minmax_range {weighting_minmax_range}, must be (min, max) with min < max, using default {DEFAULTS_EXTREMA_WEIGHTING['minmax_range']}"
+                f"Invalid extrema_weighting minmax_range {weighting_minmax_range!r}, must be (min, max) with min < max, using default {DEFAULTS_EXTREMA_WEIGHTING['minmax_range']!r}"
             )
             weighting_minmax_range = DEFAULTS_EXTREMA_WEIGHTING["minmax_range"]
         else:
@@ -855,7 +855,7 @@ class QuickAdapterV3(IStrategy):
             or weighting_sigmoid_scale <= 0
         ):
             logger.warning(
-                f"Invalid extrema_weighting sigmoid_scale {weighting_sigmoid_scale}, must be > 0, using default {DEFAULTS_EXTREMA_WEIGHTING['sigmoid_scale']}"
+                f"Invalid extrema_weighting sigmoid_scale {weighting_sigmoid_scale!r}, must be > 0, using default {DEFAULTS_EXTREMA_WEIGHTING['sigmoid_scale']!r}"
             )
             weighting_sigmoid_scale = DEFAULTS_EXTREMA_WEIGHTING["sigmoid_scale"]
 
@@ -868,7 +868,7 @@ class QuickAdapterV3(IStrategy):
             or weighting_softmax_temperature <= 0
         ):
             logger.warning(
-                f"Invalid extrema_weighting softmax_temperature {weighting_softmax_temperature}, must be > 0, using default {DEFAULTS_EXTREMA_WEIGHTING['softmax_temperature']}"
+                f"Invalid extrema_weighting softmax_temperature {weighting_softmax_temperature!r}, must be > 0, using default {DEFAULTS_EXTREMA_WEIGHTING['softmax_temperature']!r}"
             )
             weighting_softmax_temperature = DEFAULTS_EXTREMA_WEIGHTING[
                 "softmax_temperature"
@@ -881,7 +881,7 @@ class QuickAdapterV3(IStrategy):
         )
         if weighting_rank_method not in set(RANK_METHODS):
             logger.warning(
-                f"Invalid extrema_weighting rank_method '{weighting_rank_method}', using default '{RANK_METHODS[0]}'"
+                f"Invalid extrema_weighting rank_method {weighting_rank_method!r}, using default {RANK_METHODS[0]!r}"
             )
             weighting_rank_method = RANK_METHODS[0]
 
@@ -895,7 +895,7 @@ class QuickAdapterV3(IStrategy):
             or not (0 < weighting_gamma <= 10.0)
         ):
             logger.warning(
-                f"Invalid extrema_weighting gamma {weighting_gamma}, must be a finite number in (0, 10], using default {DEFAULTS_EXTREMA_WEIGHTING['gamma']}"
+                f"Invalid extrema_weighting gamma {weighting_gamma!r}, must be a finite number in (0, 10], using default {DEFAULTS_EXTREMA_WEIGHTING['gamma']!r}"
             )
             weighting_gamma = DEFAULTS_EXTREMA_WEIGHTING["gamma"]
 
@@ -904,7 +904,7 @@ class QuickAdapterV3(IStrategy):
         )
         if not isinstance(weighting_source_weights, dict):
             logger.warning(
-                f"Invalid extrema_weighting source_weights {weighting_source_weights}, must be a dict of source name to weight, using default {DEFAULTS_EXTREMA_WEIGHTING['source_weights']}"
+                f"Invalid extrema_weighting source_weights {weighting_source_weights!r}, must be a dict of source name to weight, using default {DEFAULTS_EXTREMA_WEIGHTING['source_weights']!r}"
             )
             weighting_source_weights = DEFAULTS_EXTREMA_WEIGHTING["source_weights"]
         else:
@@ -921,7 +921,7 @@ class QuickAdapterV3(IStrategy):
                 sanitized_source_weights[str(source)] = float(weight)
             if not sanitized_source_weights:
                 logger.warning(
-                    f"Invalid/empty extrema_weighting source_weights, using default {DEFAULTS_EXTREMA_WEIGHTING['source_weights']}"
+                    f"Invalid/empty extrema_weighting source_weights {weighting_source_weights!r}, using default {DEFAULTS_EXTREMA_WEIGHTING['source_weights']!r}"
                 )
                 weighting_source_weights = DEFAULTS_EXTREMA_WEIGHTING["source_weights"]
             else:
@@ -934,7 +934,7 @@ class QuickAdapterV3(IStrategy):
         )
         if weighting_aggregation not in set(HYBRID_AGGREGATIONS):
             logger.warning(
-                f"Invalid extrema_weighting aggregation '{weighting_aggregation}', using default '{HYBRID_AGGREGATIONS[0]}'"
+                f"Invalid extrema_weighting aggregation {weighting_aggregation!r}, using default {HYBRID_AGGREGATIONS[0]!r}"
             )
             weighting_aggregation = DEFAULTS_EXTREMA_WEIGHTING["aggregation"]
         weighting_aggregation_normalization = str(
@@ -945,7 +945,7 @@ class QuickAdapterV3(IStrategy):
         )
         if weighting_aggregation_normalization not in set(NORMALIZATION_TYPES):
             logger.warning(
-                f"Invalid extrema_weighting aggregation_normalization '{weighting_aggregation_normalization}', using default '{NORMALIZATION_TYPES[6]}'"
+                f"Invalid extrema_weighting aggregation_normalization {weighting_aggregation_normalization!r}, using default {NORMALIZATION_TYPES[6]!r}"
             )
             weighting_aggregation_normalization = DEFAULTS_EXTREMA_WEIGHTING[
                 "aggregation_normalization"
@@ -991,7 +991,7 @@ class QuickAdapterV3(IStrategy):
         )
         if smoothing_method not in set(SMOOTHING_METHODS):
             logger.warning(
-                f"Invalid extrema_smoothing method '{smoothing_method}', using default '{SMOOTHING_METHODS[0]}'"
+                f"Invalid extrema_smoothing method {smoothing_method!r}, using default {SMOOTHING_METHODS[0]!r}"
             )
             smoothing_method = SMOOTHING_METHODS[0]
 
@@ -1000,7 +1000,7 @@ class QuickAdapterV3(IStrategy):
         )
         if not isinstance(smoothing_window, int) or smoothing_window < 3:
             logger.warning(
-                f"Invalid extrema_smoothing window {smoothing_window}, must be an integer >= 3, using default {DEFAULTS_EXTREMA_SMOOTHING['window']}"
+                f"Invalid extrema_smoothing window {smoothing_window!r}, must be an integer >= 3, using default {DEFAULTS_EXTREMA_SMOOTHING['window']!r}"
             )
             smoothing_window = DEFAULTS_EXTREMA_SMOOTHING["window"]
 
@@ -1013,7 +1013,7 @@ class QuickAdapterV3(IStrategy):
             or smoothing_beta <= 0
         ):
             logger.warning(
-                f"Invalid extrema_smoothing beta {smoothing_beta}, must be a finite number > 0, using default {DEFAULTS_EXTREMA_SMOOTHING['beta']}"
+                f"Invalid extrema_smoothing beta {smoothing_beta!r}, must be a finite number > 0, using default {DEFAULTS_EXTREMA_SMOOTHING['beta']!r}"
             )
             smoothing_beta = DEFAULTS_EXTREMA_SMOOTHING["beta"]
 
@@ -1022,7 +1022,7 @@ class QuickAdapterV3(IStrategy):
         )
         if not isinstance(smoothing_polyorder, int) or smoothing_polyorder < 1:
             logger.warning(
-                f"Invalid extrema_smoothing polyorder {smoothing_polyorder}, must be an integer >= 1, using default {DEFAULTS_EXTREMA_SMOOTHING['polyorder']}"
+                f"Invalid extrema_smoothing polyorder {smoothing_polyorder!r}, must be an integer >= 1, using default {DEFAULTS_EXTREMA_SMOOTHING['polyorder']!r}"
             )
             smoothing_polyorder = DEFAULTS_EXTREMA_SMOOTHING["polyorder"]
 
@@ -1031,7 +1031,7 @@ class QuickAdapterV3(IStrategy):
         )
         if smoothing_mode not in set(SMOOTHING_MODES):
             logger.warning(
-                f"Invalid extrema_smoothing mode '{smoothing_mode}', using default '{SMOOTHING_MODES[0]}'"
+                f"Invalid extrema_smoothing mode {smoothing_mode!r}, using default {SMOOTHING_MODES[0]!r}"
             )
             smoothing_mode = SMOOTHING_MODES[0]
 
@@ -1044,7 +1044,7 @@ class QuickAdapterV3(IStrategy):
             or not np.isfinite(smoothing_sigma)
         ):
             logger.warning(
-                f"Invalid extrema_smoothing sigma {smoothing_sigma}, must be a positive finite number, using default {DEFAULTS_EXTREMA_SMOOTHING['sigma']}"
+                f"Invalid extrema_smoothing sigma {smoothing_sigma!r}, must be a positive finite number, using default {DEFAULTS_EXTREMA_SMOOTHING['sigma']!r}"
             )
             smoothing_sigma = DEFAULTS_EXTREMA_SMOOTHING["sigma"]
 
@@ -1104,11 +1104,11 @@ class QuickAdapterV3(IStrategy):
 
         if len(pivots_indices) == 0:
             logger.warning(
-                f"{pair}: no extrema to label (label_period={QuickAdapterV3._td_format(label_period)} / {label_period_candles=} / {label_natr_ratio=:.2f})"
+                f"[{pair}] No extrema to label | label_period: {QuickAdapterV3._td_format(label_period)} | {label_period_candles=} | {label_natr_ratio=:.2f}"
             )
         else:
             logger.info(
-                f"{pair}: labeled {len(pivots_indices)} extrema (label_period={QuickAdapterV3._td_format(label_period)} / {label_period_candles=} / {label_natr_ratio=:.2f})"
+                f"[{pair}] Labeled {len(pivots_indices)} extrema | label_period: {QuickAdapterV3._td_format(label_period)} | {label_period_candles=} | {label_natr_ratio=:.2f}"
             )
             dataframe.loc[pivots_indices, EXTREMA_COLUMN] = pivots_directions
 
@@ -1167,9 +1167,9 @@ class QuickAdapterV3(IStrategy):
 
         if debug:
             extrema = dataframe[EXTREMA_COLUMN]
-            logger.info(f"{extrema.to_numpy()=}")
+            logger.debug(f"{extrema.to_numpy()=}")
             n_extrema: int = calculate_n_extrema(extrema)
-            logger.info(f"{n_extrema=}")
+            logger.debug(f"{n_extrema=}")
         return dataframe
 
     def populate_indicators(
@@ -1387,7 +1387,7 @@ class QuickAdapterV3(IStrategy):
                     return trade_kama_natr_values[-1]
             except Exception as e:
                 logger.warning(
-                    f"{pair}: failed to calculate trade NATR KAMA: {e!r}, falling back to last trade NATR value",
+                    f"[{pair}] Failed to calculate trade NATR KAMA: {e!r}, falling back to last trade NATR value",
                     exc_info=True,
                 )
         return label_natr.iloc[-1]
@@ -1507,7 +1507,9 @@ class QuickAdapterV3(IStrategy):
             try:
                 callback()
             except Exception as e:
-                logger.error(f"{pair}: callback execution failed: {e!r}", exc_info=True)
+                logger.error(
+                    f"[{pair}] Callback execution failed: {e!r}", exc_info=True
+                )
 
             threshold_secs = 10 * candle_duration_secs
             keys_to_remove = [
@@ -2260,7 +2262,7 @@ class QuickAdapterV3(IStrategy):
                 n_outliers = trade.get_custom_data("n_outliers", 0)
                 n_outliers += 1
                 logger.warning(
-                    f"{pair}: detected new predictions outlier ({n_outliers=}) on trade {trade.id}"
+                    f"[{pair}] Detected new predictions outlier ({n_outliers=}) on trade {trade.id}"
                 )
                 trade.set_custom_data("n_outliers", n_outliers)
                 trade.set_custom_data("last_outlier_date", last_candle_date.isoformat())
index b6b6f7bd6f44ef0b349e0727e0f93ffd186a79be..a33eb97632f4926b6fe3f1921f101580d3abe6fb 100644 (file)
@@ -2379,7 +2379,7 @@ def validate_range(
 
     if sanitized_min != min_val or sanitized_max != max_val:
         logger.warning(
-            f"Invalid {name} values sanitized: {min_name}={sanitized_min}, {max_name}={sanitized_max} (defaults=({default_min}, {default_max}))"
+            f"Invalid {name} range ({min_name}={min_val!r}, {max_name}={max_val!r}), using ({sanitized_min!r}, {sanitized_max!r})"
         )
 
     return sanitized_min, sanitized_max