label_frequency_candles = default_label_frequency_candles
else:
logger.warning(
- f"Invalid string value for label_frequency_candles: '{label_frequency_candles}', "
+ f"Invalid string value for label_frequency_candles {label_frequency_candles!r}, "
f"only 'auto' is supported, using default {default_label_frequency_candles}"
)
label_frequency_candles = default_label_frequency_candles
label_frequency_candles = int(label_frequency_candles)
else:
logger.warning(
- f"Invalid numeric value for label_frequency_candles: {label_frequency_candles}, "
- f"must be between 2 and 10000, using default {default_label_frequency_candles}"
+ f"Invalid numeric value for label_frequency_candles: {label_frequency_candles!r}, "
+ f"must be between 2 and 10000, using default {default_label_frequency_candles!r}"
)
label_frequency_candles = default_label_frequency_candles
else:
logger.warning(
- f"Invalid type for label_frequency_candles: {type(label_frequency_candles).__name__}, "
+ f"Invalid type for label_frequency_candles {type(label_frequency_candles).__name__!r}, "
f"expected int, float, or 'auto', using default {default_label_frequency_candles}"
)
label_frequency_candles = default_label_frequency_candles
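# Editor's note: a standalone illustration (not project code) of what the `!r`
# conversion adopted throughout this diff buys over manual quoting: repr() makes
# the type of the offending value visible in the log line.
assert f"got {'10'!r}" == "got '10'"  # a str keeps its quotes
assert f"got {10!r}" == "got 10"      # an int prints bare
assert f"got {10.0!r}" == "got 10.0"  # a float stays distinguishable from the int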
def set_optuna_label_candle(self, pair: str) -> None:
if len(self._optuna_label_candle_pool) == 0:
logger.warning(
- f"Optuna {pair} label candle pool is empty, reinitializing it ("
+ f"[{pair}] Optuna label candle pool is empty, reinitializing it ("
f"{self._optuna_label_candle_pool=} ,"
f"{self._optuna_label_candle_pool_full=} ,"
f"{self._optuna_label_candle.values()=} ,"
test_weights = test_weights[-test_period_candles:]
elif optuna_train_value >= optuna_hp_value:
logger.warning(
- f"Optuna {dk.pair} {QuickAdapterRegressorV3._OPTUNA_NAMESPACES[1]} RMSE {format_number(optuna_train_value)} is not better than {QuickAdapterRegressorV3._OPTUNA_NAMESPACES[0]} RMSE {format_number(optuna_hp_value)}, skipping training sets sizing optimization"
+ f"[{dk.pair}] Optuna {QuickAdapterRegressorV3._OPTUNA_NAMESPACES[1]} RMSE {format_number(optuna_train_value)} is not better than {QuickAdapterRegressorV3._OPTUNA_NAMESPACES[0]} RMSE {format_number(optuna_hp_value)}, skipping training sets sizing optimization"
)
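# Editor's note (context for the guard above): RMSE is lower-is-better, so
# optuna_train_value >= optuna_hp_value means the train-period study failed to
# improve on the hyperparameter study's baseline, and the training sets sizing
# optimization is skipped rather than applied at a loss.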
eval_set, eval_weights = QuickAdapterRegressorV3.eval_set_and_weights(
callback()
except Exception as e:
logger.error(
- f"Optuna {pair} {namespace} callback execution failed: {e!r}",
+ f"[{pair}] Optuna {namespace} callback execution failed: {e!r}",
exc_info=True,
)
finally:
self._optuna_label_candles[pair] = 0
else:
logger.info(
- f"Optuna {pair} {namespace} callback throttled, still {optuna_label_remaining_candles} candles to go"
+ f"[{pair}] Optuna {namespace} callback throttled, still {optuna_label_remaining_candles} candles to go"
)
if len(self._optuna_label_incremented_pairs) >= len(self.pairs):
self._optuna_label_incremented_pairs = []
)
if candles_diff < 0:
logger.warning(
- f"{pair}: fit live predictions not warmed up yet, still {abs(candles_diff)} candles to go"
+ f"[{pair}] Fit live predictions not warmed up yet, still {abs(candles_diff)} candles to go"
)
warmed_up = False
objective_type = "single" if is_study_single_objective else "multi"
logger.info(
- f"Optuna {pair} {namespace} {objective_type} objective hyperopt started"
+ f"[{pair}] Optuna {namespace} {objective_type} objective hyperopt started"
)
start_time = time.time()
try:
except Exception as e:
time_spent = time.time() - start_time
logger.error(
- f"Optuna {pair} {namespace} {objective_type} objective hyperopt failed ({time_spent:.2f} secs): {e!r}",
+ f"[{pair}] Optuna {namespace} {objective_type} objective hyperopt failed ({time_spent:.2f} secs): {e!r}",
exc_info=True,
)
return
if is_study_single_objective:
if not QuickAdapterRegressorV3.optuna_study_has_best_trial(study):
logger.error(
- f"Optuna {pair} {namespace} {objective_type} objective hyperopt failed ({time_spent:.2f} secs): no study best trial found"
+ f"[{pair}] Optuna {namespace} {objective_type} objective hyperopt failed ({time_spent:.2f} secs): no study best trial found"
)
return
self.set_optuna_value(pair, namespace, study.best_value)
)
except Exception as e:
logger.error(
- f"Optuna {pair} {namespace} {objective_type} objective hyperopt failed ({time_spent:.2f} secs): {e!r}",
+ f"[{pair}] Optuna {namespace} {objective_type} objective hyperopt failed ({time_spent:.2f} secs): {e!r}",
exc_info=True,
)
best_trial = None
if not best_trial:
logger.error(
- f"Optuna {pair} {namespace} {objective_type} objective hyperopt failed ({time_spent:.2f} secs): no study best trial found"
+ f"[{pair}] Optuna {namespace} {objective_type} objective hyperopt failed ({time_spent:.2f} secs): no study best trial found"
)
return
self.set_optuna_values(pair, namespace, best_trial.values)
}
metric_log_msg = f" using {self.ft_params.get('label_metric', QuickAdapterRegressorV3._SCIPY_METRICS[2])} metric"
logger.info(
- f"Optuna {pair} {namespace} {objective_type} objective hyperopt done{metric_log_msg} ({time_spent:.2f} secs)"
+ f"[{pair}] Optuna {namespace} {objective_type} objective hyperopt done{metric_log_msg} ({time_spent:.2f} secs)"
)
for key, value in study_best_results.items():
if isinstance(value, list):
else:
formatted_value = repr(value)
logger.info(
- f"Optuna {pair} {namespace} {objective_type} objective hyperopt | {key:>20s} : {formatted_value}"
+ f"[{pair}] Optuna {namespace} {objective_type} objective hyperopt | {key:>20s} : {formatted_value}"
)
if not self.optuna_validate_params(pair, namespace, study):
logger.warning(
- f"Optuna {pair} {namespace} {objective_type} objective hyperopt best params found has invalid optimization target value(s)"
+ f"[{pair}] Optuna {namespace} {objective_type} objective hyperopt best params found has invalid optimization target value(s)"
)
self.optuna_save_best_params(pair, namespace)
return study
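# Editor's note: background for the single/multi branches above, hedged to the
# public Optuna API: a single-objective study exposes `study.best_value` and
# `study.best_trial`, while a multi-objective study exposes `study.best_trials`
# (the Pareto front), from which one trial must be chosen explicitly:
import optuna

def pick_best_trial(study: optuna.Study):
    # illustrative policy only: first Pareto-front trial, None if the study is empty
    return study.best_trials[0] if study.best_trials else None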
storage = self.optuna_create_storage(pair)
except Exception as e:
logger.error(
- f"Optuna {pair} {namespace} storage creation failed for study {study_name}: {e!r}",
+ f"[{pair}] Optuna {namespace} storage creation failed for study {study_name}: {e!r}",
exc_info=True,
)
return None
)
except Exception as e:
logger.error(
- f"Optuna {pair} {namespace} study creation failed ({study_name}): {e!r}",
+ f"[{pair}] Optuna {namespace} study creation failed ({study_name}): {e!r}",
exc_info=True,
)
return None
study.enqueue_trial(best_params)
except Exception as e:
logger.warning(
- f"Optuna {pair} {namespace} failed to enqueue previous best params: {e!r}"
+ f"[{pair}] Optuna {namespace} failed to enqueue previous best params: {e!r}",
+ exc_info=True,
)
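# Editor's note: why a warning (not an error) fits here: `enqueue_trial` only
# warm-starts the study with previously found params, so a failure just means
# the study starts cold. A minimal standalone usage sketch:
import optuna

study = optuna.create_study(direction="minimize")
study.enqueue_trial({"x": 1.0})  # this trial is evaluated before any sampled ones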
def optuna_save_best_params(self, pair: str, namespace: str) -> None:
json.dump(self.get_optuna_params(pair, namespace), write_file, indent=4)
except Exception as e:
logger.error(
- f"Optuna {pair} {namespace} failed to save best params: {e!r}",
+ f"[{pair}] Optuna {namespace} failed to save best params: {e!r}",
exc_info=True,
)
raise
try:
optuna.delete_study(study_name=study_name, storage=storage)
except Exception as e:
- logger.warning(f"Optuna study deletion failed ({study_name}): {e!r}")
+ logger.warning(
+ f"Optuna study {study_name} deletion failed: {e!r}", exc_info=True
+ )
@staticmethod
def optuna_load_study(
)
if weighting_strategy not in set(WEIGHT_STRATEGIES):
logger.warning(
- f"Invalid extrema_weighting strategy '{weighting_strategy}', using default '{WEIGHT_STRATEGIES[0]}'"
+ f"Invalid extrema_weighting strategy {weighting_strategy!r}, using default {WEIGHT_STRATEGIES[0]!r}"
)
weighting_strategy = WEIGHT_STRATEGIES[0]
)
if weighting_standardization not in set(STANDARDIZATION_TYPES):
logger.warning(
- f"Invalid extrema_weighting standardization '{weighting_standardization}', using default '{STANDARDIZATION_TYPES[0]}'"
+ f"Invalid extrema_weighting standardization {weighting_standardization!r}, using default {STANDARDIZATION_TYPES[0]!r}"
)
weighting_standardization = STANDARDIZATION_TYPES[0]
or weighting_robust_quantiles[0] >= weighting_robust_quantiles[1]
):
logger.warning(
- f"Invalid extrema_weighting robust_quantiles {weighting_robust_quantiles}, must be (q1, q3) with 0 <= q1 < q3 <= 1, using default {DEFAULTS_EXTREMA_WEIGHTING['robust_quantiles']}"
+ f"Invalid extrema_weighting robust_quantiles {weighting_robust_quantiles!r}, must be (q1, q3) with 0 <= q1 < q3 <= 1, using default {DEFAULTS_EXTREMA_WEIGHTING['robust_quantiles']!r}"
)
weighting_robust_quantiles = DEFAULTS_EXTREMA_WEIGHTING["robust_quantiles"]
else:
or weighting_mmad_scaling_factor <= 0
):
logger.warning(
- f"Invalid extrema_weighting mmad_scaling_factor {weighting_mmad_scaling_factor}, must be > 0, using default {DEFAULTS_EXTREMA_WEIGHTING['mmad_scaling_factor']}"
+ f"Invalid extrema_weighting mmad_scaling_factor {weighting_mmad_scaling_factor!r}, must be > 0, using default {DEFAULTS_EXTREMA_WEIGHTING['mmad_scaling_factor']!r}"
)
weighting_mmad_scaling_factor = DEFAULTS_EXTREMA_WEIGHTING[
"mmad_scaling_factor"
)
if weighting_normalization not in set(NORMALIZATION_TYPES):
logger.warning(
- f"Invalid extrema_weighting normalization '{weighting_normalization}', using default '{NORMALIZATION_TYPES[0]}'"
+ f"Invalid extrema_weighting normalization {weighting_normalization!r}, using default {NORMALIZATION_TYPES[0]!r}"
)
weighting_normalization = NORMALIZATION_TYPES[0]
or weighting_minmax_range[0] >= weighting_minmax_range[1]
):
logger.warning(
- f"Invalid extrema_weighting minmax_range {weighting_minmax_range}, must be (min, max) with min < max, using default {DEFAULTS_EXTREMA_WEIGHTING['minmax_range']}"
+ f"Invalid extrema_weighting minmax_range {weighting_minmax_range!r}, must be (min, max) with min < max, using default {DEFAULTS_EXTREMA_WEIGHTING['minmax_range']!r}"
)
weighting_minmax_range = DEFAULTS_EXTREMA_WEIGHTING["minmax_range"]
else:
or weighting_sigmoid_scale <= 0
):
logger.warning(
- f"Invalid extrema_weighting sigmoid_scale {weighting_sigmoid_scale}, must be > 0, using default {DEFAULTS_EXTREMA_WEIGHTING['sigmoid_scale']}"
+ f"Invalid extrema_weighting sigmoid_scale {weighting_sigmoid_scale!r}, must be > 0, using default {DEFAULTS_EXTREMA_WEIGHTING['sigmoid_scale']!r}"
)
weighting_sigmoid_scale = DEFAULTS_EXTREMA_WEIGHTING["sigmoid_scale"]
or weighting_softmax_temperature <= 0
):
logger.warning(
- f"Invalid extrema_weighting softmax_temperature {weighting_softmax_temperature}, must be > 0, using default {DEFAULTS_EXTREMA_WEIGHTING['softmax_temperature']}"
+ f"Invalid extrema_weighting softmax_temperature {weighting_softmax_temperature!r}, must be > 0, using default {DEFAULTS_EXTREMA_WEIGHTING['softmax_temperature']!r}"
)
weighting_softmax_temperature = DEFAULTS_EXTREMA_WEIGHTING[
"softmax_temperature"
)
if weighting_rank_method not in set(RANK_METHODS):
logger.warning(
- f"Invalid extrema_weighting rank_method '{weighting_rank_method}', using default '{RANK_METHODS[0]}'"
+ f"Invalid extrema_weighting rank_method {weighting_rank_method!r}, using default {RANK_METHODS[0]!r}"
)
weighting_rank_method = RANK_METHODS[0]
or not (0 < weighting_gamma <= 10.0)
):
logger.warning(
- f"Invalid extrema_weighting gamma {weighting_gamma}, must be a finite number in (0, 10], using default {DEFAULTS_EXTREMA_WEIGHTING['gamma']}"
+ f"Invalid extrema_weighting gamma {weighting_gamma!r}, must be a finite number in (0, 10], using default {DEFAULTS_EXTREMA_WEIGHTING['gamma']!r}"
)
weighting_gamma = DEFAULTS_EXTREMA_WEIGHTING["gamma"]
)
if not isinstance(weighting_source_weights, dict):
logger.warning(
- f"Invalid extrema_weighting source_weights {weighting_source_weights}, must be a dict of source name to weight, using default {DEFAULTS_EXTREMA_WEIGHTING['source_weights']}"
+ f"Invalid extrema_weighting source_weights {weighting_source_weights!r}, must be a dict of source name to weight, using default {DEFAULTS_EXTREMA_WEIGHTING['source_weights']!r}"
)
weighting_source_weights = DEFAULTS_EXTREMA_WEIGHTING["source_weights"]
else:
sanitized_source_weights[str(source)] = float(weight)
if not sanitized_source_weights:
logger.warning(
- f"Invalid/empty extrema_weighting source_weights, using default {DEFAULTS_EXTREMA_WEIGHTING['source_weights']}"
+ f"Invalid/empty extrema_weighting source_weights {weighting_source_weights!r}, using default {DEFAULTS_EXTREMA_WEIGHTING['source_weights']!r}"
)
weighting_source_weights = DEFAULTS_EXTREMA_WEIGHTING["source_weights"]
else:
)
if weighting_aggregation not in set(HYBRID_AGGREGATIONS):
logger.warning(
- f"Invalid extrema_weighting aggregation '{weighting_aggregation}', using default '{HYBRID_AGGREGATIONS[0]}'"
+ f"Invalid extrema_weighting aggregation {weighting_aggregation!r}, using default {HYBRID_AGGREGATIONS[0]!r}"
)
weighting_aggregation = DEFAULTS_EXTREMA_WEIGHTING["aggregation"]
weighting_aggregation_normalization = str(
)
if weighting_aggregation_normalization not in set(NORMALIZATION_TYPES):
logger.warning(
- f"Invalid extrema_weighting aggregation_normalization '{weighting_aggregation_normalization}', using default '{NORMALIZATION_TYPES[6]}'"
+ f"Invalid extrema_weighting aggregation_normalization {weighting_aggregation_normalization!r}, using default {NORMALIZATION_TYPES[6]!r}"
)
weighting_aggregation_normalization = DEFAULTS_EXTREMA_WEIGHTING[
"aggregation_normalization"
)
if smoothing_method not in set(SMOOTHING_METHODS):
logger.warning(
- f"Invalid extrema_smoothing method '{smoothing_method}', using default '{SMOOTHING_METHODS[0]}'"
+ f"Invalid extrema_smoothing method {smoothing_method!r}, using default {SMOOTHING_METHODS[0]!r}"
)
smoothing_method = SMOOTHING_METHODS[0]
)
if not isinstance(smoothing_window, int) or smoothing_window < 3:
logger.warning(
- f"Invalid extrema_smoothing window {smoothing_window}, must be an integer >= 3, using default {DEFAULTS_EXTREMA_SMOOTHING['window']}"
+ f"Invalid extrema_smoothing window {smoothing_window!r}, must be an integer >= 3, using default {DEFAULTS_EXTREMA_SMOOTHING['window']!r}"
)
smoothing_window = DEFAULTS_EXTREMA_SMOOTHING["window"]
or smoothing_beta <= 0
):
logger.warning(
- f"Invalid extrema_smoothing beta {smoothing_beta}, must be a finite number > 0, using default {DEFAULTS_EXTREMA_SMOOTHING['beta']}"
+ f"Invalid extrema_smoothing beta {smoothing_beta!r}, must be a finite number > 0, using default {DEFAULTS_EXTREMA_SMOOTHING['beta']!r}"
)
smoothing_beta = DEFAULTS_EXTREMA_SMOOTHING["beta"]
)
if not isinstance(smoothing_polyorder, int) or smoothing_polyorder < 1:
logger.warning(
- f"Invalid extrema_smoothing polyorder {smoothing_polyorder}, must be an integer >= 1, using default {DEFAULTS_EXTREMA_SMOOTHING['polyorder']}"
+ f"Invalid extrema_smoothing polyorder {smoothing_polyorder!r}, must be an integer >= 1, using default {DEFAULTS_EXTREMA_SMOOTHING['polyorder']!r}"
)
smoothing_polyorder = DEFAULTS_EXTREMA_SMOOTHING["polyorder"]
)
if smoothing_mode not in set(SMOOTHING_MODES):
logger.warning(
- f"Invalid extrema_smoothing mode '{smoothing_mode}', using default '{SMOOTHING_MODES[0]}'"
+ f"Invalid extrema_smoothing mode {smoothing_mode!r}, using default {SMOOTHING_MODES[0]!r}"
)
smoothing_mode = SMOOTHING_MODES[0]
or not np.isfinite(smoothing_sigma)
):
logger.warning(
- f"Invalid extrema_smoothing sigma {smoothing_sigma}, must be a positive finite number, using default {DEFAULTS_EXTREMA_SMOOTHING['sigma']}"
+ f"Invalid extrema_smoothing sigma {smoothing_sigma!r}, must be a positive finite number, using default {DEFAULTS_EXTREMA_SMOOTHING['sigma']!r}"
)
smoothing_sigma = DEFAULTS_EXTREMA_SMOOTHING["sigma"]
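# Editor's note: every hunk in this validation block follows the same
# warn-and-fall-back shape; a generic helper (hypothetical, not proposed for the
# codebase) makes the pattern explicit:
import logging

logger = logging.getLogger(__name__)

def validated_choice(name, value, allowed, default):
    # membership check with the same !r-formatted warning the hunks above use
    if value in allowed:
        return value
    logger.warning(f"Invalid {name} {value!r}, using default {default!r}")
    return default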
if len(pivots_indices) == 0:
logger.warning(
- f"{pair}: no extrema to label (label_period={QuickAdapterV3._td_format(label_period)} / {label_period_candles=} / {label_natr_ratio=:.2f})"
+ f"[{pair}] No extrema to label | label_period: {QuickAdapterV3._td_format(label_period)} | {label_period_candles=} | {label_natr_ratio=:.2f}"
)
else:
logger.info(
- f"{pair}: labeled {len(pivots_indices)} extrema (label_period={QuickAdapterV3._td_format(label_period)} / {label_period_candles=} / {label_natr_ratio=:.2f})"
+ f"[{pair}] Labeled {len(pivots_indices)} extrema | label_period: {QuickAdapterV3._td_format(label_period)} | {label_period_candles=} | {label_natr_ratio=:.2f}"
)
dataframe.loc[pivots_indices, EXTREMA_COLUMN] = pivots_directions
if debug:
extrema = dataframe[EXTREMA_COLUMN]
- logger.info(f"{extrema.to_numpy()=}")
+ logger.debug(f"{extrema.to_numpy()=}")
n_extrema: int = calculate_n_extrema(extrema)
- logger.info(f"{n_extrema=}")
+ logger.debug(f"{n_extrema=}")
return dataframe
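# Editor's note: the `=` specifier in the debug lines above self-labels the
# expression (Python 3.8+), which is why no hand-written prefix is needed:
n_extrema = 3
assert f"{n_extrema=}" == "n_extrema=3"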
def populate_indicators(
return trade_kama_natr_values[-1]
except Exception as e:
logger.warning(
- f"{pair}: failed to calculate trade NATR KAMA: {e!r}, falling back to last trade NATR value",
+ f"[{pair}] Failed to calculate trade NATR KAMA: {e!r}, falling back to last trade NATR value",
exc_info=True,
)
return label_natr.iloc[-1]
try:
callback()
except Exception as e:
- logger.error(f"{pair}: callback execution failed: {e!r}", exc_info=True)
+ logger.error(
+ f"[{pair}] Callback execution failed: {e!r}", exc_info=True
+ )
threshold_secs = 10 * candle_duration_secs
keys_to_remove = [
n_outliers = trade.get_custom_data("n_outliers", 0)
n_outliers += 1
logger.warning(
- f"{pair}: detected new predictions outlier ({n_outliers=}) on trade {trade.id}"
+ f"[{pair}] Detected new predictions outlier ({n_outliers=}) on trade {trade.id}"
)
trade.set_custom_data("n_outliers", n_outliers)
trade.set_custom_data("last_outlier_date", last_candle_date.isoformat())