From e2964e21e0c1b7238c45f7a1b3ad538a936f7c2b Mon Sep 17 00:00:00 2001 From: =?utf8?q?J=C3=A9r=C3=B4me=20Benoit?= Date: Tue, 9 Dec 2025 14:25:27 +0100 Subject: [PATCH] refactor(qav3): remove unneeded intermediate variables MIME-Version: 1.0 Content-Type: text/plain; charset=utf8 Content-Transfer-Encoding: 8bit Signed-off-by: Jérôme Benoit --- .../freqaimodels/QuickAdapterRegressorV3.py | 23 +++++++++------ .../user_data/strategies/QuickAdapterV3.py | 29 +++++++++---------- quickadapter/user_data/strategies/Utils.py | 23 +++++---------- 3 files changed, 36 insertions(+), 39 deletions(-) diff --git a/quickadapter/user_data/freqaimodels/QuickAdapterRegressorV3.py b/quickadapter/user_data/freqaimodels/QuickAdapterRegressorV3.py index b53674c..b3f4248 100644 --- a/quickadapter/user_data/freqaimodels/QuickAdapterRegressorV3.py +++ b/quickadapter/user_data/freqaimodels/QuickAdapterRegressorV3.py @@ -230,8 +230,7 @@ class QuickAdapterRegressorV3(BaseRegressionModel): Raises: ValueError: If no trading pairs are configured """ - n_pairs = len(self.pairs) - default_label_frequency_candles = max(2, 2 * n_pairs) + default_label_frequency_candles = max(2, 2 * len(self.pairs)) label_frequency_candles = self.config.get("feature_parameters", {}).get( "label_frequency_candles" @@ -331,11 +330,11 @@ class QuickAdapterRegressorV3(BaseRegressionModel): cache_key = label_frequency_candles if cache_key not in self._optuna_label_candle_pool_full_cache: half_label_frequency_candles = int(label_frequency_candles / 2) - min_offset = -half_label_frequency_candles - max_offset = half_label_frequency_candles self._optuna_label_candle_pool_full_cache[cache_key] = [ max(1, label_frequency_candles + offset) - for offset in range(min_offset, max_offset + 1) + for offset in range( + -half_label_frequency_candles, half_label_frequency_candles + 1 + ) ] return copy.deepcopy(self._optuna_label_candle_pool_full_cache[cache_key]) @@ -871,8 +870,10 @@ class QuickAdapterRegressorV3(BaseRegressionModel): 
fit_live_predictions_candles: int, label_period_candles: int, ) -> tuple[float, float]: - label_period_cycles = fit_live_predictions_candles / label_period_candles - thresholds_candles = max(2, int(label_period_cycles)) * label_period_candles + thresholds_candles = ( + max(2, int(fit_live_predictions_candles / label_period_candles)) + * label_period_candles + ) pred_extrema = pred_df.get(EXTREMA_COLUMN).iloc[-thresholds_candles:].copy() @@ -2279,8 +2280,12 @@ def label_objective( "label_natr_ratio", min_label_natr_ratio, max_label_natr_ratio, step=0.05 ) - label_period_cycles = fit_live_predictions_candles / label_period_candles - df = df.iloc[-(max(2, int(label_period_cycles)) * label_period_candles) :] + df = df.iloc[ + -( + max(2, int(fit_live_predictions_candles / label_period_candles)) + * label_period_candles + ) : + ] if df.empty: return 0, 0.0, 0.0 diff --git a/quickadapter/user_data/strategies/QuickAdapterV3.py b/quickadapter/user_data/strategies/QuickAdapterV3.py index 1d8ea62..c6fea7c 100644 --- a/quickadapter/user_data/strategies/QuickAdapterV3.py +++ b/quickadapter/user_data/strategies/QuickAdapterV3.py @@ -359,8 +359,7 @@ class QuickAdapterV3(IStrategy): if n == 0: return (0, None) dates = df.get("date") - last_date = dates.iloc[-1] if dates is not None and not dates.empty else None - return (n, last_date) + return (n, dates.iloc[-1] if dates is not None and not dates.empty else None) def _init_reversal_confirmation_defaults(self) -> None: reversal_confirmation = self.config.get("reversal_confirmation", {}) @@ -489,8 +488,9 @@ class QuickAdapterV3(IStrategy): zero_lag=True, normalize=True, ) - psar = ta.SAR(dataframe, acceleration=0.02, maximum=0.2) - dataframe["%-diff_to_psar"] = closes - psar + dataframe["%-diff_to_psar"] = closes - ta.SAR( + dataframe, acceleration=0.02, maximum=0.2 + ) kc = pta.kc( highs, lows, @@ -1045,9 +1045,9 @@ class QuickAdapterV3(IStrategy): current_date = dates.iloc[-1] if isna(current_date): return None - 
trade_duration_minutes = (current_date - entry_date).total_seconds() / 60.0 return int( - trade_duration_minutes / timeframe_to_minutes(self.config.get("timeframe")) + ((current_date - entry_date).total_seconds() / 60.0) + / timeframe_to_minutes(self.config.get("timeframe")) ) @staticmethod @@ -1094,11 +1094,10 @@ class QuickAdapterV3(IStrategy): max_weight: float = 1.0, weighting_exponent: float = 1.5, ) -> float: - normalized_distance_from_center = abs(quantile - 0.5) * 2.0 return ( min_weight + (max_weight - min_weight) - * normalized_distance_from_center**weighting_exponent + * (abs(quantile - 0.5) * 2.0) ** weighting_exponent ) entry_weight = calculate_weight(entry_quantile) @@ -1202,9 +1201,10 @@ class QuickAdapterV3(IStrategy): def get_trade_exit_stage(trade: Trade) -> int: n_open_orders = 0 if trade.has_open_orders: - exit_side = "buy" if trade.is_short else "sell" n_open_orders = sum( - 1 for open_order in trade.open_orders if open_order.side == exit_side + 1 + for open_order in trade.open_orders + if open_order.side == ("buy" if trade.is_short else "sell") ) return trade.nr_of_successful_exits + n_open_orders @@ -1271,8 +1271,9 @@ class QuickAdapterV3(IStrategy): timestamp = int(current_time.timestamp()) candle_duration_secs = max(1, int(self._candle_duration_secs)) candle_start_secs = (timestamp // candle_duration_secs) * candle_duration_secs - callback_hash = get_callable_sha256(callback) - key = hashlib.sha256(f"{pair}\x00{callback_hash}".encode()).hexdigest() + key = hashlib.sha256( + f"{pair}\x00{get_callable_sha256(callback)}".encode() + ).hexdigest() if candle_start_secs != self.last_candle_start_secs.get(key): self.last_candle_start_secs[key] = candle_start_secs try: @@ -1947,9 +1948,7 @@ class QuickAdapterV3(IStrategy): sigma_total = sigma_global + sigma_recent if sigma_total <= 0: return alpha_base - ratio = sigma_global / sigma_total - alpha_vol = alpha_base * (ratio**gamma) - return max(min_alpha, alpha_vol) + return max(min_alpha, alpha_base 
* ((sigma_global / sigma_total) ** gamma)) alpha_v = volatility_adjusted_alpha( alpha_len, std_v_global, std_v_recent, min_alpha=min_alpha diff --git a/quickadapter/user_data/strategies/Utils.py b/quickadapter/user_data/strategies/Utils.py index 70f7990..616f2f6 100644 --- a/quickadapter/user_data/strategies/Utils.py +++ b/quickadapter/user_data/strategies/Utils.py @@ -387,8 +387,7 @@ def _normalize_sigmoid( if scale <= 0 or not np.isfinite(scale): scale = 1.0 - scaled = scale * weights - return sp.special.expit(scaled) + return sp.special.expit(scale * weights) def _normalize_minmax( @@ -411,11 +410,9 @@ def _normalize_minmax( w_range = w_max - w_min if np.isclose(w_range, 0.0): - range_midpoint = midpoint(range[0], range[1]) - return np.full_like(weights, range_midpoint, dtype=float) + return np.full_like(weights, midpoint(range[0], range[1]), dtype=float) - normalized = (weights - w_min) / w_range - return range[0] + normalized * (range[1] - range[0]) + return range[0] + ((weights - w_min) / w_range) * (range[1] - range[0]) def _normalize_l1(weights: NDArray[np.floating]) -> NDArray[np.floating]: @@ -423,8 +420,7 @@ def _normalize_l1(weights: NDArray[np.floating]) -> NDArray[np.floating]: weights_sum = np.sum(np.abs(weights)) if weights_sum <= 0 or not np.isfinite(weights_sum): return np.full_like(weights, float(DEFAULT_EXTREMA_WEIGHT), dtype=float) - normalized_weights = weights / weights_sum - return normalized_weights + return weights / weights_sum def _normalize_l2(weights: NDArray[np.floating]) -> NDArray[np.floating]: @@ -438,8 +434,7 @@ def _normalize_l2(weights: NDArray[np.floating]) -> NDArray[np.floating]: if l2_norm <= 0 or not np.isfinite(l2_norm): return np.full_like(weights, float(DEFAULT_EXTREMA_WEIGHT), dtype=float) - normalized_weights = weights / l2_norm - return normalized_weights + return weights / l2_norm def _normalize_softmax( @@ -469,8 +464,7 @@ def _normalize_rank( if n <= 1: return np.full_like(weights, float(DEFAULT_EXTREMA_WEIGHT), 
dtype=float) - normalized_weights = (ranks - 1) / (n - 1) - return normalized_weights + return (ranks - 1) / (n - 1) def normalize_weights( @@ -1489,9 +1483,8 @@ def get_optuna_study_model_parameters( for param, (default_min, default_max) in default_ranges.items(): center_value = model_training_best_parameters.get(param) - if center_value is None: - center_value = midpoint(default_min, default_max) - elif not isinstance(center_value, (int, float)) or not np.isfinite( + center_value = midpoint(default_min, default_max) if center_value is None else center_value + if not isinstance(center_value, (int, float)) or not np.isfinite( center_value ): continue -- 2.43.0