From 34d0c56c8154b89bc23a798325fd9bcd3966edb3 Mon Sep 17 00:00:00 2001 From: =?utf8?q?J=C3=A9r=C3=B4me=20Benoit?= Date: Fri, 21 Nov 2025 14:31:40 +0100 Subject: [PATCH] feat(qav3): add more extrema weighting normalization methods MIME-Version: 1.0 Content-Type: text/plain; charset=utf8 Content-Transfer-Encoding: 8bit Signed-off-by: Jérôme Benoit --- README.md | 2 +- .../user_data/strategies/QuickAdapterV3.py | 6 +- quickadapter/user_data/strategies/Utils.py | 102 +++++++++++++++--- 3 files changed, 91 insertions(+), 19 deletions(-) diff --git a/README.md b/README.md index 804b289..cddb991 100644 --- a/README.md +++ b/README.md @@ -57,7 +57,7 @@ docker compose up -d --build | freqai.extrema_smoothing.beta | 8.0 | float > 0 | Kaiser kernel shape parameter. | | _Extrema weighting_ | | | | | freqai.extrema_weighting.strategy | `none` | enum {`none`,`threshold`} | Weighting strategy applied before smoothing. | -| freqai.extrema_weighting.normalization | `minmax` | enum {`minmax`,`l1`,`none`} | Normalization method for weights. | +| freqai.extrema_weighting.normalization | `minmax` | enum {`minmax`,`zscore`,`l1`,`l2`,`none`} | Normalization method for weights. | | freqai.extrema_weighting.gamma | 1.0 | float (0,10] | Contrast exponent applied after normalization (>1 emphasizes extremes, 0<gamma<1 compresses). | | freqai.label_period_candles | 100 | int >= 1 | Zigzag labeling NATR horizon. 
| diff --git a/quickadapter/user_data/strategies/QuickAdapterV3.py b/quickadapter/user_data/strategies/QuickAdapterV3.py index 50ba6d0..d2ec3a4 100644 --- a/quickadapter/user_data/strategies/QuickAdapterV3.py +++ b/quickadapter/user_data/strategies/QuickAdapterV3.py @@ -637,7 +637,7 @@ class QuickAdapterV3(IStrategy): smoothing_method = str( extrema_smoothing.get("method", DEFAULTS_EXTREMA_SMOOTHING["method"]) ) - if smoothing_method not in SMOOTHING_METHODS: + if smoothing_method not in set(SMOOTHING_METHODS): logger.warning( f"{pair}: invalid extrema_smoothing method '{smoothing_method}', using default '{SMOOTHING_METHODS[0]}'" ) @@ -656,7 +656,7 @@ class QuickAdapterV3(IStrategy): weighting_strategy = str( extrema_weighting.get("strategy", DEFAULTS_EXTREMA_WEIGHTING["strategy"]) ) - if weighting_strategy not in WEIGHT_STRATEGIES: + if weighting_strategy not in set(WEIGHT_STRATEGIES): logger.warning( f"{pair}: invalid extrema_weighting strategy '{weighting_strategy}', using default '{WEIGHT_STRATEGIES[0]}'" ) @@ -666,7 +666,7 @@ class QuickAdapterV3(IStrategy): "normalization", DEFAULTS_EXTREMA_WEIGHTING["normalization"] ) ) - if weighting_normalization not in NORMALIZATION_TYPES: + if weighting_normalization not in set(NORMALIZATION_TYPES): logger.warning( f"{pair}: invalid extrema_weighting normalization '{weighting_normalization}', using default '{NORMALIZATION_TYPES[0]}'" ) diff --git a/quickadapter/user_data/strategies/Utils.py b/quickadapter/user_data/strategies/Utils.py index 50023dc..6b5cbda 100644 --- a/quickadapter/user_data/strategies/Utils.py +++ b/quickadapter/user_data/strategies/Utils.py @@ -21,8 +21,14 @@ T = TypeVar("T", pd.Series, float) WeightStrategy = Literal["none", "threshold"] WEIGHT_STRATEGIES: Final[tuple[WeightStrategy, ...]] = ("none", "threshold") -NormalizationType = Literal["minmax", "l1", "none"] -NORMALIZATION_TYPES: Final[tuple[NormalizationType, ...]] = ("minmax", "l1", "none") +NormalizationType = Literal["minmax", "zscore", 
"l1", "l2", "none"] +NORMALIZATION_TYPES: Final[tuple[NormalizationType, ...]] = ( + "minmax", # 0 + "zscore", # 1 + "l1", # 2 + "l2", # 3 + "none", # 4 +) SmoothingKernel = Literal["gaussian", "kaiser", "triang"] SmoothingMethod = Union[SmoothingKernel, Literal["smm", "sma"]] @@ -166,6 +172,50 @@ def smooth_extrema( ) +def zscore_normalize_weights( + weights: NDArray[np.floating], + rescale_to_unit_range: bool = True, +) -> NDArray[np.floating]: + if weights.size == 0: + return weights + + weights = weights.astype(float, copy=False) + + if np.isnan(weights).any(): + return np.full_like(weights, 1.0, dtype=float) + + if weights.size == 1 or np.allclose(weights, weights[0]): + if rescale_to_unit_range: + return np.full_like(weights, 1.0, dtype=float) + else: + return np.zeros_like(weights, dtype=float) + + try: + z_scores = sp.stats.zscore(weights, ddof=1, nan_policy="raise") + except Exception: + return np.full_like(weights, 1.0, dtype=float) + + if np.isnan(z_scores).any() or not np.isfinite(z_scores).all(): + return np.full_like(weights, 1.0, dtype=float) + + if not rescale_to_unit_range: + return z_scores + + z_min = np.min(z_scores) + z_max = np.max(z_scores) + z_range = z_max - z_min + + if np.isclose(z_range, 0.0): + return np.full_like(weights, 1.0, dtype=float) + + normalized_weights = (z_scores - z_min) / z_range + + if np.isnan(normalized_weights).any(): + return np.full_like(weights, 1.0, dtype=float) + + return normalized_weights + + def normalize_weights( weights: NDArray[np.floating], normalization: NormalizationType = DEFAULTS_EXTREMA_WEIGHTING["normalization"], @@ -173,9 +223,11 @@ def normalize_weights( ) -> NDArray[np.floating]: if weights.size == 0: return weights - if normalization == NORMALIZATION_TYPES[2]: # "none" + if normalization == NORMALIZATION_TYPES[4]: # "none" return weights + normalized_weights: NDArray[np.floating] + if normalization == NORMALIZATION_TYPES[0]: # "minmax" weights = weights.astype(float, copy=False) if 
np.isnan(weights).any(): @@ -190,26 +242,46 @@ def normalize_weights( normalized_weights = (weights - w_min) / w_range if np.isnan(normalized_weights).any(): return np.full_like(weights, 1.0, dtype=float) - if gamma != 1.0 and np.isfinite(gamma) and gamma > 0: - normalized_weights = np.power(normalized_weights, gamma) - if np.isnan(normalized_weights).any(): - return np.full_like(weights, 1.0, dtype=float) - return normalized_weights - if normalization == NORMALIZATION_TYPES[1]: # "l1" + elif normalization == NORMALIZATION_TYPES[1]: # "zscore" + normalized_weights = zscore_normalize_weights( + weights, rescale_to_unit_range=True + ) + + elif normalization == NORMALIZATION_TYPES[2]: # "l1" weights_sum = np.sum(np.abs(weights)) if weights_sum <= 0 or not np.isfinite(weights_sum): return np.full_like(weights, 1.0, dtype=float) normalized_weights = weights / weights_sum if np.isnan(normalized_weights).any(): return np.full_like(weights, 1.0, dtype=float) - if gamma != 1.0 and np.isfinite(gamma) and gamma > 0: - normalized_weights = np.power(normalized_weights, gamma) - if np.isnan(normalized_weights).any(): - return np.full_like(weights, 1.0, dtype=float) - return normalized_weights - raise ValueError(f"Unknown normalization method: {normalization}") + elif normalization == NORMALIZATION_TYPES[3]: # "l2" + weights = weights.astype(float, copy=False) + if np.isnan(weights).any(): + return np.full_like(weights, 1.0, dtype=float) + + l2_norm = np.linalg.norm(weights, ord=2) + + if l2_norm <= 0 or not np.isfinite(l2_norm): + return np.full_like(weights, 1.0, dtype=float) + + normalized_weights = weights / l2_norm + + if np.isnan(normalized_weights).any(): + return np.full_like(weights, 1.0, dtype=float) + + else: + raise ValueError(f"Unknown normalization method: {normalization}") + + if not np.isclose(gamma, 1.0) and np.isfinite(gamma) and gamma > 0: + normalized_weights = np.power(np.abs(normalized_weights), gamma) * np.sign( + normalized_weights + ) + if 
np.isnan(normalized_weights).any(): + return np.full_like(weights, 1.0, dtype=float) + + return normalized_weights def calculate_extrema_weights( -- 2.43.0