Piment Noir Git Repositories - freqai-strategies.git/commitdiff
fix(qav3): correct normalization bounds and consolidate defaults
author: Jérôme Benoit <jerome.benoit@piment-noir.org>
Fri, 21 Nov 2025 17:21:13 +0000 (18:21 +0100)
committer: Jérôme Benoit <jerome.benoit@piment-noir.org>
Fri, 21 Nov 2025 17:21:13 +0000 (18:21 +0100)
- Fix tanh normalization formula to guarantee [0, gain] range
  Update formula: gain * 0.5 * (tanh(scale * z) + 1.0)
  Update default tanh_gain: 0.5 → 1.0
- Fix robust normalization to ensure [0,1] bounded output
  Add min-max rescaling after IQR standardization
- Refactor normalization functions to use canonical DEFAULTS_EXTREMA_WEIGHTING
  Eliminates hardcoded values in _normalize_{robust,softmax,tanh,rank}

README.md
quickadapter/user_data/strategies/Utils.py

index 1a19d8a2fea1ed599b8f077d300ef5f2833ff8ea..d983bf99165f655401978be08ea735152f69a659 100644 (file)
--- a/README.md
+++ b/README.md
@@ -61,7 +61,7 @@ docker compose up -d --build
 | freqai.extrema_weighting.gamma                       | 1.0               | float (0,10]                                                                                                                     | Contrast exponent applied after normalization (>1 emphasizes extremes, 0<gamma<1 softens).                                                                                                                 |
 | freqai.extrema_weighting.softmax_temperature         | 1.0               | float > 0                                                                                                                        | Temperature parameter for softmax normalization (lower values sharpen distribution, higher values flatten it).                                                                                             |
 | freqai.extrema_weighting.tanh_scale                  | 1.0               | float > 0                                                                                                                        | Scale parameter for tanh normalization.                                                                                                                                                                    |
-| freqai.extrema_weighting.tanh_gain                   | 0.5               | float > 0                                                                                                                        | Gain parameter for tanh normalization.                                                                                                                                                                     |
+| freqai.extrema_weighting.tanh_gain                   | 1.0               | float > 0                                                                                                                        | Gain parameter for tanh normalization.                                                                                                                                                                     |
 | freqai.extrema_weighting.robust_quantiles            | [0.25, 0.75]      | list[float] where 0 <= q_low < q_high <= 1                                                                                       | Quantile range for robust normalization.                                                                                                                                                                   |
 | freqai.extrema_weighting.rank_method                 | `average`         | enum {`average`,`min`,`max`,`dense`,`ordinal`}                                                                                   | Ranking method for rank normalization.                                                                                                                                                                     |
 | _Feature parameters_                                 |                   |                                                                                                                                  |                                                                                                                                                                                                            |
index 2f0ad92fa57150b43c4f60db3eb3d3e4e85364bc..97ee6453a963e96fc32624f197879e5b7764dab4 100644 (file)
@@ -68,8 +68,7 @@ DEFAULTS_EXTREMA_WEIGHTING: Final[dict[str, Any]] = {
     "strategy": WEIGHT_STRATEGIES[0],  # "none"
     "softmax_temperature": 1.0,
     "tanh_scale": 1.0,
-    "tanh_gain": 0.5,
-    "robust_quantiles": (0.25, 0.75),
+    "tanh_gain": 1.0,
     "rank_method": RANK_METHODS[0],  # "average"
 }
 
@@ -274,7 +273,8 @@ def _normalize_l2(weights: NDArray[np.floating]) -> NDArray[np.floating]:
 
 
 def _normalize_robust(
-    weights: NDArray[np.floating], quantiles: tuple[float, float] = (0.25, 0.75)
+    weights: NDArray[np.floating],
+    quantiles: tuple[float, float] = DEFAULTS_EXTREMA_WEIGHTING["robust_quantiles"],
 ) -> NDArray[np.floating]:
     weights = weights.astype(float, copy=False)
     if np.isnan(weights).any():
@@ -287,12 +287,22 @@ def _normalize_robust(
     if np.isclose(iqr, 0.0):
         return np.full_like(weights, float(DEFAULT_EXTREMA_WEIGHT), dtype=float)
 
-    normalized_weights = (weights - median) / iqr
+    robust_scores = (weights - median) / iqr
+
+    r_min = np.min(robust_scores)
+    r_max = np.max(robust_scores)
+    r_range = r_max - r_min
+
+    if np.isclose(r_range, 0.0):
+        return np.full_like(weights, float(DEFAULT_EXTREMA_WEIGHT), dtype=float)
+
+    normalized_weights = (robust_scores - r_min) / r_range
     return normalized_weights
 
 
 def _normalize_softmax(
-    weights: NDArray[np.floating], temperature: float = 1.0
+    weights: NDArray[np.floating],
+    temperature: float = DEFAULTS_EXTREMA_WEIGHTING["softmax_temperature"],
 ) -> NDArray[np.floating]:
     weights = weights.astype(float, copy=False)
     if np.isnan(weights).any():
@@ -303,19 +313,22 @@ def _normalize_softmax(
 
 
 def _normalize_tanh(
-    weights: NDArray[np.floating], scale: float = 1.0, gain: float = 0.5
+    weights: NDArray[np.floating],
+    scale: float = DEFAULTS_EXTREMA_WEIGHTING["tanh_scale"],
+    gain: float = DEFAULTS_EXTREMA_WEIGHTING["tanh_gain"],
 ) -> NDArray[np.floating]:
     weights = weights.astype(float, copy=False)
     if np.isnan(weights).any():
         return np.full_like(weights, float(DEFAULT_EXTREMA_WEIGHT), dtype=float)
 
     z_scores = _normalize_zscore(weights, rescale_to_unit_range=False)
-    normalized_weights = gain * (np.tanh(scale * z_scores) + 1.0)
+    normalized_weights = gain * 0.5 * (np.tanh(scale * z_scores) + 1.0)
     return normalized_weights
 
 
 def _normalize_rank(
-    weights: NDArray[np.floating], method: RankMethod = "average"
+    weights: NDArray[np.floating],
+    method: RankMethod = DEFAULTS_EXTREMA_WEIGHTING["rank_method"],
 ) -> NDArray[np.floating]:
     weights = weights.astype(float, copy=False)
     if np.isnan(weights).any():