Piment Noir Git Repositories - freqai-strategies.git/commitdiff
refactor: consolidate more constants
author     Jérôme Benoit <jerome.benoit@piment-noir.org>
           Wed, 19 Nov 2025 11:30:12 +0000 (12:30 +0100)
committer  Jérôme Benoit <jerome.benoit@piment-noir.org>
           Wed, 19 Nov 2025 11:30:12 +0000 (12:30 +0100)
Signed-off-by: Jérôme Benoit <jerome.benoit@piment-noir.org>
16 files changed:
ReforceXY/reward_space_analysis/test_reward_space_analysis_cli.py
ReforceXY/reward_space_analysis/tests/components/test_transforms.py
ReforceXY/reward_space_analysis/tests/conftest.py
ReforceXY/reward_space_analysis/tests/constants.py
ReforceXY/reward_space_analysis/tests/helpers/assertions.py
ReforceXY/reward_space_analysis/tests/helpers/configs.py
ReforceXY/reward_space_analysis/tests/integration/test_report_formatting.py
ReforceXY/reward_space_analysis/tests/pbrs/test_pbrs.py
ReforceXY/reward_space_analysis/tests/robustness/test_robustness.py
ReforceXY/reward_space_analysis/tests/statistics/test_feature_analysis_failures.py
ReforceXY/reward_space_analysis/tests/statistics/test_statistics.py
ReforceXY/reward_space_analysis/tests/test_base.py
ReforceXY/user_data/freqaimodels/ReforceXY.py
ReforceXY/user_data/strategies/RLAgentStrategy.py
quickadapter/user_data/freqaimodels/QuickAdapterRegressorV3.py
quickadapter/user_data/strategies/QuickAdapterV3.py

diff --git a/ReforceXY/reward_space_analysis/test_reward_space_analysis_cli.py b/ReforceXY/reward_space_analysis/test_reward_space_analysis_cli.py
index dc1a2e358394b0e791e8b8cfd5d51e31448ea996..ec8bd2dbe7921e7e8beb125db49425c11275e201 100644
--- a/ReforceXY/reward_space_analysis/test_reward_space_analysis_cli.py
+++ b/ReforceXY/reward_space_analysis/test_reward_space_analysis_cli.py
@@ -54,6 +54,7 @@ try:
 except ImportError:
     from typing_extensions import NotRequired, Required  # Python <3.11
 
+
 ConfigTuple = Tuple[str, str, float, int, int, int]
 
 SUMMARY_FILENAME = "reward_space_cli.json"
@@ -103,14 +104,16 @@ def build_arg_matrix(
     max_scenarios: int = 40,
     shuffle_seed: Optional[int] = None,
 ) -> List[ConfigTuple]:
+    # Constants from reward_space_analysis.py
+    # ALLOWED_EXIT_POTENTIAL_MODES and ATTENUATION_MODES_WITH_LEGACY
     exit_potential_modes = [
         "canonical",
         "non_canonical",
         "progressive_release",
-        "retain_previous",
         "spike_cancel",
+        "retain_previous",
     ]
-    exit_attenuation_modes = ["linear", "sqrt", "power", "half_life", "legacy"]
+    exit_attenuation_modes = ["sqrt", "linear", "power", "half_life", "legacy"]
     potential_gammas = [0.0, 0.5, 0.95, 0.999]
     hold_enabled = [0, 1]
     entry_additive_enabled = [0, 1]
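
Note: `build_arg_matrix` enumerates CLI scenarios from the option lists above. A minimal sketch of the enumeration pattern, assuming a plain Cartesian product, optional seeded shuffling, and truncation to `max_scenarios`; the sixth axis (`exit_additive_enabled`) is assumed from the `ConfigTuple` arity and is not visible in the hunk:

import itertools
import random
from typing import List, Optional, Tuple

ConfigTuple = Tuple[str, str, float, int, int, int]

def build_arg_matrix_sketch(
    max_scenarios: int = 40,
    shuffle_seed: Optional[int] = None,
) -> List[ConfigTuple]:
    # Option lists mirror the hunk above.
    exit_potential_modes = ["canonical", "non_canonical", "progressive_release",
                            "spike_cancel", "retain_previous"]
    exit_attenuation_modes = ["sqrt", "linear", "power", "half_life", "legacy"]
    potential_gammas = [0.0, 0.5, 0.95, 0.999]
    hold_enabled = [0, 1]
    entry_additive_enabled = [0, 1]
    exit_additive_enabled = [0, 1]  # assumed: completes the six-element ConfigTuple

    # Full Cartesian product, optionally shuffled for coverage diversity,
    # then capped so the CLI run stays fast.
    matrix: List[ConfigTuple] = list(itertools.product(
        exit_potential_modes, exit_attenuation_modes, potential_gammas,
        hold_enabled, entry_additive_enabled, exit_additive_enabled,
    ))
    if shuffle_seed is not None:
        random.Random(shuffle_seed).shuffle(matrix)
    return matrix[:max_scenarios]
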
diff --git a/ReforceXY/reward_space_analysis/tests/components/test_transforms.py b/ReforceXY/reward_space_analysis/tests/components/test_transforms.py
index 0b49410fbaa42de675d263aa299a83b08a1bb136..bbefdbe143c00afe04bb941fe50584d0d9b9c0a9 100644
--- a/ReforceXY/reward_space_analysis/tests/components/test_transforms.py
+++ b/ReforceXY/reward_space_analysis/tests/components/test_transforms.py
@@ -8,7 +8,7 @@ import math
 
 import pytest
 
-from reward_space_analysis import apply_transform
+from reward_space_analysis import ALLOWED_TRANSFORMS, apply_transform
 
 from ..test_base import RewardSpaceTestBase
 
@@ -19,8 +19,8 @@ class TestTransforms(RewardSpaceTestBase):
     """Comprehensive transform function tests with parameterized scenarios."""
 
     # Transform function test data
-    SMOOTH_TRANSFORMS = ["tanh", "softsign", "arctan", "sigmoid", "asinh"]
-    ALL_TRANSFORMS = SMOOTH_TRANSFORMS + ["clip"]
+    SMOOTH_TRANSFORMS = [t for t in ALLOWED_TRANSFORMS if t != "clip"]
+    ALL_TRANSFORMS = list(ALLOWED_TRANSFORMS)
 
     def test_transform_exact_values(self):
         """Test transform functions produce exact expected values for specific inputs."""
diff --git a/ReforceXY/reward_space_analysis/tests/conftest.py b/ReforceXY/reward_space_analysis/tests/conftest.py
index fbc00314f0bc7c6730ac69b6a21f581b901b2870..97d9fc487dd0e6845b0ce125593e40dbd68d6d81 100644
--- a/ReforceXY/reward_space_analysis/tests/conftest.py
+++ b/ReforceXY/reward_space_analysis/tests/conftest.py
@@ -12,6 +12,8 @@ import pytest
 
 from reward_space_analysis import DEFAULT_MODEL_REWARD_PARAMETERS
 
+from .constants import SEEDS
+
 
 @pytest.fixture(scope="session")
 def temp_output_dir():
@@ -24,7 +26,7 @@ def temp_output_dir():
 @pytest.fixture(autouse=True)
 def setup_rng():
     """Configure RNG for reproducibility."""
-    np.random.seed(42)
+    np.random.seed(SEEDS.BASE)
 
 
 @pytest.fixture
diff --git a/ReforceXY/reward_space_analysis/tests/constants.py b/ReforceXY/reward_space_analysis/tests/constants.py
index faf0f305bc0b1c1e1d2428ad4e924958bebdbff1..f5293e1c04e9d384265a3dc4af1344f4f09cd844 100644
--- a/ReforceXY/reward_space_analysis/tests/constants.py
+++ b/ReforceXY/reward_space_analysis/tests/constants.py
@@ -81,10 +81,12 @@ class PBRSConfig:
     Attributes:
         TERMINAL_TOL: Terminal potential must be within this tolerance of zero (1e-09)
         MAX_ABS_SHAPING: Maximum absolute shaping value for bounded checks (10.0)
+        TERMINAL_PROBABILITY: Default probability of terminal state in sweeps (0.08)
     """
 
     TERMINAL_TOL: float = 1e-09
     MAX_ABS_SHAPING: float = 10.0
+    TERMINAL_PROBABILITY: float = 0.08
 
 
 @dataclass(frozen=True)
@@ -97,10 +99,12 @@ class StatisticalConfig:
     Attributes:
         BH_FP_RATE_THRESHOLD: Benjamini-Hochberg false positive rate threshold (0.15)
         BOOTSTRAP_DEFAULT_ITERATIONS: Default bootstrap resampling count (100)
+        EXIT_PROBABILITY_THRESHOLD: Probability threshold for exit events (0.15)
     """
 
     BH_FP_RATE_THRESHOLD: float = 0.15
     BOOTSTRAP_DEFAULT_ITERATIONS: int = 100
+    EXIT_PROBABILITY_THRESHOLD: float = 0.15
 
 
 @dataclass(frozen=True)
@@ -116,18 +120,62 @@ class TestSeeds:
         - BOOTSTRAP: Prime number for bootstrap confidence interval tests to ensure
           independence from other random sequences
         - HETEROSCEDASTICITY: Dedicated seed for variance structure validation tests
+        - SMOKE_TEST: Seed for smoke tests
+        - CANONICAL_SWEEP: Seed for canonical PBRS sweep tests
 
     Attributes:
         BASE: Default seed for standard tests (42)
         REPRODUCIBILITY: Seed for reproducibility validation (12345)
         BOOTSTRAP: Seed for bootstrap CI tests (999)
         HETEROSCEDASTICITY: Seed for heteroscedasticity tests (7890)
+        SMOKE_TEST: Seed for smoke tests (7)
+        CANONICAL_SWEEP: Seed for canonical sweep tests (123)
+
+        # PBRS-specific seeds
+        PBRS_INVARIANCE_1: Seed for PBRS invariance test case 1 (913)
+        PBRS_INVARIANCE_2: Seed for PBRS invariance test case 2 (515)
+        PBRS_TERMINAL: Seed for PBRS terminal potential tests (777)
+
+        # Feature analysis failure seeds
+        FEATURE_EMPTY: Seed for empty feature tests (17)
+        FEATURE_PRIME_11: Seed for feature test variant (11)
+        FEATURE_PRIME_13: Seed for feature test variant (13)
+        FEATURE_PRIME_21: Seed for feature test variant (21)
+        FEATURE_PRIME_33: Seed for feature test variant (33)
+        FEATURE_PRIME_47: Seed for feature test variant (47)
+        FEATURE_SMALL_5: Seed for small feature test (5)
+        FEATURE_SMALL_3: Seed for small feature test (3)
+
+        # Report formatting seeds
+        REPORT_FORMAT_1: Seed for report formatting test 1 (234)
+        REPORT_FORMAT_2: Seed for report formatting test 2 (321)
     """
 
     BASE: int = 42
     REPRODUCIBILITY: int = 12345
     BOOTSTRAP: int = 999
     HETEROSCEDASTICITY: int = 7890
+    SMOKE_TEST: int = 7
+    CANONICAL_SWEEP: int = 123
+
+    # PBRS-specific seeds
+    PBRS_INVARIANCE_1: int = 913
+    PBRS_INVARIANCE_2: int = 515
+    PBRS_TERMINAL: int = 777
+
+    # Feature analysis failure seeds
+    FEATURE_EMPTY: int = 17
+    FEATURE_PRIME_11: int = 11
+    FEATURE_PRIME_13: int = 13
+    FEATURE_PRIME_21: int = 21
+    FEATURE_PRIME_33: int = 33
+    FEATURE_PRIME_47: int = 47
+    FEATURE_SMALL_5: int = 5
+    FEATURE_SMALL_3: int = 3
+
+    # Report formatting seeds
+    REPORT_FORMAT_1: int = 234
+    REPORT_FORMAT_2: int = 321
 
 
 @dataclass(frozen=True)
@@ -166,17 +214,17 @@ class TestScenarios:
         DURATION_MEDIUM: Medium duration scenario (200)
         DURATION_LONG: Long duration scenario (300)
         DURATION_SCENARIOS: Standard duration test sequence
+        SAMPLE_SIZE_TINY: Tiny sample size for smoke tests (50)
         SAMPLE_SIZE_SMALL: Small sample size for quick tests (100)
         SAMPLE_SIZE_MEDIUM: Medium sample size for standard tests (400)
         SAMPLE_SIZE_LARGE: Large sample size for statistical power (800)
-        DEFAULT_SAMPLE_SIZE: Default for most tests (400)
+        SAMPLE_SIZE_CONST_DF: Sample size for constant dataframes (64)
+        SAMPLE_SIZE_SHIFT_SCALE: Sample size for shift/scale tests (256)
         PBRS_SIMULATION_STEPS: Number of steps for PBRS simulation tests (500)
-        NULL_HYPOTHESIS_SAMPLE_SIZE: Sample size for null hypothesis tests (400)
-        BOOTSTRAP_MINIMAL_ITERATIONS: Minimal bootstrap iterations for quick tests (25)
-        BOOTSTRAP_STANDARD_ITERATIONS: Standard bootstrap iterations (100)
-        HETEROSCEDASTICITY_MIN_EXITS: Minimum exits for heteroscedasticity validation (50)
-        CORRELATION_TEST_MIN_SIZE: Minimum sample size for correlation tests (200)
         MONTE_CARLO_ITERATIONS: Monte Carlo simulation iterations (160)
+        PBRS_SWEEP_ITERATIONS: Number of iterations for PBRS sweep tests (120)
+        BOOTSTRAP_MINIMAL_ITERATIONS: Minimal bootstrap iterations for quick tests (25)
+        BOOTSTRAP_EXTENDED_ITERATIONS: Extended bootstrap iterations (200)
     """
 
     DURATION_SHORT: int = 150
@@ -184,19 +232,19 @@ class TestScenarios:
     DURATION_LONG: int = 300
     DURATION_SCENARIOS: tuple[int, ...] = (150, 200, 300)
 
+    SAMPLE_SIZE_TINY: int = 50
     SAMPLE_SIZE_SMALL: int = 100
     SAMPLE_SIZE_MEDIUM: int = 400
     SAMPLE_SIZE_LARGE: int = 800
-    DEFAULT_SAMPLE_SIZE: int = 400
+    SAMPLE_SIZE_CONST_DF: int = 64
+    SAMPLE_SIZE_SHIFT_SCALE: int = 256
 
     # Specialized test scenario sizes
     PBRS_SIMULATION_STEPS: int = 500
-    NULL_HYPOTHESIS_SAMPLE_SIZE: int = 400
-    BOOTSTRAP_MINIMAL_ITERATIONS: int = 25
-    BOOTSTRAP_STANDARD_ITERATIONS: int = 100
-    HETEROSCEDASTICITY_MIN_EXITS: int = 50
-    CORRELATION_TEST_MIN_SIZE: int = 200
     MONTE_CARLO_ITERATIONS: int = 160
+    PBRS_SWEEP_ITERATIONS: int = 120
+    BOOTSTRAP_MINIMAL_ITERATIONS: int = 25
+    BOOTSTRAP_EXTENDED_ITERATIONS: int = 200
 
 
 @dataclass(frozen=True)
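
Note: the constants consolidated above all follow the same frozen-dataclass namespace pattern. A minimal sketch of that pattern (names assumed to mirror tests/constants.py):

from dataclasses import dataclass

@dataclass(frozen=True)
class TestSeeds:
    BASE: int = 42
    REPRODUCIBILITY: int = 12345

# A module-level instance is the single shared entry point.
SEEDS = TestSeeds()

assert SEEDS.BASE == 42   # attribute access documents intent at call sites
# SEEDS.BASE = 7          # would raise dataclasses.FrozenInstanceError
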
diff --git a/ReforceXY/reward_space_analysis/tests/helpers/assertions.py b/ReforceXY/reward_space_analysis/tests/helpers/assertions.py
index f407463f976e7d3d29368412605993245382a5db..40e20d0a54fcc54a566f4572a3c980eeb63066b0 100644
--- a/ReforceXY/reward_space_analysis/tests/helpers/assertions.py
+++ b/ReforceXY/reward_space_analysis/tests/helpers/assertions.py
@@ -14,6 +14,7 @@ from reward_space_analysis import (
     calculate_reward,
 )
 
+from ..constants import TOLERANCE
 from .configs import RewardScenarioConfig, ThresholdTestConfig, ValidationConfig
 
 
@@ -206,8 +207,8 @@ def assert_component_sum_integrity(
 
     Example:
         config = ValidationConfig(
-            tolerance_strict=1e-12,
-            tolerance_relaxed=1e-09,
+            tolerance_strict=TOLERANCE.IDENTITY_STRICT,
+            tolerance_relaxed=TOLERANCE.IDENTITY_RELAXED,
             exclude_components=["reward_shaping"],
             component_description="core components"
         )
@@ -284,7 +285,7 @@ def assert_single_active_component(
 
     Example:
         assert_single_active_component(
-            self, breakdown, "exit_component", 1e-09,
+            self, breakdown, "exit_component", TOLERANCE.IDENTITY_RELAXED,
             ["hold_penalty", "idle_penalty", "invalid_penalty"]
         )
     """
@@ -329,7 +330,7 @@ def assert_single_active_component_with_additives(
 
     Example:
         assert_single_active_component_with_additives(
-            self, breakdown, "exit_component", 1e-09,
+            self, breakdown, "exit_component", TOLERANCE.IDENTITY_RELAXED,
             ["hold_penalty", "idle_penalty"],
             enforce_additives_zero=True
         )
@@ -366,10 +367,10 @@ def assert_reward_calculation_scenarios(
 
     Example:
         config = RewardScenarioConfig(
-            base_factor=90.0,
-            profit_target=0.06,
-            risk_reward_ratio=1.0,
-            tolerance_relaxed=1e-09
+            base_factor=PARAMS.BASE_FACTOR,
+            profit_target=PARAMS.PROFIT_TARGET,
+            risk_reward_ratio=PARAMS.RISK_REWARD_RATIO,
+            tolerance_relaxed=TOLERANCE.IDENTITY_RELAXED
         )
         scenarios = [
             (idle_context, {}, "idle scenario"),
@@ -419,10 +420,10 @@ def assert_parameter_sensitivity_behavior(
 
     Example:
         config = RewardScenarioConfig(
-            base_factor=90.0,
-            profit_target=0.06,
-            risk_reward_ratio=1.0,
-            tolerance_relaxed=1e-09
+            base_factor=PARAMS.BASE_FACTOR,
+            profit_target=PARAMS.PROFIT_TARGET,
+            risk_reward_ratio=PARAMS.RISK_REWARD_RATIO,
+            tolerance_relaxed=TOLERANCE.IDENTITY_RELAXED
         )
         variations = [
             {"exit_additive": 0.0},
@@ -619,7 +620,8 @@ def assert_exit_mode_mathematical_validation(
 
     Example:
         assert_exit_mode_mathematical_validation(
-            self, context, params, 90.0, 0.06, 1.0, 1e-09
+            self, context, params, PARAMS.BASE_FACTOR, PARAMS.PROFIT_TARGET,
+            PARAMS.RISK_REWARD_RATIO, TOLERANCE.IDENTITY_RELAXED
         )
     """
     duration_ratio = context.trade_duration / 100
@@ -704,14 +706,14 @@ def assert_multi_parameter_sensitivity(
 
     Example:
         config = RewardScenarioConfig(
-            base_factor=90.0,
-            profit_target=0.06,
-            risk_reward_ratio=1.0,
-            tolerance_relaxed=1e-09
+            base_factor=PARAMS.BASE_FACTOR,
+            profit_target=PARAMS.PROFIT_TARGET,
+            risk_reward_ratio=PARAMS.RISK_REWARD_RATIO,
+            tolerance_relaxed=TOLERANCE.IDENTITY_RELAXED
         )
         test_cases = [
-            (0.0, 1.0, "zero profit target"),
-            (0.06, 1.0, "standard parameters"),
+            (0.0, PARAMS.RISK_REWARD_RATIO, "zero profit target"),
+            (PARAMS.PROFIT_TARGET, PARAMS.RISK_REWARD_RATIO, "standard parameters"),
             (0.06, 2.0, "high risk/reward ratio"),
         ]
         assert_multi_parameter_sensitivity(
@@ -783,10 +785,11 @@ def assert_hold_penalty_threshold_behavior(
                 (100, "at threshold"),
                 (150, "above threshold"),
             ],
-            tolerance=1e-09
+            tolerance=TOLERANCE.IDENTITY_RELAXED
         )
         assert_hold_penalty_threshold_behavior(
-            self, make_context, params, 90.0, 0.06, 1.0, config
+            self, make_context, params, PARAMS.BASE_FACTOR, PARAMS.PROFIT_TARGET,
+            PARAMS.RISK_REWARD_RATIO, config
         )
     """
     for trade_duration, description in config.test_cases:
@@ -1091,7 +1094,7 @@ def assert_exit_factor_kernel_fallback(
 
     f_bad = exit_factor_fn(base_factor, pnl, pnl_factor, duration_ratio, bad_params)
     f_ref = exit_factor_fn(base_factor, pnl, pnl_factor, duration_ratio, reference_params)
-    test_case.assertAlmostEqual(f_bad, f_ref, delta=1e-12)
+    test_case.assertAlmostEqual(f_bad, f_ref, delta=TOLERANCE.IDENTITY_STRICT)
     test_case.assertGreaterEqual(f_bad, 0.0)
 
 
diff --git a/ReforceXY/reward_space_analysis/tests/helpers/configs.py b/ReforceXY/reward_space_analysis/tests/helpers/configs.py
index f759c142f3d0c5c5376b93e7a82d5986ed3064e6..36a1cb856039fc9f447faaaf0a2f3d72627df29c 100644
--- a/ReforceXY/reward_space_analysis/tests/helpers/configs.py
+++ b/ReforceXY/reward_space_analysis/tests/helpers/configs.py
@@ -7,12 +7,13 @@ reducing parameter proliferation.
 
 Usage:
     from tests.helpers.configs import RewardScenarioConfig
+    from tests.constants import PARAMS, TOLERANCE
 
     config = RewardScenarioConfig(
-        base_factor=90.0,
-        profit_target=0.06,
-        risk_reward_ratio=1.0,
-        tolerance_relaxed=1e-09
+        base_factor=PARAMS.BASE_FACTOR,
+        profit_target=PARAMS.PROFIT_TARGET,
+        risk_reward_ratio=PARAMS.RISK_REWARD_RATIO,
+        tolerance_relaxed=TOLERANCE.IDENTITY_RELAXED
     )
 
     assert_reward_calculation_scenarios(
@@ -23,6 +24,8 @@ Usage:
 from dataclasses import dataclass
 from typing import Callable, Optional
 
+from ..constants import SEEDS, STATISTICAL, TOLERANCE
+
 
 @dataclass
 class RewardScenarioConfig:
@@ -62,8 +65,8 @@ class ValidationConfig:
         component_description: Human-readable description of validated components
     """
 
-    tolerance_strict: float
-    tolerance_relaxed: float
+    tolerance_strict: float = TOLERANCE.IDENTITY_STRICT
+    tolerance_relaxed: float = TOLERANCE.IDENTITY_RELAXED
     exclude_components: Optional[list[str]] = None
     component_description: str = "reward components"
 
@@ -131,7 +134,7 @@ class ExitFactorConfig:
     attenuation_mode: str
     plateau_enabled: bool = False
     plateau_grace: float = 0.0
-    tolerance: float = 1e-09
+    tolerance: float = TOLERANCE.IDENTITY_RELAXED
 
 
 @dataclass
@@ -149,9 +152,9 @@ class StatisticalTestConfig:
         alpha: Significance level
     """
 
-    n_bootstrap: int = 100
+    n_bootstrap: int = STATISTICAL.BOOTSTRAP_DEFAULT_ITERATIONS
     confidence_level: float = 0.95
-    seed: int = 42
+    seed: int = SEEDS.BASE
     adjust_method: Optional[str] = None
     alpha: float = 0.05
 
diff --git a/ReforceXY/reward_space_analysis/tests/integration/test_report_formatting.py b/ReforceXY/reward_space_analysis/tests/integration/test_report_formatting.py
index 7a59e9f86591bbe084327d9b65f7bc62ad8e0276..db963c037294b9d1aa0292a2b552a7e1da97f78c 100644
--- a/ReforceXY/reward_space_analysis/tests/integration/test_report_formatting.py
+++ b/ReforceXY/reward_space_analysis/tests/integration/test_report_formatting.py
@@ -13,7 +13,7 @@ import pytest
 
 from reward_space_analysis import PBRS_INVARIANCE_TOL, write_complete_statistical_analysis
 
-from ..constants import SCENARIOS
+from ..constants import SCENARIOS, SEEDS
 from ..test_base import RewardSpaceTestBase
 
 pytestmark = pytest.mark.integration
@@ -78,7 +78,7 @@ class TestReportFormatting(RewardSpaceTestBase):
             real_df=real_df,
             adjust_method="none",
             strict_diagnostics=False,
-            bootstrap_resamples=SCENARIOS.BOOTSTRAP_STANDARD_ITERATIONS,  # keep test fast
+            bootstrap_resamples=SCENARIOS.SAMPLE_SIZE_SMALL,  # keep test fast
             skip_partial_dependence=kwargs.get("skip_partial_dependence", False),
             skip_feature_analysis=kwargs.get("skip_feature_analysis", False),
         )
@@ -114,7 +114,7 @@ class TestReportFormatting(RewardSpaceTestBase):
     def test_distribution_shift_section_present_with_real_episodes(self):
         """Distribution Shift section renders metrics table when real episodes provided."""
         # Synthetic df (ensure >=10 non-NaN per feature)
-        synth_df = self.make_stats_df(n=60, seed=123)
+        synth_df = self.make_stats_df(n=60, seed=SEEDS.REPORT_FORMAT_1)
         # Real df: shift slightly (different mean) so metrics non-zero
         real_df = synth_df.copy()
         real_df["pnl"] = real_df["pnl"] + 0.001  # small mean shift
@@ -139,7 +139,7 @@ class TestReportFormatting(RewardSpaceTestBase):
     def test_partial_dependence_redundancy_note_emitted(self):
         """Redundancy note appears when both feature analysis and partial dependence skipped."""
         df = self.make_stats_df(
-            n=10, seed=321
+            n=10, seed=SEEDS.REPORT_FORMAT_2
         )  # small but >=4 so skip_feature_analysis flag drives behavior
         content = self._write_report(
             df,
diff --git a/ReforceXY/reward_space_analysis/tests/pbrs/test_pbrs.py b/ReforceXY/reward_space_analysis/tests/pbrs/test_pbrs.py
index 8329bb4f6bb0c03ca0964338d33bbe1176df2044..cf41459c795c6134c2d6152199815718b078e213 100644
--- a/ReforceXY/reward_space_analysis/tests/pbrs/test_pbrs.py
+++ b/ReforceXY/reward_space_analysis/tests/pbrs/test_pbrs.py
@@ -22,6 +22,7 @@ from reward_space_analysis import (
     write_complete_statistical_analysis,
 )
 
+from ..constants import SEEDS
 from ..helpers import (
     assert_non_canonical_shaping_exceeds,
     assert_pbrs_canonical_sum_within_tolerance,
@@ -137,7 +138,7 @@ class TestPBRS(RewardSpaceTestBase):
         )
         df = simulate_samples(
             params={**params, "max_trade_duration_candles": 100},
-            num_samples=SCENARIOS.DEFAULT_SAMPLE_SIZE,
+            num_samples=SCENARIOS.SAMPLE_SIZE_MEDIUM,
             seed=self.SEED,
             base_factor=self.TEST_BASE_FACTOR,
             profit_target=self.TEST_PROFIT_TARGET,
@@ -165,7 +166,7 @@ class TestPBRS(RewardSpaceTestBase):
         )
         df = simulate_samples(
             params={**params, "max_trade_duration_candles": 100},
-            num_samples=SCENARIOS.DEFAULT_SAMPLE_SIZE,
+            num_samples=SCENARIOS.SAMPLE_SIZE_MEDIUM,
             seed=self.SEED,
             base_factor=self.TEST_BASE_FACTOR,
             profit_target=self.TEST_PROFIT_TARGET,
@@ -478,7 +479,7 @@ class TestPBRS(RewardSpaceTestBase):
         df = simulate_samples(
             params={**params, "max_trade_duration_candles": 140},
             num_samples=SCENARIOS.SAMPLE_SIZE_LARGE // 2,  # 500 ≈ 400 (keep original intent)
-            seed=913,
+            seed=SEEDS.PBRS_INVARIANCE_1,
             base_factor=self.TEST_BASE_FACTOR,
             profit_target=self.TEST_PROFIT_TARGET,
             risk_reward_ratio=self.TEST_RR,
@@ -516,7 +517,7 @@ class TestPBRS(RewardSpaceTestBase):
             df_exc = simulate_samples(
                 params={**params, "max_trade_duration_candles": 120},
                 num_samples=250,
-                seed=515,
+                seed=SEEDS.PBRS_INVARIANCE_2,
                 base_factor=self.TEST_BASE_FACTOR,
                 profit_target=self.TEST_PROFIT_TARGET,
                 risk_reward_ratio=self.TEST_RR,
@@ -547,8 +548,8 @@ class TestPBRS(RewardSpaceTestBase):
         )
         df_can = simulate_samples(
             params={**params_can, "max_trade_duration_candles": 120},
-            num_samples=SCENARIOS.DEFAULT_SAMPLE_SIZE,
-            seed=777,
+            num_samples=SCENARIOS.SAMPLE_SIZE_MEDIUM,
+            seed=SEEDS.PBRS_TERMINAL,
             base_factor=self.TEST_BASE_FACTOR,
             profit_target=self.TEST_PROFIT_TARGET,
             risk_reward_ratio=self.TEST_RR,
@@ -566,8 +567,8 @@ class TestPBRS(RewardSpaceTestBase):
         )
         df_non = simulate_samples(
             params={**params_non, "max_trade_duration_candles": 120},
-            num_samples=SCENARIOS.DEFAULT_SAMPLE_SIZE,
-            seed=777,
+            num_samples=SCENARIOS.SAMPLE_SIZE_MEDIUM,
+            seed=SEEDS.PBRS_TERMINAL,
             base_factor=self.TEST_BASE_FACTOR,
             profit_target=self.TEST_PROFIT_TARGET,
             risk_reward_ratio=self.TEST_RR,
@@ -602,8 +603,8 @@ class TestPBRS(RewardSpaceTestBase):
             m2 = np.mean(c**2)
             m3 = np.mean(c**3)
             m4 = np.mean(c**4)
-            skew = m3 / (m2**1.5 + 1e-18)
-            kurt = m4 / (m2**2 + 1e-18) - 3.0
+            skew = m3 / (m2**1.5 + self.TOL_NUMERIC_GUARD)
+            kurt = m4 / (m2**2 + self.TOL_NUMERIC_GUARD) - 3.0
             return (float(skew), float(kurt))
 
         s_base, k_base = _skew_kurt(base)
@@ -751,8 +752,10 @@ class TestPBRS(RewardSpaceTestBase):
         rng = np.random.default_rng(321)
         last_potential = 0.0
         shaping_sum = 0.0
+        from ..constants import STATISTICAL
+
         for _ in range(SCENARIOS.MONTE_CARLO_ITERATIONS):
-            is_exit = rng.uniform() < 0.15
+            is_exit = rng.uniform() < STATISTICAL.EXIT_PROBABILITY_THRESHOLD
             next_pnl = 0.0 if is_exit else float(rng.normal(0, 0.07))
             next_dur = 0.0 if is_exit else float(rng.uniform(0, 1))
             _tot, shap, next_pot, _pbrs_delta, _entry_additive, _exit_additive = (
@@ -1020,8 +1023,10 @@ class TestPBRS(RewardSpaceTestBase):
         """Report generation without PBRS columns triggers absence + shift placeholder."""
         import pandas as pd
 
+        from ..constants import SEEDS
+
         n = 90
-        rng = np.random.default_rng(123)
+        rng = np.random.default_rng(SEEDS.CANONICAL_SWEEP)
         df = pd.DataFrame(
             {
                 "reward": rng.normal(0.05, 0.02, n),
diff --git a/ReforceXY/reward_space_analysis/tests/robustness/test_robustness.py b/ReforceXY/reward_space_analysis/tests/robustness/test_robustness.py
index 6213e0126d0fde03484ccb1aed0a9bc6bb36d468..c89d7c884bb7e96cd83d7567975d1ffb10a30d24 100644
--- a/ReforceXY/reward_space_analysis/tests/robustness/test_robustness.py
+++ b/ReforceXY/reward_space_analysis/tests/robustness/test_robustness.py
@@ -436,7 +436,7 @@ class TestRewardRobustnessAndBoundaries(RewardSpaceTestBase):
 
     def test_plateau_continuity_at_grace_boundary(self):
         """Test plateau continuity at grace boundary."""
-        modes = ["sqrt", "linear", "power", "half_life"]
+        modes = list(ATTENUATION_MODES)
         grace = 0.8
         eps = self.CONTINUITY_EPS_SMALL
         base_factor = self.TEST_BASE_FACTOR
diff --git a/ReforceXY/reward_space_analysis/tests/statistics/test_feature_analysis_failures.py b/ReforceXY/reward_space_analysis/tests/statistics/test_feature_analysis_failures.py
index 2582ae0c9dac22084c015e32f4725a2577e16272..affae2a2a7e5ed466d500b104a271eedcc89a253 100644
--- a/ReforceXY/reward_space_analysis/tests/statistics/test_feature_analysis_failures.py
+++ b/ReforceXY/reward_space_analysis/tests/statistics/test_feature_analysis_failures.py
@@ -17,6 +17,7 @@ import pandas as pd
 import pytest
 
 from reward_space_analysis import _perform_feature_analysis  # type: ignore
+from tests.constants import SEEDS
 
 pytestmark = pytest.mark.statistics
 
@@ -41,7 +42,7 @@ def _minimal_df(n: int = 30) -> pd.DataFrame:
 def test_feature_analysis_missing_reward_column():
     df = _minimal_df().drop(columns=["reward"])  # remove reward
     importance_df, stats, partial_deps, model = _perform_feature_analysis(
-        df, seed=7, skip_partial_dependence=True
+        df, seed=SEEDS.FEATURE_EMPTY, skip_partial_dependence=True
     )
     assert importance_df.empty
     assert stats["model_fitted"] is False
@@ -53,7 +54,7 @@ def test_feature_analysis_missing_reward_column():
 def test_feature_analysis_empty_frame():
     df = _minimal_df(0)  # empty
     importance_df, stats, partial_deps, model = _perform_feature_analysis(
-        df, seed=7, skip_partial_dependence=True
+        df, seed=SEEDS.FEATURE_EMPTY, skip_partial_dependence=True
     )
     assert importance_df.empty
     assert stats["n_features"] == 0
@@ -63,7 +64,7 @@ def test_feature_analysis_empty_frame():
 def test_feature_analysis_single_feature_path():
     df = pd.DataFrame({"pnl": np.random.normal(0, 1, 25), "reward": np.random.normal(0, 1, 25)})
     importance_df, stats, partial_deps, model = _perform_feature_analysis(
-        df, seed=11, skip_partial_dependence=True
+        df, seed=SEEDS.FEATURE_PRIME_11, skip_partial_dependence=True
     )
     assert stats["n_features"] == 1
     # Importance stub path returns NaNs
@@ -81,7 +82,7 @@ def test_feature_analysis_nans_present_path():
         }
     )
     importance_df, stats, partial_deps, model = _perform_feature_analysis(
-        df, seed=13, skip_partial_dependence=True
+        df, seed=SEEDS.FEATURE_PRIME_13, skip_partial_dependence=True
     )
     # Should hit NaN stub path (model_fitted False)
     assert stats["model_fitted"] is False
@@ -101,7 +102,7 @@ def test_feature_analysis_model_fitting_failure(monkeypatch):
     monkeypatch.setattr(RandomForestRegressor, "fit", boom)
     df = _minimal_df(50)
     importance_df, stats, partial_deps, model = _perform_feature_analysis(
-        df, seed=21, skip_partial_dependence=True
+        df, seed=SEEDS.FEATURE_PRIME_21, skip_partial_dependence=True
     )
     assert stats["model_fitted"] is False
     assert model is None
@@ -117,7 +118,7 @@ def test_feature_analysis_permutation_failure_partial_dependence(monkeypatch):
     monkeypatch.setattr("reward_space_analysis.permutation_importance", perm_boom)
     df = _minimal_df(60)
     importance_df, stats, partial_deps, model = _perform_feature_analysis(
-        df, seed=33, skip_partial_dependence=False
+        df, seed=SEEDS.FEATURE_PRIME_33, skip_partial_dependence=False
     )
     assert stats["model_fitted"] is True
     # Importance should be NaNs due to failure
@@ -130,7 +131,7 @@ def test_feature_analysis_permutation_failure_partial_dependence(monkeypatch):
 def test_feature_analysis_success_partial_dependence():
     df = _minimal_df(70)
     importance_df, stats, partial_deps, model = _perform_feature_analysis(
-        df, seed=47, skip_partial_dependence=False
+        df, seed=SEEDS.FEATURE_PRIME_47, skip_partial_dependence=False
     )
     # Expect at least one non-NaN importance (model fitted path)
     assert importance_df["importance_mean"].notna().any()
@@ -148,7 +149,7 @@ def test_feature_analysis_import_fallback(monkeypatch):
     monkeypatch.setattr("reward_space_analysis.r2_score", None)
     df = _minimal_df(10)
     with pytest.raises(ImportError):
-        _perform_feature_analysis(df, seed=5, skip_partial_dependence=True)
+        _perform_feature_analysis(df, seed=SEEDS.FEATURE_SMALL_5, skip_partial_dependence=True)
 
 
 def test_module_level_sklearn_import_failure_reload():
@@ -188,7 +189,9 @@ def test_module_level_sklearn_import_failure_reload():
         # Perform feature analysis should raise ImportError under missing components
         df = _minimal_df(15)
         with pytest.raises(ImportError):
-            rsa_fallback._perform_feature_analysis(df, seed=3, skip_partial_dependence=True)  # type: ignore[attr-defined]
+            rsa_fallback._perform_feature_analysis(
+                df, seed=SEEDS.FEATURE_SMALL_3, skip_partial_dependence=True
+            )  # type: ignore[attr-defined]
     finally:
         # Restore importer
         builtins.__import__ = orig_import
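
Note: the fit-failure test above injects errors via pytest's monkeypatch fixture. A minimal standalone sketch of that failure-injection pattern:

import pytest
from sklearn.ensemble import RandomForestRegressor

def test_fit_failure_path(monkeypatch):
    def boom(self, *args, **kwargs):
        raise RuntimeError("forced fit failure")

    # Patch the method on the class so any caller hits the error path.
    monkeypatch.setattr(RandomForestRegressor, "fit", boom)
    with pytest.raises(RuntimeError):
        RandomForestRegressor().fit([[0.0], [1.0]], [0.0, 1.0])
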
diff --git a/ReforceXY/reward_space_analysis/tests/statistics/test_statistics.py b/ReforceXY/reward_space_analysis/tests/statistics/test_statistics.py
index c0966ae21d9e3de7f797be8c008d2cb06cf7b2b6..f632242f2412fabd1c72fefbfe3854adc7fed221 100644
--- a/ReforceXY/reward_space_analysis/tests/statistics/test_statistics.py
+++ b/ReforceXY/reward_space_analysis/tests/statistics/test_statistics.py
@@ -266,7 +266,7 @@ class TestStatistics(RewardSpaceTestBase):
         """Equal scaling keeps KL/JS ≈0."""
         from ..constants import SCENARIOS, STAT_TOL
 
-        df1 = self._shift_scale_df(SCENARIOS.DEFAULT_SAMPLE_SIZE)
+        df1 = self._shift_scale_df(SCENARIOS.SAMPLE_SIZE_MEDIUM)
         scale = 3.5
         df2 = df1.copy()
         df2["pnl"] *= scale
@@ -299,7 +299,7 @@ class TestStatistics(RewardSpaceTestBase):
         from ..constants import SCENARIOS
 
         rng = np.random.default_rng(1234)
-        n = SCENARIOS.NULL_HYPOTHESIS_SAMPLE_SIZE
+        n = SCENARIOS.SAMPLE_SIZE_MEDIUM
         df = pd.DataFrame(
             {
                 "pnl": rng.normal(0, 1, n),
@@ -423,7 +423,7 @@ class TestStatistics(RewardSpaceTestBase):
             pnl_duration_vol_scale=self.TEST_PNL_DUR_VOL_SCALE,
         )
         exit_data = df[df["reward_exit"] != 0].copy()
-        if len(exit_data) < SCENARIOS.HETEROSCEDASTICITY_MIN_EXITS:
+        if len(exit_data) < SCENARIOS.SAMPLE_SIZE_TINY:
             self.skipTest("Insufficient exit actions for heteroscedasticity test")
         exit_data["duration_bin"] = pd.cut(
             exit_data["duration_ratio"], bins=4, labels=["Q1", "Q2", "Q3", "Q4"]
diff --git a/ReforceXY/reward_space_analysis/tests/test_base.py b/ReforceXY/reward_space_analysis/tests/test_base.py
index 1cf8b5696da658e787f960172299b255b36e804d..04beff2741697887cab625dda8746f211c23be16 100644
--- a/ReforceXY/reward_space_analysis/tests/test_base.py
+++ b/ReforceXY/reward_space_analysis/tests/test_base.py
@@ -24,6 +24,9 @@ from .constants import (
     CONTINUITY,
     EXIT_FACTOR,
     PBRS,
+    SCENARIOS,
+    SEEDS,
+    STATISTICAL,
     TOLERANCE,
 )
 
@@ -44,9 +47,9 @@ class RewardSpaceTestBase(unittest.TestCase):
     @classmethod
     def setUpClass(cls):
         """Set up class-level constants."""
-        cls.SEED = 42
+        cls.SEED = SEEDS.BASE
         cls.DEFAULT_PARAMS = DEFAULT_MODEL_REWARD_PARAMETERS.copy()
-        cls.TEST_SAMPLES = 50
+        cls.TEST_SAMPLES = SCENARIOS.SAMPLE_SIZE_TINY
         cls.TEST_BASE_FACTOR = 100.0
         cls.TEST_PROFIT_TARGET = 0.03
         cls.TEST_RR = 1.0
@@ -54,15 +57,15 @@ class RewardSpaceTestBase(unittest.TestCase):
         cls.TEST_PNL_STD = 0.02
         cls.TEST_PNL_DUR_VOL_SCALE = 0.5
         # Seeds for different test contexts
-        cls.SEED_SMOKE_TEST = 7
-        cls.SEED_REPRODUCIBILITY = 777
-        cls.SEED_BOOTSTRAP = 2024
-        cls.SEED_HETEROSCEDASTICITY = 123
+        cls.SEED_SMOKE_TEST = SEEDS.SMOKE_TEST
+        cls.SEED_REPRODUCIBILITY = SEEDS.REPRODUCIBILITY
+        cls.SEED_BOOTSTRAP = SEEDS.BOOTSTRAP
+        cls.SEED_HETEROSCEDASTICITY = SEEDS.HETEROSCEDASTICITY
         # Statistical test thresholds
-        cls.BOOTSTRAP_DEFAULT_ITERATIONS = 200
-        cls.BH_FP_RATE_THRESHOLD = 0.15
-        cls.EXIT_FACTOR_SCALING_RATIO_MIN = 5.0
-        cls.EXIT_FACTOR_SCALING_RATIO_MAX = 15.0
+        cls.BOOTSTRAP_DEFAULT_ITERATIONS = SCENARIOS.BOOTSTRAP_EXTENDED_ITERATIONS
+        cls.BH_FP_RATE_THRESHOLD = STATISTICAL.BH_FP_RATE_THRESHOLD
+        cls.EXIT_FACTOR_SCALING_RATIO_MIN = EXIT_FACTOR.SCALING_RATIO_MIN
+        cls.EXIT_FACTOR_SCALING_RATIO_MAX = EXIT_FACTOR.SCALING_RATIO_MAX
 
     def setUp(self):
         """Set up test fixtures with reproducible random seed."""
@@ -99,8 +102,8 @@ class RewardSpaceTestBase(unittest.TestCase):
     MIN_EXIT_POWER_TAU = EXIT_FACTOR.MIN_POWER_TAU
 
     # Test-specific constants
-    PBRS_TERMINAL_PROB = 0.08
-    PBRS_SWEEP_ITER = 120
+    PBRS_TERMINAL_PROB = PBRS.TERMINAL_PROBABILITY
+    PBRS_SWEEP_ITER = SCENARIOS.PBRS_SWEEP_ITERATIONS
     JS_DISTANCE_UPPER_BOUND = math.sqrt(math.log(2.0))
 
     def make_ctx(
@@ -137,7 +140,7 @@ class RewardSpaceTestBase(unittest.TestCase):
         *,
         iterations: Optional[int] = None,
         terminal_prob: Optional[float] = None,
-        seed: int = 123,
+        seed: int = SEEDS.CANONICAL_SWEEP,
     ) -> tuple[list[float], list[float]]:
         """Run a lightweight canonical invariance sweep.
 
@@ -273,7 +276,7 @@ class RewardSpaceTestBase(unittest.TestCase):
         if diff <= tolerance:
             return
         if rtol is not None:
-            scale = max(abs(first), abs(second), 1e-15)
+            scale = max(abs(first), abs(second), self.TOL_NEGLIGIBLE)
             if diff <= rtol * scale:
                 return
         self.fail(
@@ -407,12 +410,12 @@ class RewardSpaceTestBase(unittest.TestCase):
         self.assertAlmostEqualFloat(va, vb, tolerance=atol, rtol=rtol, msg=msg)
 
     @staticmethod
-    def seed_all(seed: int = 123) -> None:
+    def seed_all(seed: int = SEEDS.CANONICAL_SWEEP) -> None:
         """Seed all RNGs used (numpy & random)."""
         np.random.seed(seed)
         random.seed(seed)
 
-    def _const_df(self, n: int = 64) -> pd.DataFrame:
+    def _const_df(self, n: int = SCENARIOS.SAMPLE_SIZE_CONST_DF) -> pd.DataFrame:
         return pd.DataFrame(
             {
                 "reward": np.ones(n) * 0.5,
@@ -422,8 +425,10 @@ class RewardSpaceTestBase(unittest.TestCase):
             }
         )
 
-    def _shift_scale_df(self, n: int = 256, shift: float = 0.0, scale: float = 1.0) -> pd.DataFrame:
-        rng = np.random.default_rng(123)
+    def _shift_scale_df(
+        self, n: int = SCENARIOS.SAMPLE_SIZE_SHIFT_SCALE, shift: float = 0.0, scale: float = 1.0
+    ) -> pd.DataFrame:
+        rng = np.random.default_rng(SEEDS.CANONICAL_SWEEP)
         base = rng.normal(0, 1, n)
         return pd.DataFrame(
             {
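
Note: JS_DISTANCE_UPPER_BOUND above is sqrt(ln 2), the maximum of the Jensen-Shannon distance under the natural logarithm. A minimal check, assuming SciPy's jensenshannon (which returns the distance, i.e. the square root of the divergence):

import numpy as np
from scipy.spatial.distance import jensenshannon

# Disjoint-support distributions attain the maximum JS distance.
p = np.array([1.0, 0.0])
q = np.array([0.0, 1.0])
assert np.isclose(jensenshannon(p, q), np.sqrt(np.log(2.0)))
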
diff --git a/ReforceXY/user_data/freqaimodels/ReforceXY.py b/ReforceXY/user_data/freqaimodels/ReforceXY.py
index 37ab6e2aa3dc96f31fd741bfdf947f53df0733e3..b43f422ca0add068e88b4677a1f2bd05ce848197 100644
--- a/ReforceXY/user_data/freqaimodels/ReforceXY.py
+++ b/ReforceXY/user_data/freqaimodels/ReforceXY.py
@@ -11,7 +11,9 @@ from pathlib import Path
 from typing import (
     Any,
     Callable,
+    ClassVar,
     Dict,
+    Final,
     List,
     Literal,
     Optional,
@@ -143,25 +145,25 @@ class ReforceXY(BaseReinforcementLearningModel):
         - pip install optuna-dashboard
     """
 
-    _LOG_2 = math.log(2.0)
-    DEFAULT_IDLE_DURATION_MULTIPLIER: int = 4
+    _LOG_2: Final[float] = math.log(2.0)
+    DEFAULT_IDLE_DURATION_MULTIPLIER: Final[int] = 4
 
-    _MODEL_TYPES: tuple[ModelType, ...] = (
+    _MODEL_TYPES: Final[tuple[ModelType, ...]] = (
         "PPO",
         "RecurrentPPO",
         "MaskablePPO",
         "DQN",
         "QRDQN",
     )
-    _SCHEDULE_TYPES: tuple[ScheduleType, ...] = ("linear", "constant", "unknown")
-    _EXIT_POTENTIAL_MODES: tuple[ExitPotentialMode, ...] = (
+    _SCHEDULE_TYPES: Final[tuple[ScheduleType, ...]] = ("linear", "constant", "unknown")
+    _EXIT_POTENTIAL_MODES: Final[tuple[ExitPotentialMode, ...]] = (
         "canonical",
         "non_canonical",
         "progressive_release",
         "spike_cancel",
         "retain_previous",
     )
-    _TRANSFORM_FUNCTIONS: tuple[TransformFunction, ...] = (
+    _TRANSFORM_FUNCTIONS: Final[tuple[TransformFunction, ...]] = (
         "tanh",
         "softsign",
         "arctan",
@@ -169,30 +171,30 @@ class ReforceXY(BaseReinforcementLearningModel):
         "asinh",
         "clip",
     )
-    _EXIT_ATTENUATION_MODES: tuple[ExitAttenuationMode, ...] = (
+    _EXIT_ATTENUATION_MODES: Final[tuple[ExitAttenuationMode, ...]] = (
         "legacy",
         "sqrt",
         "linear",
         "power",
         "half_life",
     )
-    _ACTIVATION_FUNCTIONS: tuple[ActivationFunction, ...] = (
+    _ACTIVATION_FUNCTIONS: Final[tuple[ActivationFunction, ...]] = (
         "tanh",
         "relu",
         "elu",
         "leaky_relu",
     )
-    _OPTIMIZER_CLASSES: tuple[OptimizerClass, ...] = ("adam", "adamw", "rmsprop")
-    _NET_ARCH_SIZES: tuple[NetArchSize, ...] = (
+    _OPTIMIZER_CLASSES: Final[tuple[OptimizerClass, ...]] = ("adam", "adamw", "rmsprop")
+    _NET_ARCH_SIZES: Final[tuple[NetArchSize, ...]] = (
         "small",
         "medium",
         "large",
         "extra_large",
     )
-    _STORAGE_BACKENDS: tuple[StorageBackend, ...] = ("sqlite", "file")
-    _SAMPLER_TYPES: tuple[SamplerType, ...] = ("tpe", "auto")
+    _STORAGE_BACKENDS: Final[tuple[StorageBackend, ...]] = ("sqlite", "file")
+    _SAMPLER_TYPES: Final[tuple[SamplerType, ...]] = ("tpe", "auto")
 
-    _action_masks_cache: Dict[Tuple[bool, float], NDArray[np.bool_]] = {}
+    _action_masks_cache: ClassVar[Dict[Tuple[bool, float], NDArray[np.bool_]]] = {}
 
     @staticmethod
     def _model_types_set() -> set[ModelType]:
@@ -1375,7 +1377,6 @@ class ReforceXY(BaseReinforcementLearningModel):
         logger.info("Trial %s params: %s", trial.number, params)
 
         # "PPO"
-
         if self._MODEL_TYPES[0] in self.model_type:
             n_steps = params.get("n_steps", 0)
             if n_steps > 0:
@@ -4016,7 +4017,7 @@ def sample_params_ppo(trial: Trial) -> Dict[str, Any]:
     Sampler for PPO hyperparams
     """
     return convert_optuna_params_to_model_params(
-        "PPO", get_common_ppo_optuna_params(trial)
+        ReforceXY._MODEL_TYPES[0], get_common_ppo_optuna_params(trial)
     )
 
 
@@ -4096,7 +4097,7 @@ def sample_params_dqn(trial: Trial) -> Dict[str, Any]:
     Sampler for DQN hyperparams
     """
     return convert_optuna_params_to_model_params(
-        "DQN", get_common_dqn_optuna_params(trial)
+        ReforceXY._MODEL_TYPES[3], get_common_dqn_optuna_params(trial)
     )
 
 
@@ -4106,4 +4107,6 @@ def sample_params_qrdqn(trial: Trial) -> Dict[str, Any]:
     """
     dqn_optuna_params = get_common_dqn_optuna_params(trial)
     dqn_optuna_params.update({"n_quantiles": trial.suggest_int("n_quantiles", 10, 160)})
-    return convert_optuna_params_to_model_params("QRDQN", dqn_optuna_params)
+    return convert_optuna_params_to_model_params(
+        ReforceXY._MODEL_TYPES[4], dqn_optuna_params
+    )
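
Note: the annotations introduced above separate two kinds of class-level names. A minimal sketch of the convention on an illustrative class:

import math
from typing import ClassVar, Dict, Final, Tuple

import numpy as np
from numpy.typing import NDArray

class Model:
    # Final marks a genuine constant: type checkers reject reassignment in
    # subclasses or through instances.
    _LOG_2: Final[float] = math.log(2.0)
    _MODEL_TYPES: Final[Tuple[str, ...]] = ("PPO", "DQN")

    # ClassVar marks shared, mutable class-level state: one cache for all
    # instances, never shadowed by an instance attribute of the same name.
    _action_masks_cache: ClassVar[Dict[Tuple[bool, float], NDArray[np.bool_]]] = {}
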
diff --git a/ReforceXY/user_data/strategies/RLAgentStrategy.py b/ReforceXY/user_data/strategies/RLAgentStrategy.py
index 7e2330140ec30f58454c16e46deb3509274e9ae5..1c37ecf15ec9d442c6c4d5364c22137baaf3afc2 100644
--- a/ReforceXY/user_data/strategies/RLAgentStrategy.py
+++ b/ReforceXY/user_data/strategies/RLAgentStrategy.py
@@ -1,7 +1,7 @@
 import datetime
 import logging
 from functools import cached_property, reduce
-from typing import Any, Literal, Optional
+from typing import Any, Final, Literal, Optional
 
 # import talib.abstract as ta
 from freqtrade.persistence import Trade
@@ -13,7 +13,7 @@ TradeDirection = Literal["long", "short"]
 
 logger = logging.getLogger(__name__)
 
-ACTION_COLUMN = "&-action"
+ACTION_COLUMN: Final = "&-action"
 
 
 class RLAgentStrategy(IStrategy):
@@ -23,12 +23,12 @@ class RLAgentStrategy(IStrategy):
 
     INTERFACE_VERSION = 3
 
-    _TRADING_MODES: tuple[TradingMode, ...] = ("margin", "futures", "spot")
-    _TRADE_DIRECTIONS: tuple[TradeDirection, ...] = ("long", "short")
-    _ACTION_ENTER_LONG: int = 1
-    _ACTION_EXIT_LONG: int = 2
-    _ACTION_ENTER_SHORT: int = 3
-    _ACTION_EXIT_SHORT: int = 4
+    _TRADING_MODES: Final[tuple[TradingMode, ...]] = ("margin", "futures", "spot")
+    _TRADE_DIRECTIONS: Final[tuple[TradeDirection, ...]] = ("long", "short")
+    _ACTION_ENTER_LONG: Final[int] = 1
+    _ACTION_EXIT_LONG: Final[int] = 2
+    _ACTION_ENTER_SHORT: Final[int] = 3
+    _ACTION_EXIT_SHORT: Final[int] = 4
 
     @cached_property
     def can_short(self) -> bool:
diff --git a/quickadapter/user_data/freqaimodels/QuickAdapterRegressorV3.py b/quickadapter/user_data/freqaimodels/QuickAdapterRegressorV3.py
index 6c5c977229940b466a53d539fd381d602cbb85b7..884a6a046cbef1d92774638468b3cdb32862ca7e 100644
--- a/quickadapter/user_data/freqaimodels/QuickAdapterRegressorV3.py
+++ b/quickadapter/user_data/freqaimodels/QuickAdapterRegressorV3.py
@@ -6,7 +6,7 @@ import time
 import warnings
 from functools import cached_property
 from pathlib import Path
-from typing import Any, Callable, Literal, Optional
+from typing import Any, Callable, Final, Literal, Optional
 
 import numpy as np
 import optuna
@@ -38,11 +38,11 @@ OptunaNamespace = Literal["hp", "train", "label"]
 
 debug = False
 
-TEST_SIZE = 0.1
+TEST_SIZE: Final = 0.1
 
-EXTREMA_COLUMN = "&s-extrema"
-MAXIMA_THRESHOLD_COLUMN = "&s-maxima_threshold"
-MINIMA_THRESHOLD_COLUMN = "&s-minima_threshold"
+EXTREMA_COLUMN: Final = "&s-extrema"
+MAXIMA_THRESHOLD_COLUMN: Final = "&s-maxima_threshold"
+MINIMA_THRESHOLD_COLUMN: Final = "&s-minima_threshold"
 
 warnings.simplefilter(action="ignore", category=FutureWarning)
 
@@ -68,15 +68,15 @@ class QuickAdapterRegressorV3(BaseRegressionModel):
 
     version = "3.7.121"
 
-    _SQRT_2 = np.sqrt(2.0)
+    _SQRT_2: Final[float] = np.sqrt(2.0)
 
-    _EXTREMA_SELECTION_METHODS: tuple[ExtremaSelectionMethod, ...] = (
+    _EXTREMA_SELECTION_METHODS: Final[tuple[ExtremaSelectionMethod, ...]] = (
         "peak_values",
         "extrema_rank",
     )
-    _OPTUNA_STORAGE_BACKENDS: tuple[str, ...] = ("sqlite", "file")
-    _OPTUNA_SAMPLERS: tuple[str, ...] = ("tpe", "auto")
-    _OPTUNA_NAMESPACES: tuple[OptunaNamespace, ...] = ("hp", "train", "label")
+    _OPTUNA_STORAGE_BACKENDS: Final[tuple[str, ...]] = ("sqlite", "file")
+    _OPTUNA_SAMPLERS: Final[tuple[str, ...]] = ("tpe", "auto")
+    _OPTUNA_NAMESPACES: Final[tuple[OptunaNamespace, ...]] = ("hp", "train", "label")
 
     @staticmethod
     def _extrema_selection_methods_set() -> set[ExtremaSelectionMethod]:
diff --git a/quickadapter/user_data/strategies/QuickAdapterV3.py b/quickadapter/user_data/strategies/QuickAdapterV3.py
index cbafd5fd872db40e6c92be13247fb5fcde8a9bfd..4821de199cba25690f18a6754b2cd8a68e7988e3 100644
--- a/quickadapter/user_data/strategies/QuickAdapterV3.py
+++ b/quickadapter/user_data/strategies/QuickAdapterV3.py
@@ -5,7 +5,7 @@ import logging
 import math
 from functools import cached_property, lru_cache, reduce
 from pathlib import Path
-from typing import Any, Callable, Literal, Optional, Sequence, Tuple
+from typing import Any, Callable, ClassVar, Final, Literal, Optional, Sequence, Tuple
 
 import numpy as np
 import pandas_ta as pta
@@ -55,9 +55,9 @@ debug = False
 
 logger = logging.getLogger(__name__)
 
-EXTREMA_COLUMN = "&s-extrema"
-MAXIMA_THRESHOLD_COLUMN = "&s-maxima_threshold"
-MINIMA_THRESHOLD_COLUMN = "&s-minima_threshold"
+EXTREMA_COLUMN: Final = "&s-extrema"
+MAXIMA_THRESHOLD_COLUMN: Final = "&s-maxima_threshold"
+MINIMA_THRESHOLD_COLUMN: Final = "&s-minima_threshold"
 
 
 class QuickAdapterV3(IStrategy):
@@ -79,13 +79,13 @@ class QuickAdapterV3(IStrategy):
 
     INTERFACE_VERSION = 3
 
-    _TRADE_DIRECTIONS: tuple[TradeDirection, ...] = ("long", "short")
-    _INTERPOLATION_DIRECTIONS: tuple[InterpolationDirection, ...] = (
+    _TRADE_DIRECTIONS: Final[tuple[TradeDirection, ...]] = ("long", "short")
+    _INTERPOLATION_DIRECTIONS: Final[tuple[InterpolationDirection, ...]] = (
         "direct",
         "inverse",
     )
-    _ORDER_TYPES: tuple[OrderType, ...] = ("entry", "exit")
-    _TRADING_MODES: tuple[TradingMode, ...] = ("spot", "margin", "futures")
+    _ORDER_TYPES: Final[tuple[OrderType, ...]] = ("entry", "exit")
+    _TRADING_MODES: Final[tuple[TradingMode, ...]] = ("spot", "margin", "futures")
 
     def version(self) -> str:
         return "3.3.171"
@@ -95,16 +95,16 @@ class QuickAdapterV3(IStrategy):
     stoploss = -0.025
     use_custom_stoploss = True
 
-    default_exit_thresholds: dict[str, float] = {
+    default_exit_thresholds: ClassVar[dict[str, float]] = {
         "k_decl_v": 0.6,
         "k_decl_a": 0.4,
     }
 
-    default_exit_thresholds_calibration: dict[str, float] = {
+    default_exit_thresholds_calibration: ClassVar[dict[str, float]] = {
         "decline_quantile": 0.90,
     }
 
-    default_reversal_confirmation: dict[str, int | float] = {
+    default_reversal_confirmation: ClassVar[dict[str, int | float]] = {
         "lookback_period": 0,
         "decay_ratio": 0.5,
         "min_natr_ratio_percent": 0.0095,
@@ -114,7 +114,7 @@ class QuickAdapterV3(IStrategy):
     position_adjustment_enable = True
 
     # {stage: (natr_ratio_percent, stake_percent)}
-    partial_exit_stages: dict[int, tuple[float, float]] = {
+    partial_exit_stages: ClassVar[dict[int, tuple[float, float]]] = {
         0: (0.4858, 0.4),
         1: (0.6180, 0.3),
         2: (0.7640, 0.2),
@@ -1181,7 +1181,7 @@ class QuickAdapterV3(IStrategy):
         min_natr_ratio_percent: float,
         max_natr_ratio_percent: float,
         candle_idx: int = -1,
-        interpolation_direction: Literal["direct", "inverse"] = "direct",
+        interpolation_direction: InterpolationDirection = "direct",
         quantile_exponent: float = 1.5,
     ) -> float:
         df_signature = QuickAdapterV3._df_signature(df)
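
Note: the last hunk swaps an inline Literal for the InterpolationDirection alias declared at the top of the file. A minimal sketch of the alias pattern, which keeps the signature and the runtime tuple from drifting apart:

from typing import Literal

InterpolationDirection = Literal["direct", "inverse"]
_INTERPOLATION_DIRECTIONS: tuple[InterpolationDirection, ...] = ("direct", "inverse")

def interpolate(direction: InterpolationDirection = "direct") -> float:
    # Runtime guard mirrors the static constraint.
    assert direction in _INTERPOLATION_DIRECTIONS
    return 0.0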