Piment Noir Git Repositories - freqai-strategies.git/commitdiff
refactor(quickadapter): cleanup optuna trials validation
author Jérôme Benoit <jerome.benoit@piment-noir.org>
Thu, 25 Dec 2025 18:20:21 +0000 (19:20 +0100)
committer Jérôme Benoit <jerome.benoit@piment-noir.org>
Thu, 25 Dec 2025 18:20:21 +0000 (19:20 +0100)
Signed-off-by: Jérôme Benoit <jerome.benoit@piment-noir.org>
ReforceXY/reward_space_analysis/reward_space_analysis.py
ReforceXY/reward_space_analysis/tests/api/test_api_helpers.py
ReforceXY/reward_space_analysis/tests/cli/test_cli_params_and_csv.py
ReforceXY/reward_space_analysis/tests/constants.py
ReforceXY/reward_space_analysis/tests/integration/test_report_formatting.py
ReforceXY/reward_space_analysis/tests/integration/test_reward_calculation.py
ReforceXY/reward_space_analysis/tests/statistics/test_feature_analysis_failures.py
ReforceXY/reward_space_analysis/tests/statistics/test_statistics.py
quickadapter/user_data/freqaimodels/QuickAdapterRegressorV3.py
quickadapter/user_data/strategies/Utils.py

ReforceXY/reward_space_analysis/reward_space_analysis.py
index 0445bf2e69c3cc8c5564b7c2311eea224f14c933..8b917017e59bdd57acf10e844f8948990b1c2457 100644 (file)
@@ -2023,9 +2023,7 @@ def _perform_feature_analysis(
         )
 
     X = df[available_features].copy()
-    for col in ("trade_duration", "idle_duration"):
-        if col in X.columns and pd.api.types.is_integer_dtype(X[col]):
-            X.loc[:, col] = X[col].astype(float)
+    X = X.apply(pd.to_numeric, errors="coerce").astype(float)
     y = df["reward"].copy()
 
     # Drop wholly NaN or constant columns (provide no signal)
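Note: the replaced per-column cast only touched integer duration columns; the new one-liner coerces every feature column to numeric (unparsable entries become NaN) and then to float before fitting. A minimal sketch of the behavior on an illustrative frame (the column contents below are made up, not taken from the analysis):

import pandas as pd

X = pd.DataFrame(
    {
        "trade_duration": [3, 7, 11],       # int64, previously cast explicitly
        "idle_duration": [0, 2, 5],         # int64, previously cast explicitly
        "pnl": ["0.01", "bad", "0.03"],     # hypothetical non-numeric noise
    }
)

# New behavior: every column ends up float64; values that cannot be parsed
# become NaN and are then handled by the NaN/constant-column filtering below.
X_coerced = X.apply(pd.to_numeric, errors="coerce").astype(float)
print(X_coerced.dtypes)           # all float64
print(X_coerced["pnl"].tolist())  # [0.01, nan, 0.03]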
ReforceXY/reward_space_analysis/tests/api/test_api_helpers.py
index 227e76a638ead0d147d6db698c89dbca05fb925f..4c6ba58dce3f514c68ca6b408bc306de2a91ddd9 100644 (file)
@@ -43,12 +43,12 @@ class TestAPIAndHelpers(RewardSpaceTestBase):
         The test is statistical but deterministic via fixed RNG seeds.
         """
 
-        max_idle_duration_candles = 20
+        max_idle_duration_candles = SCENARIOS.API_MAX_IDLE_DURATION_CANDLES
         max_trade_duration_candles = PARAMS.TRADE_DURATION_MEDIUM
 
         def sample_entry_rate(*, idle_duration: int, short_allowed: bool) -> float:
             rng = random.Random(SEEDS.REPRODUCIBILITY)
-            draws = 2_000
+            draws = SCENARIOS.API_ENTRY_RATE_DRAWS
             entries = 0
             for _ in range(draws):
                 action, _, _, _ = _sample_action(
@@ -65,7 +65,7 @@ class TestAPIAndHelpers(RewardSpaceTestBase):
             return entries / draws
 
         idle_duration_low = 0
-        idle_duration_high = 60
+        idle_duration_high = SCENARIOS.API_IDLE_DURATION_HIGH
 
         low_idle_rate = sample_entry_rate(idle_duration=idle_duration_low, short_allowed=True)
         high_idle_rate = sample_entry_rate(idle_duration=idle_duration_high, short_allowed=True)
@@ -95,10 +95,10 @@ class TestAPIAndHelpers(RewardSpaceTestBase):
     def test_api_simulation_and_reward_smoke(self):
         """Test api simulation and reward smoke."""
         df = simulate_samples_with_defaults(
-            self.base_params(max_trade_duration_candles=40),
+            self.base_params(max_trade_duration_candles=SCENARIOS.API_MAX_TRADE_DURATION_CANDLES),
             num_samples=SCENARIOS.SAMPLE_SIZE_TINY,
             seed=SEEDS.SMOKE_TEST,
-            max_duration_ratio=1.5,
+            max_duration_ratio=SCENARIOS.API_MAX_DURATION_RATIO,
         )
         self.assertGreater(len(df), 0)
         any_exit = df[df["reward_exit"] != 0].head(1)
@@ -152,9 +152,9 @@ class TestAPIAndHelpers(RewardSpaceTestBase):
         """simulate_samples() exposes bounded sampling probabilities."""
 
         df = simulate_samples_with_defaults(
-            self.base_params(max_trade_duration_candles=40),
+            self.base_params(max_trade_duration_candles=SCENARIOS.API_MAX_TRADE_DURATION_CANDLES),
             seed=SEEDS.SMOKE_TEST,
-            max_duration_ratio=1.5,
+            max_duration_ratio=SCENARIOS.API_MAX_DURATION_RATIO,
         )
 
         for col in ["sample_entry_prob", "sample_exit_prob", "sample_neutral_prob"]:
@@ -163,7 +163,7 @@ class TestAPIAndHelpers(RewardSpaceTestBase):
         values = (
             df[["sample_entry_prob", "sample_exit_prob", "sample_neutral_prob"]].stack().dropna()
         )
-        prob_upper_bound = 0.9
+        prob_upper_bound = SCENARIOS.API_PROBABILITY_UPPER_BOUND
         self.assertTrue(((values >= 0.0) & (values <= prob_upper_bound)).all())
 
     def test_simulate_samples_interprets_bool_string_params(self):
@@ -172,7 +172,7 @@ class TestAPIAndHelpers(RewardSpaceTestBase):
             self.base_params(
                 action_masking="true", max_trade_duration_candles=PARAMS.TRADE_DURATION_SHORT
             ),
-            num_samples=10,
+            num_samples=SCENARIOS.SAMPLE_SIZE_REPORT_MINIMAL,
             trading_mode="spot",
         )
         self.assertIsInstance(df1, pd.DataFrame)
@@ -180,7 +180,7 @@ class TestAPIAndHelpers(RewardSpaceTestBase):
             self.base_params(
                 action_masking="false", max_trade_duration_candles=PARAMS.TRADE_DURATION_SHORT
             ),
-            num_samples=10,
+            num_samples=SCENARIOS.SAMPLE_SIZE_REPORT_MINIMAL,
             trading_mode="spot",
         )
         self.assertIsInstance(df2, pd.DataFrame)
@@ -291,7 +291,14 @@ class TestAPIAndHelpers(RewardSpaceTestBase):
         """Test build_argument_parser function."""
         parser = build_argument_parser()
         self.assertIsNotNone(parser)
-        args = parser.parse_args(["--num_samples", "100", "--out_dir", "test_output"])
+        args = parser.parse_args(
+            [
+                "--num_samples",
+                str(SCENARIOS.SAMPLE_SIZE_SMALL),
+                "--out_dir",
+                "test_output",
+            ]
+        )
         self.assertEqual(args.num_samples, 100)
         self.assertEqual(str(args.out_dir), "test_output")
 
@@ -323,7 +330,12 @@ class TestPrivateFunctions(RewardSpaceTestBase):
         """Test exit reward calculation with various scenarios."""
         scenarios = [
             (Positions.Long, Actions.Long_exit, PARAMS.PNL_MEDIUM, "Profitable long exit"),
-            (Positions.Short, Actions.Short_exit, -0.03, "Profitable short exit"),
+            (
+                Positions.Short,
+                Actions.Short_exit,
+                -PARAMS.PNL_SHORT_PROFIT,
+                "Profitable short exit",
+            ),
             (Positions.Long, Actions.Long_exit, -PARAMS.PNL_SMALL, "Losing long exit"),
             (Positions.Short, Actions.Short_exit, PARAMS.PNL_SMALL, "Losing short exit"),
         ]
@@ -353,8 +365,8 @@ class TestPrivateFunctions(RewardSpaceTestBase):
             pnl=PARAMS.PNL_SMALL,
             trade_duration=PARAMS.TRADE_DURATION_SHORT,
             idle_duration=0,
-            max_unrealized_profit=0.03,
-            min_unrealized_profit=0.01,
+            max_unrealized_profit=PARAMS.PNL_SHORT_PROFIT,
+            min_unrealized_profit=PARAMS.PNL_TINY,
             position=Positions.Short,
             action=Actions.Long_exit,
         )
@@ -389,7 +401,9 @@ class TestPrivateFunctions(RewardSpaceTestBase):
             position=Positions.Long,
             action=Actions.Long_exit,
         )
-        breakdown = calculate_reward_with_defaults(context, params, base_factor=10_000_000.0)
+        breakdown = calculate_reward_with_defaults(
+            context, params, base_factor=SCENARIOS.API_EXTREME_BASE_FACTOR
+        )
         self.assertFinite(breakdown.exit_component, name="exit_component")
 
 
ReforceXY/reward_space_analysis/tests/cli/test_cli_params_and_csv.py
index ac4a2cbe16dbbde8089e550f467038264c921766..33b03dc3e7df43167aa5d07142ee400e79a6013b 100644 (file)
@@ -10,7 +10,9 @@ from pathlib import Path
 import pandas as pd
 import pytest
 
-from ..constants import SEEDS, TOLERANCE
+from reward_space_analysis import Actions
+
+from ..constants import SCENARIOS, SEEDS, TOLERANCE
 from ..test_base import RewardSpaceTestBase
 
 # Pytest marker for taxonomy classification
@@ -46,7 +48,12 @@ class TestCsvEncoding(RewardSpaceTestBase):
         out_dir = self.output_path / "csv_int_check"
         result = _run_cli(
             out_dir=out_dir,
-            args=["--num_samples", "200", "--seed", str(SEEDS.BASE)],
+            args=[
+                "--num_samples",
+                str(SCENARIOS.CLI_NUM_SAMPLES_STANDARD),
+                "--seed",
+                str(SEEDS.BASE),
+            ],
         )
         _assert_cli_success(self, result)
         csv_path = out_dir / "reward_samples.csv"
@@ -58,7 +65,7 @@ class TestCsvEncoding(RewardSpaceTestBase):
             all((float(v).is_integer() for v in values)),
             "Non-integer values detected in 'action' column",
         )
-        allowed = {0, 1, 2, 3, 4}
+        allowed = {int(action.value) for action in Actions}
         self.assertTrue(set((int(v) for v in values)).issubset(allowed))
 
 
@@ -79,7 +86,7 @@ class TestParamsPropagation(RewardSpaceTestBase):
             out_dir=out_dir,
             args=[
                 "--num_samples",
-                "200",
+                str(SCENARIOS.CLI_NUM_SAMPLES_STANDARD),
                 "--seed",
                 str(SEEDS.BASE),
                 "--skip_feature_analysis",
@@ -100,11 +107,11 @@ class TestParamsPropagation(RewardSpaceTestBase):
             out_dir=out_dir,
             args=[
                 "--num_samples",
-                "150",
+                str(SCENARIOS.CLI_NUM_SAMPLES_HASH),
                 "--seed",
                 str(SEEDS.BASE),
                 "--risk_reward_ratio",
-                "1.5",
+                str(SCENARIOS.CLI_RISK_REWARD_RATIO_NON_DEFAULT),
             ],
         )
         _assert_cli_success(self, result)
@@ -121,7 +128,12 @@ class TestParamsPropagation(RewardSpaceTestBase):
         # Use small sample for speed; rely on default shaping logic
         result = _run_cli(
             out_dir=out_dir,
-            args=["--num_samples", "180", "--seed", str(SEEDS.BASE)],
+            args=[
+                "--num_samples",
+                str(SCENARIOS.CLI_NUM_SAMPLES_REPORT),
+                "--seed",
+                str(SEEDS.BASE),
+            ],
         )
         _assert_cli_success(self, result)
         report_path = out_dir / "statistical_analysis.md"
@@ -137,7 +149,7 @@ class TestParamsPropagation(RewardSpaceTestBase):
             out_dir=out_dir,
             args=[
                 "--num_samples",
-                "120",
+                str(SCENARIOS.CLI_NUM_SAMPLES_FAST),
                 "--seed",
                 str(SEEDS.BASE),
                 "--strict_diagnostics",
@@ -159,11 +171,11 @@ class TestParamsPropagation(RewardSpaceTestBase):
             out_dir=out_dir,
             args=[
                 "--num_samples",
-                "120",
+                str(SCENARIOS.CLI_NUM_SAMPLES_FAST),
                 "--seed",
                 str(SEEDS.BASE),
                 "--params",
-                "max_trade_duration_candles=96",
+                f"max_trade_duration_candles={SCENARIOS.CLI_MAX_TRADE_DURATION_PARAMS}",
             ],
         )
         _assert_cli_success(self, result)
@@ -175,7 +187,9 @@ class TestParamsPropagation(RewardSpaceTestBase):
         self.assertIn("simulation_params", manifest)
         rp = manifest["reward_params"]
         self.assertIn("max_trade_duration_candles", rp)
-        self.assertEqual(int(rp["max_trade_duration_candles"]), 96)
+        self.assertEqual(
+            int(rp["max_trade_duration_candles"]), SCENARIOS.CLI_MAX_TRADE_DURATION_PARAMS
+        )
 
     def test_max_trade_duration_candles_propagation_flag(self):
         """Dynamic flag --max_trade_duration_candles X propagates identically."""
@@ -184,11 +198,11 @@ class TestParamsPropagation(RewardSpaceTestBase):
             out_dir=out_dir,
             args=[
                 "--num_samples",
-                "120",
+                str(SCENARIOS.CLI_NUM_SAMPLES_FAST),
                 "--seed",
                 str(SEEDS.BASE),
                 "--max_trade_duration_candles",
-                "64",
+                str(SCENARIOS.CLI_MAX_TRADE_DURATION_FLAG),
             ],
         )
         _assert_cli_success(self, result)
@@ -200,7 +214,9 @@ class TestParamsPropagation(RewardSpaceTestBase):
         self.assertIn("simulation_params", manifest)
         rp = manifest["reward_params"]
         self.assertIn("max_trade_duration_candles", rp)
-        self.assertEqual(int(rp["max_trade_duration_candles"]), 64)
+        self.assertEqual(
+            int(rp["max_trade_duration_candles"]), SCENARIOS.CLI_MAX_TRADE_DURATION_FLAG
+        )
 
     # Owns invariant: cli-pbrs-csv-columns-121
     def test_csv_contains_pbrs_columns_when_shaping_present(self):
@@ -216,7 +232,7 @@ class TestParamsPropagation(RewardSpaceTestBase):
             out_dir=out_dir,
             args=[
                 "--num_samples",
-                "150",
+                str(SCENARIOS.CLI_NUM_SAMPLES_HASH),
                 "--seed",
                 str(SEEDS.BASE),
                 # Enable PBRS shaping explicitly
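Note: hard-coding the allowed action codes as {0, 1, 2, 3, 4} silently drifts if the Actions enum ever changes; the new import derives the set from the enum itself. A hedged sketch of the idea with an illustrative stand-in enum (member names other than Long_exit/Short_exit are assumptions, not the project's definitions):

from enum import IntEnum

class Actions(IntEnum):  # illustrative stand-in for reward_space_analysis.Actions
    Neutral = 0
    Long_enter = 1
    Long_exit = 2
    Short_enter = 3
    Short_exit = 4

allowed = {int(action.value) for action in Actions}
assert allowed == {0, 1, 2, 3, 4}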
ReforceXY/reward_space_analysis/tests/constants.py
index 86282050a561402c2e5455a83f7020a127ddfd4f..cdeefa73843e6e331263f29d2601c4a21664993d 100644 (file)
@@ -26,6 +26,7 @@ class ToleranceConfig:
         NUMERIC_GUARD: Minimum threshold to prevent division by zero (1e-18)
         NEGLIGIBLE: Threshold below which values are considered negligible (1e-15)
         RELATIVE: Relative tolerance for ratio/percentage comparisons (1e-06)
+        INTEGRATION_RELATIVE_COARSE: Coarse relative tolerance for integration smoke checks (0.25)
         DISTRIB_SHAPE: Tolerance for distribution shape metrics (skew, kurtosis) (0.15)
         DECIMAL_PLACES_STRICT: Decimal places for exact formula validation (12)
         DECIMAL_PLACES_STANDARD: Decimal places for general calculations (9)
@@ -44,6 +45,7 @@ class ToleranceConfig:
     NUMERIC_GUARD: float = 1e-18
     NEGLIGIBLE: float = 1e-15
     RELATIVE: float = 1e-06
+    INTEGRATION_RELATIVE_COARSE: float = 0.25
     DISTRIB_SHAPE: float = 0.15
     DECIMAL_PLACES_STRICT: int = 12
     DECIMAL_PLACES_STANDARD: int = 9
@@ -230,7 +232,9 @@ class TestParameters:
         PNL_DUR_VOL_SCALE: Duration-based volatility scaling factor (0.001)
 
         # Common test PnL values
+        PNL_TINY: Tiny profit/loss value (0.01)
         PNL_SMALL: Small profit/loss value (0.02)
+        PNL_SHORT_PROFIT: Short profit/loss value (0.03)
         PNL_MEDIUM: Medium profit/loss value (0.05)
         PNL_LARGE: Large profit/loss value (0.10)
 
@@ -255,7 +259,9 @@ class TestParameters:
     PNL_DUR_VOL_SCALE: float = 0.001
 
     # Common PnL values
+    PNL_TINY: float = 0.01
     PNL_SMALL: float = 0.02
+    PNL_SHORT_PROFIT: float = 0.03
     PNL_MEDIUM: float = 0.05
     PNL_LARGE: float = 0.10
 
@@ -295,6 +301,27 @@ class TestScenarios:
         PBRS_SWEEP_ITERATIONS: Number of iterations for PBRS sweep tests (120)
         BOOTSTRAP_MINIMAL_ITERATIONS: Minimal bootstrap iterations for quick tests (25)
         BOOTSTRAP_EXTENDED_ITERATIONS: Extended bootstrap iterations (200)
+        SAMPLE_SIZE_REPORT_MINIMAL: Minimal sample size for report smoke tests (10)
+        REPORT_DURATION_SCALE_UP: Duration scale applied to synthetic real episodes (1.01)
+        REPORT_DURATION_SCALE_DOWN: Duration scale applied to synthetic real episodes (0.99)
+
+        # API smoke parameters
+        API_MAX_IDLE_DURATION_CANDLES: Idle duration cap used in _sample_action tests (20)
+        API_IDLE_DURATION_HIGH: High idle duration used to trigger hazard (60)
+        API_ENTRY_RATE_DRAWS: Draw count for entry-rate estimation (2000)
+        API_MAX_TRADE_DURATION_CANDLES: Max trade duration used in API simulation tests (40)
+        API_MAX_DURATION_RATIO: Max duration ratio used in API simulation tests (1.5)
+        API_PROBABILITY_UPPER_BOUND: Upper bound for exposed sampling probabilities (0.9)
+        API_EXTREME_BASE_FACTOR: Extreme base_factor used to trigger warning paths (10000000.0)
+
+        # CLI smoke parameters
+        CLI_NUM_SAMPLES_STANDARD: Default CLI sample size for smoke runs (200)
+        CLI_NUM_SAMPLES_REPORT: CLI sample size used in PBRS report smoke (180)
+        CLI_NUM_SAMPLES_HASH: CLI sample size used for params_hash checks (150)
+        CLI_NUM_SAMPLES_FAST: CLI sample size for quick branch coverage (120)
+        CLI_RISK_REWARD_RATIO_NON_DEFAULT: Non-default risk/reward ratio for manifest hashing (1.5)
+        CLI_MAX_TRADE_DURATION_PARAMS: CLI max_trade_duration_candles for --params propagation (96)
+        CLI_MAX_TRADE_DURATION_FLAG: CLI max_trade_duration_candles for dynamic flag propagation (64)
     """
 
     DURATION_SHORT: int = 150
@@ -315,6 +342,27 @@ class TestScenarios:
     PBRS_SWEEP_ITERATIONS: int = 120
     BOOTSTRAP_MINIMAL_ITERATIONS: int = 25
     BOOTSTRAP_EXTENDED_ITERATIONS: int = 200
+    SAMPLE_SIZE_REPORT_MINIMAL: int = 10
+    REPORT_DURATION_SCALE_UP: float = 1.01
+    REPORT_DURATION_SCALE_DOWN: float = 0.99
+
+    # API smoke parameters
+    API_MAX_IDLE_DURATION_CANDLES: int = 20
+    API_IDLE_DURATION_HIGH: int = 60
+    API_ENTRY_RATE_DRAWS: int = 2000
+    API_MAX_TRADE_DURATION_CANDLES: int = 40
+    API_MAX_DURATION_RATIO: float = 1.5
+    API_PROBABILITY_UPPER_BOUND: float = 0.9
+    API_EXTREME_BASE_FACTOR: float = 10_000_000.0
+
+    # CLI smoke parameters
+    CLI_NUM_SAMPLES_STANDARD: int = 200
+    CLI_NUM_SAMPLES_REPORT: int = 180
+    CLI_NUM_SAMPLES_HASH: int = 150
+    CLI_NUM_SAMPLES_FAST: int = 120
+    CLI_RISK_REWARD_RATIO_NON_DEFAULT: float = 1.5
+    CLI_MAX_TRADE_DURATION_PARAMS: int = 96
+    CLI_MAX_TRADE_DURATION_FLAG: int = 64
 
 
 @dataclass(frozen=True)
ReforceXY/reward_space_analysis/tests/integration/test_report_formatting.py
index c54dd31d3a4f06443d7f1f3f7187299dae1e08a2..b928cb827d5e1aabeda07d8543686ed25a1c14b6 100644 (file)
@@ -30,7 +30,7 @@ class TestReportFormatting(RewardSpaceTestBase):
         # Construct df with idle_duration always zero -> reward_idle all zeros so idle_mask.sum()==0
         # Position has only one unique value -> groups<2
         # pnl all zeros so no positive/negative groups with >=30 each
-        n = 40
+        n = SCENARIOS.SAMPLE_SIZE_TINY
         df = pd.DataFrame(
             {
                 "reward": np.zeros(n),
@@ -112,12 +112,12 @@ class TestReportFormatting(RewardSpaceTestBase):
     def test_distribution_shift_section_present_with_real_episodes(self):
         """Distribution Shift section renders metrics table when real episodes provided."""
         # Synthetic df (ensure >=10 non-NaN per feature)
-        synth_df = self.make_stats_df(n=60, seed=SEEDS.REPORT_FORMAT_1)
+        synth_df = self.make_stats_df(n=SCENARIOS.SAMPLE_SIZE_TINY, seed=SEEDS.REPORT_FORMAT_1)
         # Real df: shift slightly (different mean) so metrics non-zero
         real_df = synth_df.copy()
-        real_df["pnl"] = real_df["pnl"] + 0.001  # small mean shift
-        real_df["trade_duration"] = real_df["trade_duration"] * 1.01
-        real_df["idle_duration"] = real_df["idle_duration"] * 0.99
+        real_df["pnl"] = real_df["pnl"] + PARAMS.PNL_DUR_VOL_SCALE  # small mean shift
+        real_df["trade_duration"] = real_df["trade_duration"] * SCENARIOS.REPORT_DURATION_SCALE_UP
+        real_df["idle_duration"] = real_df["idle_duration"] * SCENARIOS.REPORT_DURATION_SCALE_DOWN
         content = self._write_report(synth_df, real_df=real_df)
         # Assert metrics header and at least one feature row
         self.assertIn("### 5.4 Distribution Shift Analysis", content)
@@ -135,7 +135,7 @@ class TestReportFormatting(RewardSpaceTestBase):
     def test_partial_dependence_redundancy_note_emitted(self):
         """Redundancy note appears when both feature analysis and partial dependence skipped."""
         df = self.make_stats_df(
-            n=10, seed=SEEDS.REPORT_FORMAT_2
+            n=SCENARIOS.SAMPLE_SIZE_REPORT_MINIMAL, seed=SEEDS.REPORT_FORMAT_2
         )  # small but >=4 so skip_feature_analysis flag drives behavior
         content = self._write_report(
             df,
@@ -162,27 +162,28 @@ class TestReportFormatting(RewardSpaceTestBase):
         - All metrics are formatted with proper precision
         """
         # Create df with PBRS columns
-        n = 100
+        n = SCENARIOS.SAMPLE_SIZE_SMALL
+        rng = np.random.default_rng(SEEDS.REPORT_FORMAT_1)
         df = pd.DataFrame(
             {
-                "reward": np.random.normal(0, 0.1, n),
+                "reward": rng.normal(0, 0.1, n),
                 "reward_invalid": np.zeros(n),
                 "reward_idle": np.zeros(n),
                 "reward_hold": np.zeros(n),
-                "reward_exit": np.random.normal(0, 0.05, n),
-                "reward_shaping": np.random.normal(0, 0.02, n),
+                "reward_exit": rng.normal(0, 0.05, n),
+                "reward_shaping": rng.normal(0, 0.02, n),
                 "reward_entry_additive": np.zeros(n),
                 "reward_exit_additive": np.zeros(n),
                 # PBRS columns
-                "reward_base": np.random.normal(0, 0.1, n),
-                "reward_pbrs_delta": np.random.normal(0, 0.02, n),
-                "reward_invariance_correction": np.random.normal(0, 1e-6, n),
-                "pnl": np.random.normal(0, 0.01, n),
-                "trade_duration": np.random.randint(10, 100, n).astype(float),
+                "reward_base": rng.normal(0, 0.1, n),
+                "reward_pbrs_delta": rng.normal(0, 0.02, n),
+                "reward_invariance_correction": rng.normal(0, PBRS_INVARIANCE_TOL / 10.0, n),
+                "pnl": rng.normal(0, 0.01, n),
+                "trade_duration": rng.integers(10, 100, n).astype(float),
                 "idle_duration": np.zeros(n),
-                "position": np.random.choice([0, 1, 2], n).astype(float),
-                "action": np.random.choice([0, 1, 2, 3, 4], n).astype(float),
-                "duration_ratio": np.random.uniform(0, 1, n),
+                "position": rng.choice([0, 1, 2], n).astype(float),
+                "action": rng.choice([0, 1, 2, 3, 4], n).astype(float),
+                "duration_ratio": rng.uniform(0, 1, n),
                 "idle_ratio": np.zeros(n),
             }
         )
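Note: the legacy np.random.* calls draw from the interpreter-wide global state, so the fabricated PBRS frame differed between runs unless something else seeded it; np.random.default_rng gives the test a private, seeded Generator. A small illustrative comparison (the seed 42 is a placeholder, not SEEDS.REPORT_FORMAT_1):

import numpy as np

legacy = np.random.normal(0, 0.1, 5)         # depends on hidden global state

rng = np.random.default_rng(42)              # local, reproducible stream
a = rng.normal(0, 0.1, 5)
b = rng.integers(10, 100, 5).astype(float)   # Generator spelling of randint
c = rng.choice([0, 1, 2], 5).astype(float)
d = rng.uniform(0, 1, 5)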
ReforceXY/reward_space_analysis/tests/integration/test_reward_calculation.py
index caa59b799ce4cbe8a564607f4cc4f4a979d65d07..467fa2317ee40f3266604c5679c466de99b2a03a 100644 (file)
@@ -143,15 +143,15 @@ class TestRewardCalculation(RewardSpaceTestBase):
         """
         params = self.base_params()
         params.pop("base_factor", None)
-        base_factor = 100.0
-        profit_aim = 0.04
+        base_factor = DEFAULT_MODEL_REWARD_PARAMETERS["base_factor"]
+        profit_aim = PARAMS.PNL_MEDIUM
         rr = PARAMS.RISK_REWARD_RATIO
 
-        for pnl, label in [(0.02, "profit"), (-0.02, "loss")]:
+        for pnl, label in [(PARAMS.PNL_SMALL, "profit"), (-PARAMS.PNL_SMALL, "loss")]:
             with self.subTest(pnl=pnl, label=label):
                 ctx_long = self.make_ctx(
                     pnl=pnl,
-                    trade_duration=50,
+                    trade_duration=PARAMS.TRADE_DURATION_SHORT,
                     idle_duration=0,
                     max_unrealized_profit=abs(pnl) + 0.005,
                     min_unrealized_profit=0.0 if pnl > 0 else pnl,
@@ -160,7 +160,7 @@ class TestRewardCalculation(RewardSpaceTestBase):
                 )
                 ctx_short = self.make_ctx(
                     pnl=pnl,
-                    trade_duration=50,
+                    trade_duration=PARAMS.TRADE_DURATION_SHORT,
                     idle_duration=0,
                     max_unrealized_profit=abs(pnl) + 0.005 if pnl > 0 else 0.01,
                     min_unrealized_profit=0.0 if pnl > 0 else pnl,
@@ -192,6 +192,10 @@ class TestRewardCalculation(RewardSpaceTestBase):
 
                 # Coarse symmetry: relative diff below relaxed tolerance
                 rel_diff = abs(abs(br_long.exit_component) - abs(br_short.exit_component)) / max(
-                    1e-12, abs(br_long.exit_component)
+                    TOLERANCE.IDENTITY_STRICT, abs(br_long.exit_component)
+                )
+                self.assertLess(
+                    rel_diff,
+                    TOLERANCE.INTEGRATION_RELATIVE_COARSE,
+                    f"Excessive asymmetry ({rel_diff:.3f}) for {label}",
                 )
-                self.assertLess(rel_diff, 0.25, f"Excessive asymmetry ({rel_diff:.3f}) for {label}")
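Note: the symmetry check itself is unchanged; only the literals 1e-12 and 0.25 now come from the tolerance registry. A worked instance of the guard with hypothetical exit components:

IDENTITY_STRICT = 1e-12             # value of the literal this constant replaces
INTEGRATION_RELATIVE_COARSE = 0.25  # new TOLERANCE.INTEGRATION_RELATIVE_COARSE

exit_long, exit_short = 0.80, -0.70  # made-up long/short exit components

rel_diff = abs(abs(exit_long) - abs(exit_short)) / max(IDENTITY_STRICT, abs(exit_long))
# rel_diff == 0.125, below the 0.25 bound, so the coarse symmetry assertion passes.
assert rel_diff < INTEGRATION_RELATIVE_COARSE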
ReforceXY/reward_space_analysis/tests/statistics/test_feature_analysis_failures.py
index 8faa2364bb05b0b65a7914a3dc98e565a15432fe..2147c239f97d9bb9049f9dbf7fecf727808a9a02 100644 (file)
@@ -161,8 +161,6 @@ def test_feature_analysis_model_fitting_failure(monkeypatch):
     if RandomForestRegressor is None:  # type: ignore[comparison-overlap]
         pytest.skip("sklearn components unavailable; skipping model fitting failure test")
 
-    _ = RandomForestRegressor.fit  # preserve reference for clarity (unused)
-
     def boom(self, *a, **kw):  # noqa: D401
         raise RuntimeError("forced fit failure")
 
ReforceXY/reward_space_analysis/tests/statistics/test_statistics.py
index 4222ed4f78da0a98e4abf885a0b1e93eca76ebcf..e5542aa91fbd69ed444085fa6f4c6eef184f76c6 100644 (file)
@@ -41,12 +41,15 @@ class TestStatistics(RewardSpaceTestBase):
     def test_statistics_feature_analysis_skip_partial_dependence(self):
         """Invariant 107: skip_partial_dependence=True yields empty partial_deps."""
         if _perform_feature_analysis is None:
-            self.skipTest("sklearn not available; skipping feature analysis invariance test")
+            self.skipTest("Feature analysis helper unavailable")
         # Use existing helper to get synthetic stats df (small for speed)
         df = self.make_stats_df(n=120, seed=SEEDS.BASE, idle_pattern="mixed")
-        importance_df, analysis_stats, partial_deps, model = _perform_feature_analysis(
-            df, seed=SEEDS.BASE, skip_partial_dependence=True, rf_n_jobs=1, perm_n_jobs=1
-        )
+        try:
+            importance_df, analysis_stats, partial_deps, model = _perform_feature_analysis(
+                df, seed=SEEDS.BASE, skip_partial_dependence=True, rf_n_jobs=1, perm_n_jobs=1
+            )
+        except ImportError:
+            self.skipTest("scikit-learn not available; skipping feature analysis invariance test")
         self.assertIsInstance(importance_df, pd.DataFrame)
         self.assertIsInstance(analysis_stats, dict)
         self.assertEqual(
@@ -248,19 +251,14 @@ class TestStatistics(RewardSpaceTestBase):
         df1 = self._shift_scale_df(300, shift=0.0)
         df2 = self._shift_scale_df(300, shift=0.3)
         metrics = compute_distribution_shift_metrics(df1, df2)
-        js_key = next((k for k in metrics if k.endswith("pnl_js_distance")), None)
-        if js_key is None:
-            self.skipTest("JS distance key not present in metrics output")
-        assert js_key is not None
+        self.assertIn("pnl_js_distance", metrics)
 
         metrics_swapped = compute_distribution_shift_metrics(df2, df1)
-        js_key_swapped = next((k for k in metrics_swapped if k.endswith("pnl_js_distance")), None)
-        self.assertIsNotNone(js_key_swapped)
-        assert js_key_swapped is not None
+        self.assertIn("pnl_js_distance", metrics_swapped)
 
         self.assertAlmostEqualFloat(
-            float(metrics[js_key]),
-            float(metrics_swapped[js_key_swapped]),
+            float(metrics["pnl_js_distance"]),
+            float(metrics_swapped["pnl_js_distance"]),
             tolerance=TOLERANCE.IDENTITY_STRICT,
             rtol=TOLERANCE.RELATIVE,
         )
@@ -446,12 +444,15 @@ class TestStatistics(RewardSpaceTestBase):
             float(reward_space_analysis.Actions.Short_exit.value),
         )
         exit_data = df[df["action"].isin(exit_action_codes)].copy()
-        if len(exit_data) < SCENARIOS.SAMPLE_SIZE_TINY:
-            self.skipTest("Insufficient exit actions for heteroscedasticity test")
+        self.assertGreaterEqual(
+            len(exit_data),
+            SCENARIOS.SAMPLE_SIZE_TINY,
+            f"Insufficient exit actions for heteroscedasticity test (n={len(exit_data)})",
+        )
         exit_data["duration_bin"] = pd.cut(
             exit_data["duration_ratio"], bins=4, labels=["Q1", "Q2", "Q3", "Q4"]
         )
-        variance_by_bin = exit_data.groupby("duration_bin")["pnl"].var().dropna()
+        variance_by_bin = exit_data.groupby("duration_bin", observed=False)["pnl"].var().dropna()
         if "Q1" in variance_by_bin.index and "Q4" in variance_by_bin.index:
             self.assertGreater(
                 variance_by_bin["Q4"],
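Note: duration_bin is a pd.cut categorical, and grouping on a categorical without an explicit observed= keyword triggers a FutureWarning on recent pandas about the changing default; passing observed=False pins the current behavior (empty bins kept, then removed by .dropna()). A minimal illustration with made-up data:

import pandas as pd

df = pd.DataFrame(
    {"duration_ratio": [0.05, 0.10, 0.90, 0.95], "pnl": [0.01, -0.02, 0.04, -0.05]}
)
df["duration_bin"] = pd.cut(df["duration_ratio"], bins=4, labels=["Q1", "Q2", "Q3", "Q4"])

# Q2 and Q3 are empty here; observed=False keeps them as NaN-variance groups,
# and dropna() removes them before the Q1/Q4 variance comparison.
variance_by_bin = df.groupby("duration_bin", observed=False)["pnl"].var().dropna()
print(variance_by_bin)  # only Q1 and Q4 remain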
quickadapter/user_data/freqaimodels/QuickAdapterRegressorV3.py
index 5f5383e538517be9a8af38d39351609e71340254..3ee30160c5e55fbfef71cbb983c14cfe4551b908 100644 (file)
@@ -2106,7 +2106,8 @@ class QuickAdapterRegressorV3(BaseRegressionModel):
                 isinstance(trial.values, list)
                 and len(trial.values) == n_objectives
                 and all(
-                    isinstance(value, (int, float)) and np.isfinite(value)
+                    isinstance(value, (int, float))
+                    and (np.isfinite(value) or np.isinf(value))
                     for value in trial.values
                 )
             )
@@ -2377,7 +2378,8 @@ class QuickAdapterRegressorV3(BaseRegressionModel):
     ) -> None:
         if not study:
             return
-
+        if not self.optuna_validate_params(pair, namespace, study):
+            return
         best_params = self.get_optuna_params(pair, namespace)
         if not best_params:
             return
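Note: the previous predicate rejected any non-finite objective, so trials whose values legitimately hit +/-inf were treated as invalid; the relaxed check still filters NaN, since np.isfinite(v) or np.isinf(v) is true for every real number except NaN. The second hunk simply short-circuits the params-load path when the study fails that validation. A hedged sketch of the relaxed predicate, using a hypothetical helper name:

import numpy as np

def _trial_values_ok(values, n_objectives):  # hypothetical helper, not the class method
    return (
        isinstance(values, list)
        and len(values) == n_objectives
        and all(
            isinstance(v, (int, float)) and (np.isfinite(v) or np.isinf(v))
            for v in values
        )
    )

assert _trial_values_ok([1.0, float("inf")], 2)      # +/-inf now accepted
assert not _trial_values_ok([1.0, float("nan")], 2)  # NaN still rejected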
quickadapter/user_data/strategies/Utils.py
index 70eed709fb893520a2fb10cc870da2321987e8d0..7827e46f21191e435a8b577e1f0bf1b0b603a177 100644 (file)
@@ -1933,7 +1933,9 @@ def get_optuna_callbacks(
         ]
     elif regressor == REGRESSORS[1]:  # "lightgbm"
         callbacks = [
-            optuna.integration.LightGBMPruningCallback(trial, "rmse", valid_name="valid_0")
+            optuna.integration.LightGBMPruningCallback(
+                trial, "rmse", valid_name="valid_0"
+            )
         ]
     else:
         raise ValueError(