except ImportError:
from typing_extensions import NotRequired, Required # Python <3.11
+
ConfigTuple = Tuple[str, str, float, int, int, int]
SUMMARY_FILENAME = "reward_space_cli.json"
max_scenarios: int = 40,
shuffle_seed: Optional[int] = None,
) -> List[ConfigTuple]:
+ # Constants from reward_space_analysis.py
+ # ALLOWED_EXIT_POTENTIAL_MODES and ATTENUATION_MODES_WITH_LEGACY
exit_potential_modes = [
"canonical",
"non_canonical",
"progressive_release",
- "retain_previous",
"spike_cancel",
+ "retain_previous",
]
- exit_attenuation_modes = ["linear", "sqrt", "power", "half_life", "legacy"]
+ exit_attenuation_modes = ["sqrt", "linear", "power", "half_life", "legacy"]
potential_gammas = [0.0, 0.5, 0.95, 0.999]
hold_enabled = [0, 1]
entry_additive_enabled = [0, 1]
import pytest
-from reward_space_analysis import apply_transform
+from reward_space_analysis import ALLOWED_TRANSFORMS, apply_transform
from ..test_base import RewardSpaceTestBase
"""Comprehensive transform function tests with parameterized scenarios."""
# Transform function test data
- SMOOTH_TRANSFORMS = ["tanh", "softsign", "arctan", "sigmoid", "asinh"]
- ALL_TRANSFORMS = SMOOTH_TRANSFORMS + ["clip"]
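+ # Derive both lists from ALLOWED_TRANSFORMS so the test data stays in sync with reward_space_analysis.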
+ SMOOTH_TRANSFORMS = [t for t in ALLOWED_TRANSFORMS if t != "clip"]
+ ALL_TRANSFORMS = list(ALLOWED_TRANSFORMS)
def test_transform_exact_values(self):
"""Test transform functions produce exact expected values for specific inputs."""
from reward_space_analysis import DEFAULT_MODEL_REWARD_PARAMETERS
+from .constants import SEEDS
+
@pytest.fixture(scope="session")
def temp_output_dir():
@pytest.fixture(autouse=True)
def setup_rng():
"""Configure RNG for reproducibility."""
- np.random.seed(42)
+ np.random.seed(SEEDS.BASE)
@pytest.fixture
Attributes:
TERMINAL_TOL: Terminal potential must be within this tolerance of zero (1e-09)
MAX_ABS_SHAPING: Maximum absolute shaping value for bounded checks (10.0)
+ TERMINAL_PROBABILITY: Default probability of terminal state in sweeps (0.08)
"""
TERMINAL_TOL: float = 1e-09
MAX_ABS_SHAPING: float = 10.0
+ TERMINAL_PROBABILITY: float = 0.08
@dataclass(frozen=True)
Attributes:
BH_FP_RATE_THRESHOLD: Benjamini-Hochberg false positive rate threshold (0.15)
BOOTSTRAP_DEFAULT_ITERATIONS: Default bootstrap resampling count (100)
+ EXIT_PROBABILITY_THRESHOLD: Per-step probability used to sample exit events in simulations (0.15)
"""
BH_FP_RATE_THRESHOLD: float = 0.15
BOOTSTRAP_DEFAULT_ITERATIONS: int = 100
+ EXIT_PROBABILITY_THRESHOLD: float = 0.15
@dataclass(frozen=True)
- BOOTSTRAP: Dedicated seed for bootstrap confidence interval tests to ensure
independence from other random sequences
- HETEROSCEDASTICITY: Dedicated seed for variance structure validation tests
+ - SMOKE_TEST: Seed for smoke tests
+ - CANONICAL_SWEEP: Seed for canonical PBRS sweep tests
Attributes:
BASE: Default seed for standard tests (42)
REPRODUCIBILITY: Seed for reproducibility validation (12345)
BOOTSTRAP: Seed for bootstrap CI tests (999)
HETEROSCEDASTICITY: Seed for heteroscedasticity tests (7890)
+ SMOKE_TEST: Seed for smoke tests (7)
+ CANONICAL_SWEEP: Seed for canonical sweep tests (123)
+
+ # PBRS-specific seeds
+ PBRS_INVARIANCE_1: Seed for PBRS invariance test case 1 (913)
+ PBRS_INVARIANCE_2: Seed for PBRS invariance test case 2 (515)
+ PBRS_TERMINAL: Seed for PBRS terminal potential tests (777)
+
+ # Feature analysis failure seeds
+ FEATURE_EMPTY: Seed for empty feature tests (17)
+ FEATURE_PRIME_11: Seed for feature test variant (11)
+ FEATURE_PRIME_13: Seed for feature test variant (13)
+ FEATURE_PRIME_21: Seed for feature test variant (21)
+ FEATURE_PRIME_33: Seed for feature test variant (33)
+ FEATURE_PRIME_47: Seed for feature test variant (47)
+ FEATURE_SMALL_5: Seed for small feature test (5)
+ FEATURE_SMALL_3: Seed for small feature test (3)
+
+ # Report formatting seeds
+ REPORT_FORMAT_1: Seed for report formatting test 1 (234)
+ REPORT_FORMAT_2: Seed for report formatting test 2 (321)
"""
BASE: int = 42
REPRODUCIBILITY: int = 12345
BOOTSTRAP: int = 999
HETEROSCEDASTICITY: int = 7890
+ SMOKE_TEST: int = 7
+ CANONICAL_SWEEP: int = 123
+
+ # PBRS-specific seeds
+ PBRS_INVARIANCE_1: int = 913
+ PBRS_INVARIANCE_2: int = 515
+ PBRS_TERMINAL: int = 777
+
+ # Feature analysis failure seeds
+ FEATURE_EMPTY: int = 17
+ FEATURE_PRIME_11: int = 11
+ FEATURE_PRIME_13: int = 13
+ FEATURE_PRIME_21: int = 21
+ FEATURE_PRIME_33: int = 33
+ FEATURE_PRIME_47: int = 47
+ FEATURE_SMALL_5: int = 5
+ FEATURE_SMALL_3: int = 3
+
+ # Report formatting seeds
+ REPORT_FORMAT_1: int = 234
+ REPORT_FORMAT_2: int = 321
@dataclass(frozen=True)
DURATION_MEDIUM: Medium duration scenario (200)
DURATION_LONG: Long duration scenario (300)
DURATION_SCENARIOS: Standard duration test sequence
+ SAMPLE_SIZE_TINY: Tiny sample size for smoke tests (50)
SAMPLE_SIZE_SMALL: Small sample size for quick tests (100)
SAMPLE_SIZE_MEDIUM: Medium sample size for standard tests (400)
SAMPLE_SIZE_LARGE: Large sample size for statistical power (800)
- DEFAULT_SAMPLE_SIZE: Default for most tests (400)
+ SAMPLE_SIZE_CONST_DF: Sample size for constant dataframes (64)
+ SAMPLE_SIZE_SHIFT_SCALE: Sample size for shift/scale tests (256)
PBRS_SIMULATION_STEPS: Number of steps for PBRS simulation tests (500)
- NULL_HYPOTHESIS_SAMPLE_SIZE: Sample size for null hypothesis tests (400)
- BOOTSTRAP_MINIMAL_ITERATIONS: Minimal bootstrap iterations for quick tests (25)
- BOOTSTRAP_STANDARD_ITERATIONS: Standard bootstrap iterations (100)
- HETEROSCEDASTICITY_MIN_EXITS: Minimum exits for heteroscedasticity validation (50)
- CORRELATION_TEST_MIN_SIZE: Minimum sample size for correlation tests (200)
MONTE_CARLO_ITERATIONS: Monte Carlo simulation iterations (160)
+ PBRS_SWEEP_ITERATIONS: Number of iterations for PBRS sweep tests (120)
+ BOOTSTRAP_MINIMAL_ITERATIONS: Minimal bootstrap iterations for quick tests (25)
+ BOOTSTRAP_EXTENDED_ITERATIONS: Extended bootstrap iterations (200)
"""
DURATION_SHORT: int = 150
DURATION_LONG: int = 300
DURATION_SCENARIOS: tuple[int, ...] = (150, 200, 300)
+ SAMPLE_SIZE_TINY: int = 50
SAMPLE_SIZE_SMALL: int = 100
SAMPLE_SIZE_MEDIUM: int = 400
SAMPLE_SIZE_LARGE: int = 800
- DEFAULT_SAMPLE_SIZE: int = 400
+ SAMPLE_SIZE_CONST_DF: int = 64
+ SAMPLE_SIZE_SHIFT_SCALE: int = 256
# Specialized test scenario sizes
PBRS_SIMULATION_STEPS: int = 500
- NULL_HYPOTHESIS_SAMPLE_SIZE: int = 400
- BOOTSTRAP_MINIMAL_ITERATIONS: int = 25
- BOOTSTRAP_STANDARD_ITERATIONS: int = 100
- HETEROSCEDASTICITY_MIN_EXITS: int = 50
- CORRELATION_TEST_MIN_SIZE: int = 200
MONTE_CARLO_ITERATIONS: int = 160
+ PBRS_SWEEP_ITERATIONS: int = 120
+ BOOTSTRAP_MINIMAL_ITERATIONS: int = 25
+ BOOTSTRAP_EXTENDED_ITERATIONS: int = 200
@dataclass(frozen=True)
calculate_reward,
)
+from ..constants import TOLERANCE
from .configs import RewardScenarioConfig, ThresholdTestConfig, ValidationConfig
Example:
config = ValidationConfig(
- tolerance_strict=1e-12,
- tolerance_relaxed=1e-09,
+ tolerance_strict=TOLERANCE.IDENTITY_STRICT,
+ tolerance_relaxed=TOLERANCE.IDENTITY_RELAXED,
exclude_components=["reward_shaping"],
component_description="core components"
)
Example:
assert_single_active_component(
- self, breakdown, "exit_component", 1e-09,
+ self, breakdown, "exit_component", TOLERANCE.IDENTITY_RELAXED,
["hold_penalty", "idle_penalty", "invalid_penalty"]
)
"""
Example:
assert_single_active_component_with_additives(
- self, breakdown, "exit_component", 1e-09,
+ self, breakdown, "exit_component", TOLERANCE.IDENTITY_RELAXED,
["hold_penalty", "idle_penalty"],
enforce_additives_zero=True
)
Example:
config = RewardScenarioConfig(
- base_factor=90.0,
- profit_target=0.06,
- risk_reward_ratio=1.0,
- tolerance_relaxed=1e-09
+ base_factor=PARAMS.BASE_FACTOR,
+ profit_target=PARAMS.PROFIT_TARGET,
+ risk_reward_ratio=PARAMS.RISK_REWARD_RATIO,
+ tolerance_relaxed=TOLERANCE.IDENTITY_RELAXED
)
scenarios = [
(idle_context, {}, "idle scenario"),
Example:
config = RewardScenarioConfig(
- base_factor=90.0,
- profit_target=0.06,
- risk_reward_ratio=1.0,
- tolerance_relaxed=1e-09
+ base_factor=PARAMS.BASE_FACTOR,
+ profit_target=PARAMS.PROFIT_TARGET,
+ risk_reward_ratio=PARAMS.RISK_REWARD_RATIO,
+ tolerance_relaxed=TOLERANCE.IDENTITY_RELAXED
)
variations = [
{"exit_additive": 0.0},
Example:
assert_exit_mode_mathematical_validation(
- self, context, params, 90.0, 0.06, 1.0, 1e-09
+ self, context, params, PARAMS.BASE_FACTOR, PARAMS.PROFIT_TARGET,
+ PARAMS.RISK_REWARD_RATIO, TOLERANCE.IDENTITY_RELAXED
)
"""
duration_ratio = context.trade_duration / 100
Example:
config = RewardScenarioConfig(
- base_factor=90.0,
- profit_target=0.06,
- risk_reward_ratio=1.0,
- tolerance_relaxed=1e-09
+ base_factor=PARAMS.BASE_FACTOR,
+ profit_target=PARAMS.PROFIT_TARGET,
+ risk_reward_ratio=PARAMS.RISK_REWARD_RATIO,
+ tolerance_relaxed=TOLERANCE.IDENTITY_RELAXED
)
test_cases = [
- (0.0, 1.0, "zero profit target"),
- (0.06, 1.0, "standard parameters"),
+ (0.0, PARAMS.RISK_REWARD_RATIO, "zero profit target"),
+ (PARAMS.PROFIT_TARGET, PARAMS.RISK_REWARD_RATIO, "standard parameters"),
(0.06, 2.0, "high risk/reward ratio"),
]
assert_multi_parameter_sensitivity(
(100, "at threshold"),
(150, "above threshold"),
],
- tolerance=1e-09
+ tolerance=TOLERANCE.IDENTITY_RELAXED
)
assert_hold_penalty_threshold_behavior(
- self, make_context, params, 90.0, 0.06, 1.0, config
+ self, make_context, params, PARAMS.BASE_FACTOR, PARAMS.PROFIT_TARGET,
+ PARAMS.RISK_REWARD_RATIO, config
)
"""
for trade_duration, description in config.test_cases:
f_bad = exit_factor_fn(base_factor, pnl, pnl_factor, duration_ratio, bad_params)
f_ref = exit_factor_fn(base_factor, pnl, pnl_factor, duration_ratio, reference_params)
- test_case.assertAlmostEqual(f_bad, f_ref, delta=1e-12)
+ test_case.assertAlmostEqual(f_bad, f_ref, delta=TOLERANCE.IDENTITY_STRICT)
test_case.assertGreaterEqual(f_bad, 0.0)
Usage:
from tests.helpers.configs import RewardScenarioConfig
+ from tests.constants import PARAMS, TOLERANCE
config = RewardScenarioConfig(
- base_factor=90.0,
- profit_target=0.06,
- risk_reward_ratio=1.0,
- tolerance_relaxed=1e-09
+ base_factor=PARAMS.BASE_FACTOR,
+ profit_target=PARAMS.PROFIT_TARGET,
+ risk_reward_ratio=PARAMS.RISK_REWARD_RATIO,
+ tolerance_relaxed=TOLERANCE.IDENTITY_RELAXED
)
assert_reward_calculation_scenarios(
from dataclasses import dataclass
from typing import Callable, Optional
+from ..constants import SEEDS, STATISTICAL, TOLERANCE
+
@dataclass
class RewardScenarioConfig:
component_description: Human-readable description of validated components
"""
- tolerance_strict: float
- tolerance_relaxed: float
+ tolerance_strict: float = TOLERANCE.IDENTITY_STRICT
+ tolerance_relaxed: float = TOLERANCE.IDENTITY_RELAXED
exclude_components: Optional[list[str]] = None
component_description: str = "reward components"
attenuation_mode: str
plateau_enabled: bool = False
plateau_grace: float = 0.0
- tolerance: float = 1e-09
+ tolerance: float = TOLERANCE.IDENTITY_RELAXED
@dataclass
alpha: Significance level
"""
- n_bootstrap: int = 100
+ n_bootstrap: int = STATISTICAL.BOOTSTRAP_DEFAULT_ITERATIONS
confidence_level: float = 0.95
- seed: int = 42
+ seed: int = SEEDS.BASE
adjust_method: Optional[str] = None
alpha: float = 0.05
from reward_space_analysis import PBRS_INVARIANCE_TOL, write_complete_statistical_analysis
-from ..constants import SCENARIOS
+from ..constants import SCENARIOS, SEEDS
from ..test_base import RewardSpaceTestBase
pytestmark = pytest.mark.integration
real_df=real_df,
adjust_method="none",
strict_diagnostics=False,
- bootstrap_resamples=SCENARIOS.BOOTSTRAP_STANDARD_ITERATIONS, # keep test fast
+ bootstrap_resamples=SCENARIOS.SAMPLE_SIZE_SMALL, # keep test fast
skip_partial_dependence=kwargs.get("skip_partial_dependence", False),
skip_feature_analysis=kwargs.get("skip_feature_analysis", False),
)
def test_distribution_shift_section_present_with_real_episodes(self):
"""Distribution Shift section renders metrics table when real episodes provided."""
# Synthetic df (ensure >=10 non-NaN per feature)
- synth_df = self.make_stats_df(n=60, seed=123)
+ synth_df = self.make_stats_df(n=60, seed=SEEDS.REPORT_FORMAT_1)
# Real df: shift slightly (different mean) so metrics non-zero
real_df = synth_df.copy()
real_df["pnl"] = real_df["pnl"] + 0.001 # small mean shift
def test_partial_dependence_redundancy_note_emitted(self):
"""Redundancy note appears when both feature analysis and partial dependence skipped."""
df = self.make_stats_df(
- n=10, seed=321
+ n=10, seed=SEEDS.REPORT_FORMAT_2
) # small but >=4 so skip_feature_analysis flag drives behavior
content = self._write_report(
df,
write_complete_statistical_analysis,
)
+from ..constants import SEEDS
from ..helpers import (
assert_non_canonical_shaping_exceeds,
assert_pbrs_canonical_sum_within_tolerance,
)
df = simulate_samples(
params={**params, "max_trade_duration_candles": 100},
- num_samples=SCENARIOS.DEFAULT_SAMPLE_SIZE,
+ num_samples=SCENARIOS.SAMPLE_SIZE_MEDIUM,
seed=self.SEED,
base_factor=self.TEST_BASE_FACTOR,
profit_target=self.TEST_PROFIT_TARGET,
)
df = simulate_samples(
params={**params, "max_trade_duration_candles": 100},
- num_samples=SCENARIOS.DEFAULT_SAMPLE_SIZE,
+ num_samples=SCENARIOS.SAMPLE_SIZE_MEDIUM,
seed=self.SEED,
base_factor=self.TEST_BASE_FACTOR,
profit_target=self.TEST_PROFIT_TARGET,
df = simulate_samples(
params={**params, "max_trade_duration_candles": 140},
num_samples=SCENARIOS.SAMPLE_SIZE_LARGE // 2, # 400 (close to the original 500)
- seed=913,
+ seed=SEEDS.PBRS_INVARIANCE_1,
base_factor=self.TEST_BASE_FACTOR,
profit_target=self.TEST_PROFIT_TARGET,
risk_reward_ratio=self.TEST_RR,
df_exc = simulate_samples(
params={**params, "max_trade_duration_candles": 120},
num_samples=250,
- seed=515,
+ seed=SEEDS.PBRS_INVARIANCE_2,
base_factor=self.TEST_BASE_FACTOR,
profit_target=self.TEST_PROFIT_TARGET,
risk_reward_ratio=self.TEST_RR,
)
df_can = simulate_samples(
params={**params_can, "max_trade_duration_candles": 120},
- num_samples=SCENARIOS.DEFAULT_SAMPLE_SIZE,
- seed=777,
+ num_samples=SCENARIOS.SAMPLE_SIZE_MEDIUM,
+ seed=SEEDS.PBRS_TERMINAL,
base_factor=self.TEST_BASE_FACTOR,
profit_target=self.TEST_PROFIT_TARGET,
risk_reward_ratio=self.TEST_RR,
)
df_non = simulate_samples(
params={**params_non, "max_trade_duration_candles": 120},
- num_samples=SCENARIOS.DEFAULT_SAMPLE_SIZE,
- seed=777,
+ num_samples=SCENARIOS.SAMPLE_SIZE_MEDIUM,
+ seed=SEEDS.PBRS_TERMINAL,
base_factor=self.TEST_BASE_FACTOR,
profit_target=self.TEST_PROFIT_TARGET,
risk_reward_ratio=self.TEST_RR,
m2 = np.mean(c**2)
m3 = np.mean(c**3)
m4 = np.mean(c**4)
- skew = m3 / (m2**1.5 + 1e-18)
- kurt = m4 / (m2**2 + 1e-18) - 3.0
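+ # Standardized central moments: skewness = m3 / m2**1.5, excess kurtosis = m4 / m2**2 - 3; the guard avoids 0/0 on near-constant samples.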
+ skew = m3 / (m2**1.5 + self.TOL_NUMERIC_GUARD)
+ kurt = m4 / (m2**2 + self.TOL_NUMERIC_GUARD) - 3.0
return (float(skew), float(kurt))
s_base, k_base = _skew_kurt(base)
rng = np.random.default_rng(321)
last_potential = 0.0
shaping_sum = 0.0
+ from ..constants import STATISTICAL
+
for _ in range(SCENARIOS.MONTE_CARLO_ITERATIONS):
- is_exit = rng.uniform() < 0.15
+ is_exit = rng.uniform() < STATISTICAL.EXIT_PROBABILITY_THRESHOLD
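+ # On exit, the next pnl/duration are reset to 0.0; otherwise fresh values are drawn for the next step.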
next_pnl = 0.0 if is_exit else float(rng.normal(0, 0.07))
next_dur = 0.0 if is_exit else float(rng.uniform(0, 1))
_tot, shap, next_pot, _pbrs_delta, _entry_additive, _exit_additive = (
"""Report generation without PBRS columns triggers absence + shift placeholder."""
import pandas as pd
+ from ..constants import SEEDS
+
n = 90
- rng = np.random.default_rng(123)
+ rng = np.random.default_rng(SEEDS.CANONICAL_SWEEP)
df = pd.DataFrame(
{
"reward": rng.normal(0.05, 0.02, n),
def test_plateau_continuity_at_grace_boundary(self):
"""Test plateau continuity at grace boundary."""
- modes = ["sqrt", "linear", "power", "half_life"]
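+ # ATTENUATION_MODES is expected to exclude "legacy", matching the list previously hard-coded here.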
+ modes = list(ATTENUATION_MODES)
grace = 0.8
eps = self.CONTINUITY_EPS_SMALL
base_factor = self.TEST_BASE_FACTOR
import pytest
from reward_space_analysis import _perform_feature_analysis # type: ignore
+from tests.constants import SEEDS
pytestmark = pytest.mark.statistics
def test_feature_analysis_missing_reward_column():
df = _minimal_df().drop(columns=["reward"]) # remove reward
importance_df, stats, partial_deps, model = _perform_feature_analysis(
- df, seed=7, skip_partial_dependence=True
+ df, seed=SEEDS.FEATURE_EMPTY, skip_partial_dependence=True
)
assert importance_df.empty
assert stats["model_fitted"] is False
def test_feature_analysis_empty_frame():
df = _minimal_df(0) # empty
importance_df, stats, partial_deps, model = _perform_feature_analysis(
- df, seed=7, skip_partial_dependence=True
+ df, seed=SEEDS.FEATURE_EMPTY, skip_partial_dependence=True
)
assert importance_df.empty
assert stats["n_features"] == 0
def test_feature_analysis_single_feature_path():
df = pd.DataFrame({"pnl": np.random.normal(0, 1, 25), "reward": np.random.normal(0, 1, 25)})
importance_df, stats, partial_deps, model = _perform_feature_analysis(
- df, seed=11, skip_partial_dependence=True
+ df, seed=SEEDS.FEATURE_PRIME_11, skip_partial_dependence=True
)
assert stats["n_features"] == 1
# Importance stub path returns NaNs
}
)
importance_df, stats, partial_deps, model = _perform_feature_analysis(
- df, seed=13, skip_partial_dependence=True
+ df, seed=SEEDS.FEATURE_PRIME_13, skip_partial_dependence=True
)
# Should hit NaN stub path (model_fitted False)
assert stats["model_fitted"] is False
monkeypatch.setattr(RandomForestRegressor, "fit", boom)
df = _minimal_df(50)
importance_df, stats, partial_deps, model = _perform_feature_analysis(
- df, seed=21, skip_partial_dependence=True
+ df, seed=SEEDS.FEATURE_PRIME_21, skip_partial_dependence=True
)
assert stats["model_fitted"] is False
assert model is None
monkeypatch.setattr("reward_space_analysis.permutation_importance", perm_boom)
df = _minimal_df(60)
importance_df, stats, partial_deps, model = _perform_feature_analysis(
- df, seed=33, skip_partial_dependence=False
+ df, seed=SEEDS.FEATURE_PRIME_33, skip_partial_dependence=False
)
assert stats["model_fitted"] is True
# Importance should be NaNs due to failure
def test_feature_analysis_success_partial_dependence():
df = _minimal_df(70)
importance_df, stats, partial_deps, model = _perform_feature_analysis(
- df, seed=47, skip_partial_dependence=False
+ df, seed=SEEDS.FEATURE_PRIME_47, skip_partial_dependence=False
)
# Expect at least one non-NaN importance (model fitted path)
assert importance_df["importance_mean"].notna().any()
monkeypatch.setattr("reward_space_analysis.r2_score", None)
df = _minimal_df(10)
with pytest.raises(ImportError):
- _perform_feature_analysis(df, seed=5, skip_partial_dependence=True)
+ _perform_feature_analysis(df, seed=SEEDS.FEATURE_SMALL_5, skip_partial_dependence=True)
def test_module_level_sklearn_import_failure_reload():
# Perform feature analysis should raise ImportError under missing components
df = _minimal_df(15)
with pytest.raises(ImportError):
- rsa_fallback._perform_feature_analysis(df, seed=3, skip_partial_dependence=True) # type: ignore[attr-defined]
+ rsa_fallback._perform_feature_analysis(  # type: ignore[attr-defined]
+     df, seed=SEEDS.FEATURE_SMALL_3, skip_partial_dependence=True
+ )
finally:
# Restore importer
builtins.__import__ = orig_import
"""Equal scaling keeps KL/JS ≈0."""
from ..constants import SCENARIOS, STAT_TOL
- df1 = self._shift_scale_df(SCENARIOS.DEFAULT_SAMPLE_SIZE)
+ df1 = self._shift_scale_df(SCENARIOS.SAMPLE_SIZE_MEDIUM)
scale = 3.5
df2 = df1.copy()
df2["pnl"] *= scale
from ..constants import SCENARIOS
rng = np.random.default_rng(1234)
- n = SCENARIOS.NULL_HYPOTHESIS_SAMPLE_SIZE
+ n = SCENARIOS.SAMPLE_SIZE_MEDIUM
df = pd.DataFrame(
{
"pnl": rng.normal(0, 1, n),
pnl_duration_vol_scale=self.TEST_PNL_DUR_VOL_SCALE,
)
exit_data = df[df["reward_exit"] != 0].copy()
- if len(exit_data) < SCENARIOS.HETEROSCEDASTICITY_MIN_EXITS:
+ if len(exit_data) < SCENARIOS.SAMPLE_SIZE_TINY:
self.skipTest("Insufficient exit actions for heteroscedasticity test")
exit_data["duration_bin"] = pd.cut(
exit_data["duration_ratio"], bins=4, labels=["Q1", "Q2", "Q3", "Q4"]
CONTINUITY,
EXIT_FACTOR,
PBRS,
+ SCENARIOS,
+ SEEDS,
+ STATISTICAL,
TOLERANCE,
)
@classmethod
def setUpClass(cls):
"""Set up class-level constants."""
- cls.SEED = 42
+ cls.SEED = SEEDS.BASE
cls.DEFAULT_PARAMS = DEFAULT_MODEL_REWARD_PARAMETERS.copy()
- cls.TEST_SAMPLES = 50
+ cls.TEST_SAMPLES = SCENARIOS.SAMPLE_SIZE_TINY
cls.TEST_BASE_FACTOR = 100.0
cls.TEST_PROFIT_TARGET = 0.03
cls.TEST_RR = 1.0
cls.TEST_PNL_STD = 0.02
cls.TEST_PNL_DUR_VOL_SCALE = 0.5
# Seeds for different test contexts
- cls.SEED_SMOKE_TEST = 7
- cls.SEED_REPRODUCIBILITY = 777
- cls.SEED_BOOTSTRAP = 2024
- cls.SEED_HETEROSCEDASTICITY = 123
+ cls.SEED_SMOKE_TEST = SEEDS.SMOKE_TEST
+ cls.SEED_REPRODUCIBILITY = SEEDS.REPRODUCIBILITY
+ cls.SEED_BOOTSTRAP = SEEDS.BOOTSTRAP
+ cls.SEED_HETEROSCEDASTICITY = SEEDS.HETEROSCEDASTICITY
# Statistical test thresholds
- cls.BOOTSTRAP_DEFAULT_ITERATIONS = 200
- cls.BH_FP_RATE_THRESHOLD = 0.15
- cls.EXIT_FACTOR_SCALING_RATIO_MIN = 5.0
- cls.EXIT_FACTOR_SCALING_RATIO_MAX = 15.0
+ cls.BOOTSTRAP_DEFAULT_ITERATIONS = SCENARIOS.BOOTSTRAP_EXTENDED_ITERATIONS
+ cls.BH_FP_RATE_THRESHOLD = STATISTICAL.BH_FP_RATE_THRESHOLD
+ cls.EXIT_FACTOR_SCALING_RATIO_MIN = EXIT_FACTOR.SCALING_RATIO_MIN
+ cls.EXIT_FACTOR_SCALING_RATIO_MAX = EXIT_FACTOR.SCALING_RATIO_MAX
def setUp(self):
"""Set up test fixtures with reproducible random seed."""
MIN_EXIT_POWER_TAU = EXIT_FACTOR.MIN_POWER_TAU
# Test-specific constants
- PBRS_TERMINAL_PROB = 0.08
- PBRS_SWEEP_ITER = 120
+ PBRS_TERMINAL_PROB = PBRS.TERMINAL_PROBABILITY
+ PBRS_SWEEP_ITER = SCENARIOS.PBRS_SWEEP_ITERATIONS
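+ # Jensen-Shannon divergence (natural log) is bounded by ln(2), so the JS distance is bounded by sqrt(ln(2)) ≈ 0.8326.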
JS_DISTANCE_UPPER_BOUND = math.sqrt(math.log(2.0))
def make_ctx(
*,
iterations: Optional[int] = None,
terminal_prob: Optional[float] = None,
- seed: int = 123,
+ seed: int = SEEDS.CANONICAL_SWEEP,
) -> tuple[list[float], list[float]]:
"""Run a lightweight canonical invariance sweep.
if diff <= tolerance:
return
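+ # Absolute tolerance check failed; fall back to a relative check scaled by the larger magnitude (floored at TOL_NEGLIGIBLE).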
if rtol is not None:
- scale = max(abs(first), abs(second), 1e-15)
+ scale = max(abs(first), abs(second), self.TOL_NEGLIGIBLE)
if diff <= rtol * scale:
return
self.fail(
self.assertAlmostEqualFloat(va, vb, tolerance=atol, rtol=rtol, msg=msg)
@staticmethod
- def seed_all(seed: int = 123) -> None:
+ def seed_all(seed: int = SEEDS.CANONICAL_SWEEP) -> None:
"""Seed all RNGs used (numpy & random)."""
np.random.seed(seed)
random.seed(seed)
- def _const_df(self, n: int = 64) -> pd.DataFrame:
+ def _const_df(self, n: int = SCENARIOS.SAMPLE_SIZE_CONST_DF) -> pd.DataFrame:
return pd.DataFrame(
{
"reward": np.ones(n) * 0.5,
}
)
- def _shift_scale_df(self, n: int = 256, shift: float = 0.0, scale: float = 1.0) -> pd.DataFrame:
- rng = np.random.default_rng(123)
+ def _shift_scale_df(
+ self, n: int = SCENARIOS.SAMPLE_SIZE_SHIFT_SCALE, shift: float = 0.0, scale: float = 1.0
+ ) -> pd.DataFrame:
+ rng = np.random.default_rng(SEEDS.CANONICAL_SWEEP)
base = rng.normal(0, 1, n)
return pd.DataFrame(
{
from typing import (
Any,
Callable,
+ ClassVar,
Dict,
+ Final,
List,
Literal,
Optional,
- pip install optuna-dashboard
"""
- _LOG_2 = math.log(2.0)
- DEFAULT_IDLE_DURATION_MULTIPLIER: int = 4
+ _LOG_2: Final[float] = math.log(2.0)
+ DEFAULT_IDLE_DURATION_MULTIPLIER: Final[int] = 4
- _MODEL_TYPES: tuple[ModelType, ...] = (
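+ # Tuple order is significant: sampler helpers reference entries by index (e.g. _MODEL_TYPES[0] == "PPO").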
+ _MODEL_TYPES: Final[tuple[ModelType, ...]] = (
"PPO",
"RecurrentPPO",
"MaskablePPO",
"DQN",
"QRDQN",
)
- _SCHEDULE_TYPES: tuple[ScheduleType, ...] = ("linear", "constant", "unknown")
- _EXIT_POTENTIAL_MODES: tuple[ExitPotentialMode, ...] = (
+ _SCHEDULE_TYPES: Final[tuple[ScheduleType, ...]] = ("linear", "constant", "unknown")
+ _EXIT_POTENTIAL_MODES: Final[tuple[ExitPotentialMode, ...]] = (
"canonical",
"non_canonical",
"progressive_release",
"spike_cancel",
"retain_previous",
)
- _TRANSFORM_FUNCTIONS: tuple[TransformFunction, ...] = (
+ _TRANSFORM_FUNCTIONS: Final[tuple[TransformFunction, ...]] = (
"tanh",
"softsign",
"arctan",
"asinh",
"clip",
)
- _EXIT_ATTENUATION_MODES: tuple[ExitAttenuationMode, ...] = (
+ _EXIT_ATTENUATION_MODES: Final[tuple[ExitAttenuationMode, ...]] = (
"legacy",
"sqrt",
"linear",
"power",
"half_life",
)
- _ACTIVATION_FUNCTIONS: tuple[ActivationFunction, ...] = (
+ _ACTIVATION_FUNCTIONS: Final[tuple[ActivationFunction, ...]] = (
"tanh",
"relu",
"elu",
"leaky_relu",
)
- _OPTIMIZER_CLASSES: tuple[OptimizerClass, ...] = ("adam", "adamw", "rmsprop")
- _NET_ARCH_SIZES: tuple[NetArchSize, ...] = (
+ _OPTIMIZER_CLASSES: Final[tuple[OptimizerClass, ...]] = ("adam", "adamw", "rmsprop")
+ _NET_ARCH_SIZES: Final[tuple[NetArchSize, ...]] = (
"small",
"medium",
"large",
"extra_large",
)
- _STORAGE_BACKENDS: tuple[StorageBackend, ...] = ("sqlite", "file")
- _SAMPLER_TYPES: tuple[SamplerType, ...] = ("tpe", "auto")
+ _STORAGE_BACKENDS: Final[tuple[StorageBackend, ...]] = ("sqlite", "file")
+ _SAMPLER_TYPES: Final[tuple[SamplerType, ...]] = ("tpe", "auto")
- _action_masks_cache: Dict[Tuple[bool, float], NDArray[np.bool_]] = {}
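+ # ClassVar: a single cache shared across instances, keyed by a (bool, float) tuple.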
+ _action_masks_cache: ClassVar[Dict[Tuple[bool, float], NDArray[np.bool_]]] = {}
@staticmethod
def _model_types_set() -> set[ModelType]:
logger.info("Trial %s params: %s", trial.number, params)
# "PPO"
-
if self._MODEL_TYPES[0] in self.model_type:
n_steps = params.get("n_steps", 0)
if n_steps > 0:
Sampler for PPO hyperparams
"""
return convert_optuna_params_to_model_params(
- "PPO", get_common_ppo_optuna_params(trial)
+ ReforceXY._MODEL_TYPES[0], get_common_ppo_optuna_params(trial) # "PPO"
)
Sampler for DQN hyperparams
"""
return convert_optuna_params_to_model_params(
- "DQN", get_common_dqn_optuna_params(trial)
+ ReforceXY._MODEL_TYPES[3], get_common_dqn_optuna_params(trial) # "DQN"
)
"""
dqn_optuna_params = get_common_dqn_optuna_params(trial)
dqn_optuna_params.update({"n_quantiles": trial.suggest_int("n_quantiles", 10, 160)})
- return convert_optuna_params_to_model_params("QRDQN", dqn_optuna_params)
+ return convert_optuna_params_to_model_params(
+ ReforceXY._MODEL_TYPES[4], dqn_optuna_params # "QRDQN"
+ )
import datetime
import logging
from functools import cached_property, reduce
-from typing import Any, Literal, Optional
+from typing import Any, Final, Literal, Optional
# import talib.abstract as ta
from freqtrade.persistence import Trade
logger = logging.getLogger(__name__)
-ACTION_COLUMN = "&-action"
+ACTION_COLUMN: Final = "&-action"
class RLAgentStrategy(IStrategy):
INTERFACE_VERSION = 3
- _TRADING_MODES: tuple[TradingMode, ...] = ("margin", "futures", "spot")
- _TRADE_DIRECTIONS: tuple[TradeDirection, ...] = ("long", "short")
- _ACTION_ENTER_LONG: int = 1
- _ACTION_EXIT_LONG: int = 2
- _ACTION_ENTER_SHORT: int = 3
- _ACTION_EXIT_SHORT: int = 4
+ _TRADING_MODES: Final[tuple[TradingMode, ...]] = ("margin", "futures", "spot")
+ _TRADE_DIRECTIONS: Final[tuple[TradeDirection, ...]] = ("long", "short")
+ _ACTION_ENTER_LONG: Final[int] = 1
+ _ACTION_EXIT_LONG: Final[int] = 2
+ _ACTION_ENTER_SHORT: Final[int] = 3
+ _ACTION_EXIT_SHORT: Final[int] = 4
@cached_property
def can_short(self) -> bool:
import warnings
from functools import cached_property
from pathlib import Path
-from typing import Any, Callable, Literal, Optional
+from typing import Any, Callable, Final, Literal, Optional
import numpy as np
import optuna
debug = False
-TEST_SIZE = 0.1
+TEST_SIZE: Final = 0.1
-EXTREMA_COLUMN = "&s-extrema"
-MAXIMA_THRESHOLD_COLUMN = "&s-maxima_threshold"
-MINIMA_THRESHOLD_COLUMN = "&s-minima_threshold"
+EXTREMA_COLUMN: Final = "&s-extrema"
+MAXIMA_THRESHOLD_COLUMN: Final = "&s-maxima_threshold"
+MINIMA_THRESHOLD_COLUMN: Final = "&s-minima_threshold"
warnings.simplefilter(action="ignore", category=FutureWarning)
version = "3.7.121"
- _SQRT_2 = np.sqrt(2.0)
+ _SQRT_2: Final[float] = np.sqrt(2.0)
- _EXTREMA_SELECTION_METHODS: tuple[ExtremaSelectionMethod, ...] = (
+ _EXTREMA_SELECTION_METHODS: Final[tuple[ExtremaSelectionMethod, ...]] = (
"peak_values",
"extrema_rank",
)
- _OPTUNA_STORAGE_BACKENDS: tuple[str, ...] = ("sqlite", "file")
- _OPTUNA_SAMPLERS: tuple[str, ...] = ("tpe", "auto")
- _OPTUNA_NAMESPACES: tuple[OptunaNamespace, ...] = ("hp", "train", "label")
+ _OPTUNA_STORAGE_BACKENDS: Final[tuple[str, ...]] = ("sqlite", "file")
+ _OPTUNA_SAMPLERS: Final[tuple[str, ...]] = ("tpe", "auto")
+ _OPTUNA_NAMESPACES: Final[tuple[OptunaNamespace, ...]] = ("hp", "train", "label")
@staticmethod
def _extrema_selection_methods_set() -> set[ExtremaSelectionMethod]:
import math
from functools import cached_property, lru_cache, reduce
from pathlib import Path
-from typing import Any, Callable, Literal, Optional, Sequence, Tuple
+from typing import Any, Callable, ClassVar, Final, Literal, Optional, Sequence, Tuple
import numpy as np
import pandas_ta as pta
logger = logging.getLogger(__name__)
-EXTREMA_COLUMN = "&s-extrema"
-MAXIMA_THRESHOLD_COLUMN = "&s-maxima_threshold"
-MINIMA_THRESHOLD_COLUMN = "&s-minima_threshold"
+EXTREMA_COLUMN: Final = "&s-extrema"
+MAXIMA_THRESHOLD_COLUMN: Final = "&s-maxima_threshold"
+MINIMA_THRESHOLD_COLUMN: Final = "&s-minima_threshold"
class QuickAdapterV3(IStrategy):
INTERFACE_VERSION = 3
- _TRADE_DIRECTIONS: tuple[TradeDirection, ...] = ("long", "short")
- _INTERPOLATION_DIRECTIONS: tuple[InterpolationDirection, ...] = (
+ _TRADE_DIRECTIONS: Final[tuple[TradeDirection, ...]] = ("long", "short")
+ _INTERPOLATION_DIRECTIONS: Final[tuple[InterpolationDirection, ...]] = (
"direct",
"inverse",
)
- _ORDER_TYPES: tuple[OrderType, ...] = ("entry", "exit")
- _TRADING_MODES: tuple[TradingMode, ...] = ("spot", "margin", "futures")
+ _ORDER_TYPES: Final[tuple[OrderType, ...]] = ("entry", "exit")
+ _TRADING_MODES: Final[tuple[TradingMode, ...]] = ("spot", "margin", "futures")
def version(self) -> str:
return "3.3.171"
stoploss = -0.025
use_custom_stoploss = True
- default_exit_thresholds: dict[str, float] = {
+ default_exit_thresholds: ClassVar[dict[str, float]] = {
"k_decl_v": 0.6,
"k_decl_a": 0.4,
}
- default_exit_thresholds_calibration: dict[str, float] = {
+ default_exit_thresholds_calibration: ClassVar[dict[str, float]] = {
"decline_quantile": 0.90,
}
- default_reversal_confirmation: dict[str, int | float] = {
+ default_reversal_confirmation: ClassVar[dict[str, int | float]] = {
"lookback_period": 0,
"decay_ratio": 0.5,
"min_natr_ratio_percent": 0.0095,
position_adjustment_enable = True
# {stage: (natr_ratio_percent, stake_percent)}
- partial_exit_stages: dict[int, tuple[float, float]] = {
+ partial_exit_stages: ClassVar[dict[int, tuple[float, float]]] = {
0: (0.4858, 0.4),
1: (0.6180, 0.3),
2: (0.7640, 0.2),
min_natr_ratio_percent: float,
max_natr_ratio_percent: float,
candle_idx: int = -1,
- interpolation_direction: Literal["direct", "inverse"] = "direct",
+ interpolation_direction: InterpolationDirection = "direct",
quantile_exponent: float = 1.5,
) -> float:
df_signature = QuickAdapterV3._df_signature(df)