)
return storage
- def study(self, train_df, total_timesteps: int, dk: FreqaiDataKitchen) -> Dict:
+ def study(
+ self, train_df: DataFrame, total_timesteps: int, dk: FreqaiDataKitchen
+ ) -> Dict:
"""
Runs hyperparameter optimization using Optuna and
returns the best hyperparameters found
return None
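For orientation: `study()` creates (or resumes) an Optuna study backed by the storage object returned above, runs `objective()` once per trial, and returns the best hyperparameters found. A minimal sketch of that driver, assuming a per-pair study name and a fixed trial budget (neither is confirmed by this hunk):

```python
import optuna


def run_study(objective_fn, storage, pair: str, n_trials: int = 10) -> dict:
    # Hypothetical driver mirroring study(); names and defaults are assumptions.
    study = optuna.create_study(
        study_name=pair,          # assumption: one study per trading pair
        storage=storage,          # the storage object returned above
        direction="maximize",
        load_if_exists=True,      # resume an earlier study instead of failing
    )
    study.optimize(objective_fn, n_trials=n_trials)
    return study.best_params      # the "best hyperparameters found"
```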
def objective(
- self, trial: Trial, train_df, total_timesteps: int, dk: FreqaiDataKitchen
+ self,
+ trial: Trial,
+ train_df: DataFrame,
+ total_timesteps: int,
+ dk: FreqaiDataKitchen,
) -> float:
"""
Defines a single trial for hyperparameter optimization using Optuna
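The body of `objective()` is elided from this hunk. As a hedged sketch of what a single trial typically does with Optuna's `Trial` API — the parameter names below are common PPO-style knobs chosen for illustration, not the PR's actual search space:

```python
from optuna import Trial


def objective(trial: Trial, train_df, total_timesteps: int) -> float:
    # Assumed PPO-style search space; the real parameters may differ.
    learning_rate = trial.suggest_float("learning_rate", 1e-5, 1e-3, log=True)
    gamma = trial.suggest_float("gamma", 0.9, 0.9999, log=True)
    n_steps = trial.suggest_categorical("n_steps", [512, 1024, 2048])
    # ...train the model with these values for `total_timesteps`,
    # then evaluate it; Optuna maximizes whatever this function returns.
    score = 0.0  # placeholder for the real evaluation metric
    return score
```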
self._non_profit_steps: int = 0
return self._get_observation(), history
- def get_reward_factor_at_trade_exit(
+ def _get_reward_factor_at_trade_exit(
self,
factor: float,
pnl: float,
ForceActions.Stop_loss,
ForceActions.Timeout,
):
- return pnl * self.get_reward_factor_at_trade_exit(
+ return pnl * self._get_reward_factor_at_trade_exit(
factor, pnl, trade_duration, max_trade_duration
)
# close long
if action == Actions.Long_exit.value and self._position == Positions.Long:
- return pnl * self.get_reward_factor_at_trade_exit(
+ return pnl * self._get_reward_factor_at_trade_exit(
factor, pnl, trade_duration, max_trade_duration
)
# close short
if action == Actions.Short_exit.value and self._position == Positions.Short:
- return pnl * self.get_reward_factor_at_trade_exit(
+ return pnl * self._get_reward_factor_at_trade_exit(
factor, pnl, trade_duration, max_trade_duration
)
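All three exit paths (force exit, long exit, short exit) now share the renamed private helper, so the reward is consistently `pnl` scaled by a duration-aware factor. The helper's body is not shown in this diff; sketched below as a free function, purely as an illustration of the kind of weighting such a helper can apply, not the PR's implementation:

```python
def _get_reward_factor_at_trade_exit(
    factor: float,
    pnl: float,
    trade_duration: int,
    max_trade_duration: int,
) -> float:
    # Illustrative sketch only: favor early profitable exits and taper
    # the factor toward zero as the trade approaches its maximum duration.
    duration_ratio = (
        trade_duration / max_trade_duration if max_trade_duration > 0 else 1.0
    )
    if pnl > 0.0:
        return factor * max(1.5 - duration_ratio, 0.0)  # assumed early-exit bonus
    return factor * max(1.0 - duration_ratio, 0.0)
```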