)
if (
not isinstance(self.freqai_info.get("identifier"), str)
- or self.freqai_info.get("identifier").strip() == ""
+ or not self.freqai_info.get("identifier").strip()
):
raise ValueError(
"FreqAI model requires 'identifier' defined in the freqai section configuration"
direction: Optional[optuna.study.StudyDirection] = None,
directions: Optional[list[optuna.study.StudyDirection]] = None,
) -> Optional[optuna.study.Study]:
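+ # 'direction' and 'directions' are mutually exclusive; reject ambiguous calls with an explicit error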
+ if direction is not None and directions is not None:
+ raise ValueError(
+ "Cannot specify both 'direction' and 'directions'. Use one or the other"
+ )
is_study_single_objective = direction is not None and directions is None
if (
not is_study_single_objective
direction: Optional[optuna.study.StudyDirection] = None,
directions: Optional[list[optuna.study.StudyDirection]] = None,
) -> Optional[optuna.study.Study]:
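+ # same mutual-exclusion guard on 'direction'/'directions' as above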
+ if direction is not None and directions is not None:
+ raise ValueError(
+ "Cannot specify both 'direction' and 'directions'. Use one or the other"
+ )
identifier = self.freqai_info.get("identifier")
study_name = f"{identifier}-{pair}-{namespace}"
try:
)
if (
not isinstance(self.freqai_info.get("identifier"), str)
- or self.freqai_info.get("identifier").strip() == ""
+ or not self.freqai_info.get("identifier").strip()
):
raise ValueError(
"FreqAI strategy requires 'identifier' defined in the freqai section configuration"
f"{pair}: no extrema to label (label_period={QuickAdapterV3.td_format(label_period)} / {label_period_candles=} / {label_natr_ratio=:.2f})"
)
else:
- for pivot_idx, pivot_dir in zip(pivots_indices, pivots_directions):
- dataframe.at[pivot_idx, EXTREMA_COLUMN] = pivot_dir
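+ # assign every pivot direction in one vectorized .loc call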
+ dataframe.loc[pivots_indices, EXTREMA_COLUMN] = pivots_directions
dataframe["minima"] = np.where(
dataframe[EXTREMA_COLUMN] == TrendDirection.DOWN, -1, 0
)
current_rate: float,
natr_ratio_percent: float,
) -> Optional[float]:
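+ # natr_ratio_percent is a fraction in [0, 1]; reject out-of-range values early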
+ if not (0.0 <= natr_ratio_percent <= 1.0):
+ raise ValueError(
+ f"natr_ratio_percent must be in [0, 1], got {natr_ratio_percent}"
+ )
trade_duration_candles = self.get_trade_duration_candles(df, trade)
if not QuickAdapterV3.is_trade_duration_valid(trade_duration_candles):
return None
candle_duration_secs = max(1, int(self._candle_duration_secs))
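+ # floor the timestamp to the start of the current candle so the callback fires at most once per candle per key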
candle_start_secs = (timestamp // candle_duration_secs) * candle_duration_secs
callback_hash = get_callable_sha256(callback)
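+ # join pair and callback hash with a NUL byte so the two parts cannot collide in the key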
- key = hashlib.sha256(f"{pair}|{callback_hash}".encode()).hexdigest()
+ key = hashlib.sha256(f"{pair}\x00{callback_hash}".encode()).hexdigest()
if candle_start_secs != self.last_candle_start_secs.get(key):
self.last_candle_start_secs[key] = candle_start_secs
try:
f"Error executing callback for {pair}: {repr(e)}", exc_info=True
)
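+ # drop throttle entries that have not been updated for more than 10 candle durations to keep the dict bounded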
+ threshold_secs = 10 * candle_duration_secs
+ stale_keys = [
+ cached_key
+ for cached_key, ts in self.last_candle_start_secs.items()
+ if ts is not None and timestamp - ts > threshold_secs
+ ]
+ for stale_key in stale_keys:
+ del self.last_candle_start_secs[stale_key]
+
def custom_stoploss(
self,
pair: str,
if zero_lag:
series = calculate_zero_lag(series, period=period)
- smma = pd.Series(np.nan, index=series.index)
- smma.iloc[period - 1] = series.iloc[:period].mean()
+ values = series.to_numpy()
+
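+ # seed with the simple mean of the first 'period' values, then apply the SMMA recurrence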
+ smma_values = np.full(n, np.nan)
+ smma_values[period - 1] = np.mean(values[:period])
for i in range(period, n):
- smma.iloc[i] = (smma.iloc[i - 1] * (period - 1) + series.iloc[i]) / period
+ smma_values[i] = (smma_values[i - 1] * (period - 1) + values[i]) / period
+
+ smma = pd.Series(smma_values, index=series.index)
if offset != 0:
smma = smma.shift(offset)
raise ValueError(
f"Unsupported regressor model: {regressor} (supported: {', '.join(regressors)})"
)
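+ # reject an out-of-range expansion_ratio before the hyperparameter search ranges are built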
+ if not isinstance(expansion_ratio, (int, float)) or not (
+ 0.0 <= expansion_ratio <= 1.0
+ ):
+ raise ValueError(
+ f"expansion_ratio must be a float between 0 and 1, got {expansion_ratio}"
+ )
default_ranges = {
"n_estimators": (100, 2000),
"learning_rate": (1e-3, 0.5),