Merge branch 'freqtrade:develop' into allow-pairs-with-prefix-in-marketcap-pairList

Author: mrpabloyeah
Date: 2025-08-25 13:51:16 +02:00
Committed by: GitHub

85 changed files with 4717 additions and 4107 deletions


@@ -17,7 +17,7 @@ def start_analysis_entries_exits(args: dict[str, Any]) -> None:
     from freqtrade.data.entryexitanalysis import process_entry_exit_reasons

     # Initialize configuration
-    config = setup_utils_configuration(args, RunMode.BACKTEST)
+    config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)

     logger.info("Starting freqtrade in analysis mode")


@@ -54,6 +54,7 @@ ARGS_BACKTEST = [
     "strategy_list",
     "export",
     "exportfilename",
+    "exportdirectory",
     "backtest_breakdown",
     "backtest_cache",
     "freqai_backtest_live_models",
@@ -94,7 +95,12 @@ ARGS_LIST_FREQAIMODELS = ["freqaimodel_path", "print_one_column"]
 ARGS_LIST_HYPEROPTS = ["hyperopt_path", "print_one_column"]

-ARGS_BACKTEST_SHOW = ["exportfilename", "backtest_show_pair_list", "backtest_breakdown"]
+ARGS_BACKTEST_SHOW = [
+    "exportfilename",
+    "exportdirectory",
+    "backtest_show_pair_list",
+    "backtest_breakdown",
+]

 ARGS_LIST_EXCHANGES = ["print_one_column", "list_exchanges_all", "trading_mode", "dex_exchanges"]
@@ -233,6 +239,7 @@ ARGS_HYPEROPT_SHOW = [
 ARGS_ANALYZE_ENTRIES_EXITS = [
     "exportfilename",
+    "exportdirectory",
     "analysis_groups",
     "enter_reason_list",
     "exit_reason_list",


@@ -199,22 +199,29 @@ AVAILABLE_CLI_OPTIONS = {
         "(so `backtest-data.json` becomes `backtest-data-SampleStrategy.json`",
         nargs="+",
     ),
-    "export": Arg(
-        "--export",
-        help="Export backtest results (default: trades).",
-        choices=constants.EXPORT_OPTIONS,
-    ),
+    "backtest_notes": Arg(
+        "--notes",
+        help="Add notes to the backtest results.",
+        metavar="TEXT",
+    ),
+    "export": Arg(
+        "--export",
+        help="Export backtest results (default: trades).",
+        choices=constants.EXPORT_OPTIONS,
+    ),
+    "exportdirectory": Arg(
+        "--backtest-directory",
+        "--export-directory",
+        help="Directory to use for backtest results. "
+        "Example: `--export-directory=user_data/backtest_results/`. ",
+        metavar="PATH",
+    ),
     "exportfilename": Arg(
-        "--export-filename",
         "--backtest-filename",
+        "--export-filename",
         help="Use this filename for backtest results."
         "Requires `--export` to be set as well. "
-        "Example: `--export-filename=user_data/backtest_results/backtest_today.json`",
+        "Example: `--backtest-filename=backtest_results_2020-09-27_16-20-48.json`. "
+        "Assumes either `user_data/backtest_results/` or `--export-directory` as base directory.",
         metavar="PATH",
     ),
     "disableparamexport": Arg(


@@ -146,6 +146,9 @@ def start_list_strategies(args: dict[str, Any]) -> None:
     strategy_objs = StrategyResolver.search_all_objects(
         config, not args["print_one_column"], config.get("recursive_strategy_search", False)
     )
+    if not strategy_objs:
+        logger.warning("No strategies found.")
+        return
     # Sort alphabetically
     strategy_objs = sorted(strategy_objs, key=lambda x: x["name"])
     for obj in strategy_objs:


@@ -72,7 +72,7 @@ def start_backtesting_show(args: dict[str, Any]) -> None:
     from freqtrade.data.btanalysis import load_backtest_stats
     from freqtrade.optimize.optimize_reports import show_backtest_results, show_sorted_pairlist

-    results = load_backtest_stats(config["exportfilename"])
+    results = load_backtest_stats(config["exportdirectory"], config["exportfilename"])

     show_backtest_results(config, results)
     show_sorted_pairlist(config, results)


@@ -453,6 +453,7 @@ CONF_SCHEMA = {
         "pairlists": {
             "description": "Configuration for pairlists.",
             "type": "array",
+            "minItems": 1,
             "items": {
                 "type": "object",
                 "properties": {
@@ -1381,6 +1382,7 @@ SCHEMA_TRADE_REQUIRED = [
     "entry_pricing",
     "stoploss",
     "minimal_roi",
+    "pairlists",
     "internals",
     "dataformat_ohlcv",
     "dataformat_trades",
@@ -1390,6 +1392,7 @@ SCHEMA_BACKTEST_REQUIRED = [
     "exchange",
     "stake_currency",
     "stake_amount",
+    "pairlists",
     "dry_run_wallet",
     "dataformat_ohlcv",
     "dataformat_trades",


@@ -66,7 +66,8 @@ def validate_config_schema(conf: dict[str, Any], preliminary: bool = False) -> d
         return conf
     except ValidationError as e:
         logger.critical(f"Invalid configuration. Reason: {e}")
-        raise ValidationError(best_match(Draft4Validator(conf_schema).iter_errors(conf)).message)
+        result = best_match(FreqtradeValidator(conf_schema).iter_errors(conf))
+        raise ConfigurationError(result.message)


 def validate_config_consistency(conf: dict[str, Any], *, preliminary: bool = False) -> None:
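
For context on the replacement: jsonschema's `best_match` picks the most
relevant error out of an error iterator. A self-contained sketch using the
stock `Draft4Validator` (`FreqtradeValidator` above is freqtrade's own
validator class):

    from jsonschema import Draft4Validator
    from jsonschema.exceptions import best_match

    schema = {"type": "object", "required": ["stake_currency"]}
    error = best_match(Draft4Validator(schema).iter_errors({}))
    print(error.message)  # 'stake_currency' is a required property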


@@ -84,9 +84,6 @@ class Configuration:
         if "internals" not in config:
             config["internals"] = {}

-        if "pairlists" not in config:
-            config["pairlists"] = []
-
         # Keep a copy of the original configuration file
         config["original_config"] = deepcopy(config)
@@ -212,13 +209,28 @@ class Configuration:
         config.update({"datadir": create_datadir(config, self.args.get("datadir"))})
         logger.info("Using data directory: %s ...", config.get("datadir"))

+        self._args_to_config(
+            config, argname="exportdirectory", logstring="Using {} as backtest directory ..."
+        )
         if self.args.get("exportfilename"):
             self._args_to_config(
                 config, argname="exportfilename", logstring="Storing backtest results to {} ..."
             )
             config["exportfilename"] = Path(config["exportfilename"])
-        else:
-            config["exportfilename"] = config["user_data_dir"] / "backtest_results"
+            if config.get("exportdirectory") and Path(config["exportdirectory"]).is_dir():
+                logger.warning(
+                    "DEPRECATED: Using `--export-filename` with directories is deprecated, "
+                    "use `--backtest-directory` instead."
+                )
+            if config.get("exportdirectory") is None:
+                # Fallback - assign export-directory directly.
+                config["exportdirectory"] = config["exportfilename"]
+        if not config.get("exportdirectory"):
+            config["exportdirectory"] = config["user_data_dir"] / "backtest_results"
+        if not config.get("exportfilename"):
+            config["exportfilename"] = None
+        config["exportdirectory"] = Path(config["exportdirectory"])

         if self.args.get("show_sensitive"):
             logger.warning(


@@ -16,10 +16,7 @@ from .bt_fileutils import (
     load_backtest_data,
     load_backtest_metadata,
     load_backtest_stats,
-    load_exit_signal_candles,
     load_file_from_zip,
-    load_rejected_signals,
-    load_signal_candles,
     load_trades,
     load_trades_from_db,
     trade_list_to_dataframe,


@@ -155,33 +155,55 @@ def load_backtest_metadata(filename: Path | str) -> dict[str, Any]:
         raise OperationalException("Unexpected error while loading backtest metadata.") from e


-def load_backtest_stats(filename: Path | str) -> BacktestResultType:
+def _normalize_filename(file_or_directory: Path | str, filename: Path | str | None) -> Path:
+    """
+    Normalize the filename by ensuring it is a Path object.
+    :param file_or_directory: The directory or file to normalize.
+    :param filename: The filename to normalize.
+    :return: A Path object representing the normalized filename.
+    """
+    if isinstance(file_or_directory, str):
+        file_or_directory = Path(file_or_directory)
+    if file_or_directory.is_dir():
+        if not filename:
+            filename = get_latest_backtest_filename(file_or_directory)
+        if Path(filename).is_file():
+            fn = Path(filename)
+        else:
+            fn = file_or_directory / filename
+    else:
+        fn = file_or_directory
+    return fn
+
+
+def load_backtest_stats(
+    file_or_directory: Path | str, filename: Path | str | None = None
+) -> BacktestResultType:
     """
     Load backtest statistics file.
-    :param filename: pathlib.Path object, or string pointing to the file.
+    :param file_or_directory: pathlib.Path object, or string pointing to the directory,
+        or absolute/relative path to the backtest results file.
+    :param filename: Optional filename to load from (if different from the main filename).
+        Only valid when loading from a directory.
     :return: a dictionary containing the resulting file.
     """
-    if isinstance(filename, str):
-        filename = Path(filename)
-    if filename.is_dir():
-        filename = filename / get_latest_backtest_filename(filename)
-    if not filename.is_file():
-        raise ValueError(f"File {filename} does not exist.")
-    logger.info(f"Loading backtest result from {filename}")
+    fn = _normalize_filename(file_or_directory, filename)

-    if filename.suffix == ".zip":
+    if not fn.is_file():
+        raise ValueError(f"File or directory {fn} does not exist.")
+    logger.info(f"Loading backtest result from {fn}")
+
+    if fn.suffix == ".zip":
         data = json_load(
-            StringIO(
-                load_file_from_zip(filename, filename.with_suffix(".json").name).decode("utf-8")
-            )
+            StringIO(load_file_from_zip(fn, fn.with_suffix(".json").name).decode("utf-8"))
         )
     else:
-        with filename.open() as file:
+        with fn.open() as file:
             data = json_load(file)

     # Legacy list format does not contain metadata.
     if isinstance(data, dict):
-        data["metadata"] = load_backtest_metadata(filename)
+        data["metadata"] = load_backtest_metadata(fn)
     return data
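
A usage sketch of the reworked signature (file names are illustrative):

    from pathlib import Path
    from freqtrade.data.btanalysis import load_backtest_stats

    # Load the latest result found in a directory ...
    stats = load_backtest_stats(Path("user_data/backtest_results"))
    # ... or a specific result file inside that directory.
    stats = load_backtest_stats("user_data/backtest_results", "backtest-result.zip")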
@@ -362,16 +384,21 @@ def _load_backtest_data_df_compatibility(df: pd.DataFrame) -> pd.DataFrame:
     return df


-def load_backtest_data(filename: Path | str, strategy: str | None = None) -> pd.DataFrame:
+def load_backtest_data(
+    file_or_directory: Path | str, strategy: str | None = None, filename: Path | str | None = None
+) -> pd.DataFrame:
     """
-    Load backtest data file.
-    :param filename: pathlib.Path object, or string pointing to a file or directory
+    Load backtest data file, returns a dataframe with the individual trades.
+    :param file_or_directory: pathlib.Path object, or string pointing to the directory,
+        or absolute/relative path to the backtest results file.
     :param strategy: Strategy to load - mainly relevant for multi-strategy backtests
         Can also serve as protection to load the correct result.
+    :param filename: Optional filename to load from (if different from the main filename).
+        Only valid when loading from a directory.
     :return: a dataframe with the analysis results
     :raise: ValueError if loading goes wrong.
     """
-    data = load_backtest_stats(filename)
+    data = load_backtest_stats(file_or_directory, filename)
     if not isinstance(data, list):
         # new, nested format
         if "strategy" not in data:
@@ -430,20 +457,23 @@ def load_file_from_zip(zip_path: Path, filename: str) -> bytes:
         raise ValueError(f"Bad zip file: {zip_path}.") from None


-def load_backtest_analysis_data(backtest_dir: Path, name: str):
+def load_backtest_analysis_data(
+    file_or_directory: Path,
+    name: Literal["signals", "rejected", "exited"],
+    filename: Path | str | None = None,
+):
     """
     Load backtest analysis data either from a pickle file or from within a zip file
-    :param backtest_dir: Directory containing backtest results
+    :param file_or_directory: pathlib.Path object, or string pointing to the directory,
+        or absolute/relative path to the backtest results file.
     :param name: Name of the analysis data to load (signals, rejected, exited)
+    :param filename: Optional filename to load from (if different from the main filename).
+        Only valid when loading from a directory.
     :return: Analysis data
     """
     import joblib

-    if backtest_dir.is_dir():
-        lbf = Path(get_latest_backtest_filename(backtest_dir))
-        zip_path = backtest_dir / lbf
-    else:
-        zip_path = backtest_dir
+    zip_path = _normalize_filename(file_or_directory, filename)

     if zip_path.suffix == ".zip":
         # Load from zip file
@@ -458,10 +488,10 @@ def load_backtest_analysis_data(backtest_dir: Path, name: str):
     else:
         # Load from separate pickle file
-        if backtest_dir.is_dir():
-            scpf = Path(backtest_dir, f"{zip_path.stem}_{name}.pkl")
+        if file_or_directory.is_dir():
+            scpf = Path(file_or_directory, f"{zip_path.stem}_{name}.pkl")
         else:
-            scpf = Path(backtest_dir.parent / f"{backtest_dir.stem}_{name}.pkl")
+            scpf = Path(file_or_directory.parent / f"{file_or_directory.stem}_{name}.pkl")

     try:
         with scpf.open("rb") as scp:
@@ -473,27 +503,6 @@ def load_backtest_analysis_data(backtest_dir: Path, name: str):
     return None


-def load_rejected_signals(backtest_dir: Path):
-    """
-    Load rejected signals from backtest directory
-    """
-    return load_backtest_analysis_data(backtest_dir, "rejected")
-
-
-def load_signal_candles(backtest_dir: Path):
-    """
-    Load signal candles from backtest directory
-    """
-    return load_backtest_analysis_data(backtest_dir, "signals")
-
-
-def load_exit_signal_candles(backtest_dir: Path) -> dict[str, dict[str, pd.DataFrame]]:
-    """
-    Load exit signal candles from backtest directory
-    """
-    return load_backtest_analysis_data(backtest_dir, "exited")
-
-
 def trade_list_to_dataframe(trades: list[Trade] | list[LocalTrade]) -> pd.DataFrame:
     """
     Convert list of Trade objects to pandas Dataframe


@@ -7,11 +7,9 @@ from freqtrade.configuration import TimeRange
 from freqtrade.constants import Config
 from freqtrade.data.btanalysis import (
     BT_DATA_COLUMNS,
+    load_backtest_analysis_data,
     load_backtest_data,
     load_backtest_stats,
-    load_exit_signal_candles,
-    load_rejected_signals,
-    load_signal_candles,
 )
 from freqtrade.exceptions import ConfigurationError, OperationalException
 from freqtrade.util import print_df_rich_table
@@ -332,7 +330,7 @@ def process_entry_exit_reasons(config: Config):
     do_rejected = config.get("analysis_rejected", False)
     to_csv = config.get("analysis_to_csv", False)
     csv_path = Path(
-        config.get("analysis_csv_path", config["exportfilename"]),  # type: ignore[arg-type]
+        config.get("analysis_csv_path", config["exportdirectory"]),  # type: ignore[arg-type]
     )

     if entry_only is True and exit_only is True:
@@ -346,20 +344,30 @@
         None if config.get("timerange") is None else str(config.get("timerange"))
     )
     try:
-        backtest_stats = load_backtest_stats(config["exportfilename"])
+        backtest_stats = load_backtest_stats(
+            config["exportdirectory"], config["exportfilename"]
+        )
     except ValueError as e:
         raise ConfigurationError(e) from e

     for strategy_name, results in backtest_stats["strategy"].items():
-        trades = load_backtest_data(config["exportfilename"], strategy_name)
+        trades = load_backtest_data(
+            config["exportdirectory"], strategy_name, config["exportfilename"]
+        )
         if trades is not None and not trades.empty:
-            signal_candles = load_signal_candles(config["exportfilename"])
-            exit_signals = load_exit_signal_candles(config["exportfilename"])
+            signal_candles = load_backtest_analysis_data(
+                config["exportdirectory"], "signals", config["exportfilename"]
+            )
+            exit_signals = load_backtest_analysis_data(
+                config["exportdirectory"], "exited", config["exportfilename"]
+            )

             rej_df = None
             if do_rejected:
-                rejected_signals_dict = load_rejected_signals(config["exportfilename"])
+                rejected_signals_dict = load_backtest_analysis_data(
+                    config["exportdirectory"], "rejected", config["exportfilename"]
+                )
                 rej_df = prepare_results(
                     rejected_signals_dict,
                     strategy_name,

File diff suppressed because it is too large.


@@ -1,9 +1,18 @@
 import logging
 from datetime import timedelta

+import ccxt
+
 from freqtrade.enums import CandleType
+from freqtrade.exceptions import (
+    DDosProtection,
+    OperationalException,
+    RetryableOrderError,
+    TemporaryError,
+)
 from freqtrade.exchange import Exchange
-from freqtrade.exchange.exchange_types import FtHas
+from freqtrade.exchange.common import API_RETRY_COUNT, retrier
+from freqtrade.exchange.exchange_types import CcxtOrder, FtHas
 from freqtrade.util.datetime_helpers import dt_now, dt_ts
@@ -21,6 +30,10 @@ class Bitget(Exchange):
     """

     _ft_has: FtHas = {
+        "stoploss_on_exchange": True,
+        "stop_price_param": "stopPrice",
+        "stop_price_prop": "stopPrice",
+        "stoploss_order_types": {"limit": "limit", "market": "market"},
         "ohlcv_candle_limit": 200,  # 200 for historical candles, 1000 for recent ones.
         "order_time_in_force": ["GTC", "FOK", "IOC", "PO"],
     }
@@ -44,9 +57,72 @@ class Bitget(Exchange):
         timeframe_map = self._api.options["fetchOHLCV"]["maxRecentDaysPerTimeframe"]
         days = timeframe_map.get(timeframe, 30)

-        if candle_type in (CandleType.FUTURES, CandleType.SPOT) and (
+        if candle_type in (CandleType.FUTURES, CandleType.SPOT, CandleType.MARK) and (
             not since_ms or dt_ts(dt_now() - timedelta(days=days)) < since_ms
         ):
             return 1000

         return super().ohlcv_candle_limit(timeframe, candle_type, since_ms)
+
+    def _convert_stop_order(self, pair: str, order_id: str, order: CcxtOrder) -> CcxtOrder:
+        if order.get("status", "open") == "closed":
+            # Use orderID as clientOrderId filter to fetch the regular followup order.
+            # Could be done with "fetch_order" - but clientOid as filter doesn't seem to work
+            # https://www.bitget.com/api-doc/spot/trade/Get-Order-Info
+            for method in (
+                self._api.fetch_canceled_and_closed_orders,
+                self._api.fetch_open_orders,
+            ):
+                orders = method(pair)
+                orders_f = [order for order in orders if order["clientOrderId"] == order_id]
+                if orders_f:
+                    order_reg = orders_f[0]
+                    self._log_exchange_response("fetch_stoploss_order1", order_reg)
+                    order_reg["id_stop"] = order_reg["id"]
+                    order_reg["id"] = order_id
+                    order_reg["type"] = "stoploss"
+                    order_reg["status_stop"] = "triggered"
+                    return order_reg
+        order = self._order_contracts_to_amount(order)
+        order["type"] = "stoploss"
+        return order
+
+    def _fetch_stop_order_fallback(self, order_id: str, pair: str) -> CcxtOrder:
+        params2 = {
+            "stop": True,
+        }
+        for method in (
+            self._api.fetch_open_orders,
+            self._api.fetch_canceled_and_closed_orders,
+        ):
+            try:
+                orders = method(pair, params=params2)
+                orders_f = [order for order in orders if order["id"] == order_id]
+                if orders_f:
+                    order = orders_f[0]
+                    self._log_exchange_response("get_stop_order_fallback", order)
+                    return self._convert_stop_order(pair, order_id, order)
+            except (ccxt.OrderNotFound, ccxt.InvalidOrder):
+                pass
+            except ccxt.DDoSProtection as e:
+                raise DDosProtection(e) from e
+            except (ccxt.OperationFailed, ccxt.ExchangeError) as e:
+                raise TemporaryError(
+                    f"Could not get order due to {e.__class__.__name__}. Message: {e}"
+                ) from e
+            except ccxt.BaseError as e:
+                raise OperationalException(e) from e
+        raise RetryableOrderError(f"StoplossOrder not found (pair: {pair} id: {order_id}).")
+
+    @retrier(retries=API_RETRY_COUNT)
+    def fetch_stoploss_order(
+        self, order_id: str, pair: str, params: dict | None = None
+    ) -> CcxtOrder:
+        if self._config["dry_run"]:
+            return self.fetch_dry_run_order(order_id)
+
+        return self._fetch_stop_order_fallback(order_id, pair)
+
+    def cancel_stoploss_order(self, order_id: str, pair: str, params: dict | None = None) -> dict:
+        return self.cancel_order(order_id=order_id, pair=pair, params={"stop": True})
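
`fetch_stoploss_order` is wrapped in `@retrier`, whose usual contract in
freqtrade is to re-invoke the call when a retryable error escapes, so raising
`RetryableOrderError` above triggers another lookup attempt. A simplified,
standalone stand-in for that pattern (not freqtrade's actual implementation):

    import functools
    import time

    def retrier_sketch(retries=4, backoff=0.2):
        def decorator(fn):
            @functools.wraps(fn)
            def wrapper(*args, **kwargs):
                for attempt in range(retries + 1):
                    try:
                        return fn(*args, **kwargs)
                    except RuntimeError:  # stand-in for RetryableOrderError
                        if attempt == retries:
                            raise
                        time.sleep(backoff * (attempt + 1))  # simple linear backoff
            return wrapper
        return decorator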


@@ -70,34 +70,29 @@ class LookaheadAnalysis(BaseAnalysis):
         cut_df: DataFrame = cut_vars.indicators[current_pair]
         full_df: DataFrame = full_vars.indicators[current_pair]

-        # cut longer dataframe to length of the shorter
-        full_df_cut = full_df[(full_df.date == cut_vars.compared_dt)].reset_index(drop=True)
-        cut_df_cut = cut_df[(cut_df.date == cut_vars.compared_dt)].reset_index(drop=True)
+        # trim full_df to the same index and length as cut_df
+        cut_full_df = full_df.loc[cut_df.index]
+        compare_df = cut_full_df.compare(cut_df)

-        # check if dataframes are not empty
-        if full_df_cut.shape[0] != 0 and cut_df_cut.shape[0] != 0:
-            # compare dataframes
-            compare_df = full_df_cut.compare(cut_df_cut)
-
-            if compare_df.shape[0] > 0:
-                for col_name in compare_df:
-                    col_idx = compare_df.columns.get_loc(col_name)
-                    compare_df_row = compare_df.iloc[0]
-                    # compare_df now comprises tuples with [1] having either 'self' or 'other'
-                    if "other" in col_name[1]:
-                        continue
-                    self_value = compare_df_row.iloc[col_idx]
-                    other_value = compare_df_row.iloc[col_idx + 1]
+        if compare_df.shape[0] > 0:
+            for col_name, values in compare_df.items():
+                col_idx = compare_df.columns.get_loc(col_name)
+                compare_df_row = compare_df.iloc[0]
+                # compare_df now comprises tuples with [1] having either 'self' or 'other'
+                if "other" in col_name[1]:
+                    continue
+                self_value = compare_df_row.iloc[col_idx]
+                other_value = compare_df_row.iloc[col_idx + 1]

-                    # output differences
-                    if self_value != other_value:
-                        if not self.current_analysis.false_indicators.__contains__(col_name[0]):
-                            self.current_analysis.false_indicators.append(col_name[0])
-                            logger.info(
-                                f"=> found look ahead bias in indicator "
-                                f"{col_name[0]}. "
-                                f"{str(self_value)} != {str(other_value)}"
-                            )
+                # output differences
+                if self_value != other_value:
+                    if not self.current_analysis.false_indicators.__contains__(col_name[0]):
+                        self.current_analysis.false_indicators.append(col_name[0])
+                        logger.info(
+                            f"=> found look ahead bias in column "
+                            f"{col_name[0]}. "
+                            f"{str(self_value)} != {str(other_value)}"
+                        )

     def prepare_data(self, varholder: VarHolder, pairs_to_load: list[DataFrame]):
         if "freqai" in self.local_config and "identifier" in self.local_config["freqai"]:
@@ -132,7 +127,13 @@ class LookaheadAnalysis(BaseAnalysis):
         varholder.data, varholder.timerange = backtesting.load_bt_data()
         varholder.timeframe = backtesting.timeframe

-        varholder.indicators = backtesting.strategy.advise_all_indicators(varholder.data)
+        temp_indicators = backtesting.strategy.advise_all_indicators(varholder.data)
+        filled_indicators = dict()
+        for pair, dataframe in temp_indicators.items():
+            filled_indicators[pair] = backtesting.strategy.ft_advise_signals(
+                dataframe, {"pair": pair}
+            )
+        varholder.indicators = filled_indicators
         varholder.result = self.get_result(backtesting, varholder.indicators)

     def fill_entry_and_exit_varHolders(self, result_row):


@@ -15,7 +15,6 @@ from freqtrade.loggers.set_log_levels import (
 )
 from freqtrade.optimize.backtesting import Backtesting
 from freqtrade.optimize.base_analysis import BaseAnalysis, VarHolder
-from freqtrade.resolvers import StrategyResolver


 logger = logging.getLogger(__name__)
@@ -33,13 +32,6 @@ class RecursiveAnalysis(BaseAnalysis):
         super().__init__(config, strategy_obj)

-        strat = StrategyResolver.load_strategy(config)
-        self._strat_scc = strat.startup_candle_count
-        if self._strat_scc not in self._startup_candle:
-            self._startup_candle.append(self._strat_scc)
-        self._startup_candle.sort()
-
         self.partial_varHolder_array: list[VarHolder] = []
         self.partial_varHolder_lookahead_array: list[VarHolder] = []
@@ -149,6 +141,13 @@ class RecursiveAnalysis(BaseAnalysis):
         self.local_config["candle_type_def"] = prepare_data_config["candle_type_def"]

         backtesting._set_strategy(backtesting.strategylist[0])
+        strat = backtesting.strategy
+        self._strat_scc = strat.startup_candle_count
+        if self._strat_scc not in self._startup_candle:
+            self._startup_candle.append(self._strat_scc)
+        self._startup_candle.sort()
+
         varholder.data, varholder.timerange = backtesting.load_bt_data()
         varholder.timeframe = backtesting.timeframe


@@ -17,7 +17,7 @@ class RecursiveAnalysisSubFunctions:
     @staticmethod
     def text_table_recursive_analysis_instances(recursive_instances: list[RecursiveAnalysis]):
         startups = recursive_instances[0]._startup_candle
-        strat_scc = recursive_instances[0]._strat_scc
+        strat_scc = getattr(recursive_instances[0], "_strat_scc", 0) or 0
         headers = ["Indicators"]
         for candle in startups:
             if candle == strat_scc:


@@ -64,7 +64,7 @@ def store_backtest_results(
     :param market_change_data: Dataframe containing market change data
     :param analysis_results: Dictionary containing analysis results
     """
-    recordfilename: Path = config["exportfilename"]
+    recordfilename: Path = config["exportdirectory"]
     zip_filename = _generate_filename(recordfilename, dtappendix, ".zip")
     base_filename = _generate_filename(recordfilename, dtappendix, "")
     json_filename = _generate_filename(recordfilename, dtappendix, ".json")


@@ -1187,10 +1187,13 @@ class LocalTrade:
         """
         close_trade_value = self.calc_close_trade_value(rate, amount)

-        if amount is None or open_rate is None:
+        if (amount is None) and (open_rate is None):
             open_trade_value = self.open_trade_value
         else:
-            open_trade_value = self._calc_open_trade_value(amount, open_rate)
+            # Fall back to trade.amount and self.open_rate if necessary
+            open_trade_value = self._calc_open_trade_value(
+                amount or self.amount, open_rate or self.open_rate
+            )

         if open_trade_value == 0.0:
             return 0.0
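
The condition change matters when only one of the two overrides is passed; a
standalone sketch of the corrected fallback semantics (illustrative numbers,
fees omitted):

    def open_value(amount, open_rate, trade_amount=2.0, trade_rate=100.0):
        if (amount is None) and (open_rate is None):
            return trade_amount * trade_rate
        # Per-argument fallback, as in the new _calc_open_trade_value call.
        return (amount or trade_amount) * (open_rate or trade_rate)

    assert open_value(None, None) == 200.0
    assert open_value(1.0, None) == 100.0  # the old `or` logic ignored the override
    assert open_value(None, 50.0) == 100.0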


@@ -31,8 +31,9 @@ class AgeFilter(IPairList):
         self._min_days_listed = self._pairlistconfig.get("min_days_listed", 10)
         self._max_days_listed = self._pairlistconfig.get("max_days_listed")
+        self._def_candletype = self._config["candle_type_def"]

-        candle_limit = self._exchange.ohlcv_candle_limit("1d", self._config["candle_type_def"])
+        candle_limit = self._exchange.ohlcv_candle_limit("1d", self._def_candletype)
         if self._min_days_listed < 1:
             raise OperationalException("AgeFilter requires min_days_listed to be >= 1")
         if self._min_days_listed > candle_limit:
@@ -100,7 +101,7 @@ class AgeFilter(IPairList):
         :return: new allowlist
         """
         needed_pairs: ListPairsWithTimeframes = [
-            (p, "1d", self._config["candle_type_def"])
+            (p, "1d", self._def_candletype)
             for p in pairlist
             if p not in self._symbolsChecked and p not in self._symbolsCheckFailed
         ]
@@ -116,8 +117,8 @@ class AgeFilter(IPairList):
         if self._enabled:
             for p in deepcopy(pairlist):
                 daily_candles = (
-                    candles[(p, "1d", self._config["candle_type_def"])]
-                    if (p, "1d", self._config["candle_type_def"]) in candles
+                    candles[(p, "1d", self._def_candletype)]
+                    if (p, "1d", self._def_candletype) in candles
                     else None
                 )
                 if not self._validate_pair_loc(p, daily_candles):


@@ -37,7 +37,6 @@ class MarketCapPairList(IPairList):
         self._refresh_period = self._pairlistconfig.get("refresh_period", 86400)
         self._categories = self._pairlistconfig.get("categories", [])
         self._marketcap_cache: TTLCache = TTLCache(maxsize=1, ttl=self._refresh_period)
-        self._def_candletype = self._config["candle_type_def"]

         _coingecko_config = self._config.get("coingecko", {})
@@ -225,7 +224,7 @@ class MarketCapPairList(IPairList):
         if marketcap_list:
             filtered_pairlist: list[str] = []

-            market = self._config["trading_mode"]
+            market = self._exchange._config["trading_mode"]
             pair_format = f"{self._stake_currency.upper()}"
             if market == "futures":
                 pair_format += f":{self._stake_currency.upper()}"


@@ -91,7 +91,7 @@ class PercentChangePairList(IPairList):
         )

         candle_limit = self._exchange.ohlcv_candle_limit(
-            self._lookback_timeframe, self._config["candle_type_def"]
+            self._lookback_timeframe, self._def_candletype
         )
         if self._lookback_period > candle_limit:


@@ -40,7 +40,7 @@ class VolatilityFilter(IPairList):
         self._pair_cache: TTLCache = TTLCache(maxsize=1000, ttl=self._refresh_period)

-        candle_limit = self._exchange.ohlcv_candle_limit("1d", self._config["candle_type_def"])
+        candle_limit = self._exchange.ohlcv_candle_limit("1d", self._def_candletype)
         if self._days < 1:
             raise OperationalException("VolatilityFilter requires lookback_days to be >= 1")
         if self._days > candle_limit:


@@ -89,7 +89,7 @@ class VolumePairList(IPairList):
             raise OperationalException(f"key {self._sort_key} not in {SORT_VALUES}")

         candle_limit = self._exchange.ohlcv_candle_limit(
-            self._lookback_timeframe, self._config["candle_type_def"]
+            self._lookback_timeframe, self._def_candletype
         )
         if self._lookback_period < 0:
             raise OperationalException("VolumeFilter requires lookback_period to be >= 0")


@@ -34,7 +34,7 @@ class RangeStabilityFilter(IPairList):
         self._pair_cache: TTLCache = TTLCache(maxsize=1000, ttl=self._refresh_period)

-        candle_limit = self._exchange.ohlcv_candle_limit("1d", self._config["candle_type_def"])
+        candle_limit = self._exchange.ohlcv_candle_limit("1d", self._def_candletype)
         if self._days < 1:
             raise OperationalException("RangeStabilityFilter requires lookback_days to be >= 1")
         if self._days > candle_limit:


@@ -54,6 +54,7 @@ def __run_pairlist(job_id: str, config_loc: Config):
     with FtNoDBContext():
         exchange = get_exchange(config_loc)
+        config_loc["candle_type_def"] = exchange._config["candle_type_def"]
         pairlists = PairListManager(exchange, config_loc)
         pairlists.refresh_pairlist()
         ApiBG.jobs[job_id]["result"] = {


@@ -342,8 +342,22 @@ class Telegram(RPCHandler):
         self._loop.run_until_complete(self._startup_telegram())

     async def _startup_telegram(self) -> None:
-        await self._app.initialize()
-        await self._app.start()
+        retries = 3
+        attempt = 0
+        while attempt < retries:
+            try:
+                await self._app.initialize()
+                await self._app.start()
+                break
+            except Exception as ex:
+                logger.error(
+                    "Error starting Telegram bot (attempt %d/%d): %s", attempt + 1, retries, ex
+                )
+                attempt += 1
+                if attempt == retries:
+                    logger.warning("Telegram init failed.")
+                    return
+                await asyncio.sleep(2)
         if self._app.updater:
             await self._app.updater.start_polling(
                 bootstrap_retries=-1,


@@ -225,7 +225,6 @@ class RealParameter(NumericParameter):

 class DecimalParameter(NumericParameter):
     default: float
-    value: float

     def __init__(
         self,
@@ -259,6 +258,14 @@ class DecimalParameter(NumericParameter):
             low=low, high=high, default=default, space=space, optimize=optimize, load=load, **kwargs
         )

+    @property
+    def value(self) -> float:
+        return self._value
+
+    @value.setter
+    def value(self, new_value: float):
+        self._value = round(new_value, self._decimals)
+
     def get_space(self, name: str) -> "SKDecimal":
         """
         Create optimization space.
Create optimization space.