Merge branch 'main-stash' of https://github.com/stash86/freqtrade into main-stash

Stefano
2025-08-22 08:40:17 +09:00
42 changed files with 3273 additions and 3439 deletions


@@ -54,6 +54,7 @@ ARGS_BACKTEST = [
"strategy_list",
"export",
"exportfilename",
"exportdirectory",
"backtest_breakdown",
"backtest_cache",
"freqai_backtest_live_models",
@@ -94,7 +95,12 @@ ARGS_LIST_FREQAIMODELS = ["freqaimodel_path", "print_one_column"]
ARGS_LIST_HYPEROPTS = ["hyperopt_path", "print_one_column"]
ARGS_BACKTEST_SHOW = ["exportfilename", "backtest_show_pair_list", "backtest_breakdown"]
ARGS_BACKTEST_SHOW = [
"exportfilename",
"exportdirectory",
"backtest_show_pair_list",
"backtest_breakdown",
]
ARGS_LIST_EXCHANGES = ["print_one_column", "list_exchanges_all", "trading_mode", "dex_exchanges"]
@@ -233,6 +239,7 @@ ARGS_HYPEROPT_SHOW = [
ARGS_ANALYZE_ENTRIES_EXITS = [
"exportfilename",
"exportdirectory",
"analysis_groups",
"enter_reason_list",
"exit_reason_list",


@@ -199,21 +199,29 @@ AVAILABLE_CLI_OPTIONS = {
"(so `backtest-data.json` becomes `backtest-data-SampleStrategy.json`",
nargs="+",
),
"export": Arg(
"--export",
help="Export backtest results (default: trades).",
choices=constants.EXPORT_OPTIONS,
),
"backtest_notes": Arg(
"--notes",
help="Add notes to the backtest results.",
metavar="TEXT",
),
"export": Arg(
"--export",
help="Export backtest results (default: trades).",
choices=constants.EXPORT_OPTIONS,
),
"exportdirectory": Arg(
"--backtest-directory",
"--export-directory",
help="Directory to use for backtest results. "
"Example: `--export-directory=user_data/backtest_results/`. ",
metavar="PATH",
),
"exportfilename": Arg(
"--backtest-filename",
"--export-filename",
help="Use this filename for backtest results."
"Example: `--backtest-filename=user_data/backtest_results/`",
"Example: `--backtest-filename=backtest_results_2020-09-27_16-20-48.json`. "
"Assumes either `user_data/backtest_results/` or `--export-directory` as base directory.",
metavar="PATH",
),
"disableparamexport": Arg(


@@ -72,7 +72,7 @@ def start_backtesting_show(args: dict[str, Any]) -> None:
from freqtrade.data.btanalysis import load_backtest_stats
from freqtrade.optimize.optimize_reports import show_backtest_results, show_sorted_pairlist
results = load_backtest_stats(config["exportfilename"])
results = load_backtest_stats(config["exportdirectory"], config["exportfilename"])
show_backtest_results(config, results)
show_sorted_pairlist(config, results)


@@ -209,13 +209,28 @@ class Configuration:
config.update({"datadir": create_datadir(config, self.args.get("datadir"))})
logger.info("Using data directory: %s ...", config.get("datadir"))
self._args_to_config(
config, argname="exportdirectory", logstring="Using {} as backtest directory ..."
)
if self.args.get("exportfilename"):
self._args_to_config(
config, argname="exportfilename", logstring="Storing backtest results to {} ..."
)
config["exportfilename"] = Path(config["exportfilename"])
else:
config["exportfilename"] = config["user_data_dir"] / "backtest_results"
if config.get("exportdirectory") and Path(config["exportdirectory"]).is_dir():
logger.warning(
"DEPRECATED: Using `--export-filename` with directories is deprecated, "
"use `--backtest-directory` instead."
)
if config.get("exportdirectory") is None:
# Fallback - reuse the provided exportfilename path as the export directory.
config["exportdirectory"] = config["exportfilename"]
if not config.get("exportdirectory"):
config["exportdirectory"] = config["user_data_dir"] / "backtest_results"
if not config.get("exportfilename"):
config["exportfilename"] = None
config["exportdirectory"] = Path(config["exportdirectory"])
if self.args.get("show_sensitive"):
logger.warning(
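
The block above boils down to a small precedence ladder: an explicit `--export-directory` wins, a directory passed through the deprecated `--export-filename` route comes second, and `user_data/backtest_results/` is the final default. A condensed sketch of that resolution (logging and the deprecation warning omitted; the flattened diff hides the original nesting, so this sketch assumes the fallback only fires on the deprecated directory-as-filename route):

    from pathlib import Path

    def resolve_export_paths(config: dict) -> None:
        # Condensed mirror of the Configuration block above.
        if config.get("exportfilename"):
            config["exportfilename"] = Path(config["exportfilename"])
            if config["exportfilename"].is_dir() and config.get("exportdirectory") is None:
                # Fallback - reuse the exportfilename path as the export directory.
                config["exportdirectory"] = config["exportfilename"]
        if not config.get("exportdirectory"):
            config["exportdirectory"] = config["user_data_dir"] / "backtest_results"
        if not config.get("exportfilename"):
            config["exportfilename"] = None
        config["exportdirectory"] = Path(config["exportdirectory"])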


@@ -155,33 +155,55 @@ def load_backtest_metadata(filename: Path | str) -> dict[str, Any]:
raise OperationalException("Unexpected error while loading backtest metadata.") from e
def load_backtest_stats(filename: Path | str) -> BacktestResultType:
def _normalize_filename(file_or_directory: Path | str, filename: Path | str | None) -> Path:
"""
Resolve a directory (or direct file path) plus an optional filename into a single results file path.
:param file_or_directory: Directory containing backtest results, or a direct path to a results file.
:param filename: Optional filename within that directory; defaults to the latest backtest result.
:return: A Path object pointing to the resolved results file.
"""
if isinstance(file_or_directory, str):
file_or_directory = Path(file_or_directory)
if file_or_directory.is_dir():
if not filename:
filename = get_latest_backtest_filename(file_or_directory)
if Path(filename).is_file():
fn = Path(filename)
else:
fn = file_or_directory / filename
else:
fn = file_or_directory
return fn
def load_backtest_stats(
file_or_directory: Path | str, filename: Path | str | None = None
) -> BacktestResultType:
"""
Load backtest statistics file.
:param filename: pathlib.Path object, or string pointing to the file.
:param file_or_directory: pathlib.Path object, or string pointing to the directory,
or absolute/relative path to the backtest results file.
:param filename: Optional filename to load from (if different from the main filename).
Only valid when loading from a directory.
:return: a dictionary containing the backtest statistics.
"""
if isinstance(filename, str):
filename = Path(filename)
if filename.is_dir():
filename = filename / get_latest_backtest_filename(filename)
if not filename.is_file():
raise ValueError(f"File {filename} does not exist.")
logger.info(f"Loading backtest result from {filename}")
fn = _normalize_filename(file_or_directory, filename)
if filename.suffix == ".zip":
if not fn.is_file():
raise ValueError(f"File or directory {fn} does not exist.")
logger.info(f"Loading backtest result from {fn}")
if fn.suffix == ".zip":
data = json_load(
StringIO(
load_file_from_zip(filename, filename.with_suffix(".json").name).decode("utf-8")
)
StringIO(load_file_from_zip(fn, fn.with_suffix(".json").name).decode("utf-8"))
)
else:
with filename.open() as file:
with fn.open() as file:
data = json_load(file)
# Legacy list format does not contain metadata.
if isinstance(data, dict):
data["metadata"] = load_backtest_metadata(filename)
data["metadata"] = load_backtest_metadata(fn)
return data
@@ -362,16 +384,21 @@ def _load_backtest_data_df_compatibility(df: pd.DataFrame) -> pd.DataFrame:
return df
def load_backtest_data(filename: Path | str, strategy: str | None = None) -> pd.DataFrame:
def load_backtest_data(
file_or_directory: Path | str, strategy: str | None = None, filename: Path | str | None = None
) -> pd.DataFrame:
"""
Load backtest data file.
:param filename: pathlib.Path object, or string pointing to a file or directory
Load backtest data file, returns a dataframe with the individual trades.
:param file_or_directory: pathlib.Path object, or string pointing to the directory,
or absolute/relative path to the backtest results file.
:param strategy: Strategy to load - mainly relevant for multi-strategy backtests
Can also serve as protection to load the correct result.
:param filename: Optional filename to load from (if different from the main filename).
Only valid when loading from a directory.
:return: a dataframe with the analysis results
:raise: ValueError if loading goes wrong.
"""
data = load_backtest_stats(filename)
data = load_backtest_stats(file_or_directory, filename)
if not isinstance(data, list):
# new, nested format
if "strategy" not in data:
@@ -430,20 +457,23 @@ def load_file_from_zip(zip_path: Path, filename: str) -> bytes:
raise ValueError(f"Bad zip file: {zip_path}.") from None
def load_backtest_analysis_data(backtest_dir: Path, name: Literal["signals", "rejected", "exited"]):
def load_backtest_analysis_data(
file_or_directory: Path,
name: Literal["signals", "rejected", "exited"],
filename: Path | str | None = None,
):
"""
Load backtest analysis data either from a pickle file or from within a zip file
:param backtest_dir: Directory containing backtest results
:param file_or_directory: pathlib.Path object, or string pointing to the directory,
or absolute/relative path to the backtest results file.
:param name: Name of the analysis data to load (signals, rejected, exited)
:param filename: Optional filename to load from (if different from the main filename).
Only valid when loading from a directory.
:return: Analysis data
"""
import joblib
if backtest_dir.is_dir():
lbf = Path(get_latest_backtest_filename(backtest_dir))
zip_path = backtest_dir / lbf
else:
zip_path = backtest_dir
zip_path = _normalize_filename(file_or_directory, filename)
if zip_path.suffix == ".zip":
# Load from zip file
@@ -458,10 +488,10 @@ def load_backtest_analysis_data(backtest_dir: Path, name: Literal["signals", "re
else:
# Load from separate pickle file
if backtest_dir.is_dir():
scpf = Path(backtest_dir, f"{zip_path.stem}_{name}.pkl")
if file_or_directory.is_dir():
scpf = Path(file_or_directory, f"{zip_path.stem}_{name}.pkl")
else:
scpf = Path(backtest_dir.parent / f"{backtest_dir.stem}_{name}.pkl")
scpf = Path(file_or_directory.parent / f"{file_or_directory.stem}_{name}.pkl")
try:
with scpf.open("rb") as scp:


@@ -330,7 +330,7 @@ def process_entry_exit_reasons(config: Config):
do_rejected = config.get("analysis_rejected", False)
to_csv = config.get("analysis_to_csv", False)
csv_path = Path(
config.get("analysis_csv_path", config["exportfilename"]), # type: ignore[arg-type]
config.get("analysis_csv_path", config["exportdirectory"]), # type: ignore[arg-type]
)
if entry_only is True and exit_only is True:
@@ -344,21 +344,29 @@ def process_entry_exit_reasons(config: Config):
None if config.get("timerange") is None else str(config.get("timerange"))
)
try:
backtest_stats = load_backtest_stats(config["exportfilename"])
backtest_stats = load_backtest_stats(
config["exportdirectory"], config["exportfilename"]
)
except ValueError as e:
raise ConfigurationError(e) from e
for strategy_name, results in backtest_stats["strategy"].items():
trades = load_backtest_data(config["exportfilename"], strategy_name)
trades = load_backtest_data(
config["exportdirectory"], strategy_name, config["exportfilename"]
)
if trades is not None and not trades.empty:
signal_candles = load_backtest_analysis_data(config["exportfilename"], "signals")
exit_signals = load_backtest_analysis_data(config["exportfilename"], "exited")
signal_candles = load_backtest_analysis_data(
config["exportdirectory"], "signals", config["exportfilename"]
)
exit_signals = load_backtest_analysis_data(
config["exportdirectory"], "exited", config["exportfilename"]
)
rej_df = None
if do_rejected:
rejected_signals_dict = load_backtest_analysis_data(
config["exportfilename"], "rejected"
config["exportdirectory"], "rejected", config["exportfilename"]
)
rej_df = prepare_results(
rejected_signals_dict,

File diff suppressed because it is too large.


@@ -64,7 +64,7 @@ def store_backtest_results(
:param market_change_data: Dataframe containing market change data
:param analysis_results: Dictionary containing analysis results
"""
recordfilename: Path = config["exportfilename"]
recordfilename: Path = config["exportdirectory"]
zip_filename = _generate_filename(recordfilename, dtappendix, ".zip")
base_filename = _generate_filename(recordfilename, dtappendix, "")
json_filename = _generate_filename(recordfilename, dtappendix, ".json")
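
With `recordfilename` now sourced from `exportdirectory`, all three artifact paths (`.zip`, bare base name, `.json`) are derived from the export directory plus the date appendix. `_generate_filename` itself is not shown in this diff, so the following is only an assumed sketch of the kind of path it produces:

    from pathlib import Path

    def _generate_filename(directory: Path, dtappendix: str, suffix: str) -> Path:
        # Assumed shape: a timestamped name inside the export directory, e.g.
        # user_data/backtest_results/backtest-result-2025-08-22_08-40-17.zip
        return directory / f"backtest-result-{dtappendix}{suffix}"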


@@ -31,8 +31,9 @@ class AgeFilter(IPairList):
self._min_days_listed = self._pairlistconfig.get("min_days_listed", 10)
self._max_days_listed = self._pairlistconfig.get("max_days_listed")
self._def_candletype = self._config["candle_type_def"]
candle_limit = self._exchange.ohlcv_candle_limit("1d", self._config["candle_type_def"])
candle_limit = self._exchange.ohlcv_candle_limit("1d", self._def_candletype)
if self._min_days_listed < 1:
raise OperationalException("AgeFilter requires min_days_listed to be >= 1")
if self._min_days_listed > candle_limit:
@@ -100,7 +101,7 @@ class AgeFilter(IPairList):
:return: new allowlist
"""
needed_pairs: ListPairsWithTimeframes = [
(p, "1d", self._config["candle_type_def"])
(p, "1d", self._def_candletype)
for p in pairlist
if p not in self._symbolsChecked and p not in self._symbolsCheckFailed
]
@@ -116,8 +117,8 @@ class AgeFilter(IPairList):
if self._enabled:
for p in deepcopy(pairlist):
daily_candles = (
candles[(p, "1d", self._config["candle_type_def"])]
if (p, "1d", self._config["candle_type_def"]) in candles
candles[(p, "1d", self._def_candletype)]
if (p, "1d", self._def_candletype) in candles
else None
)
if not self._validate_pair_loc(p, daily_candles):


@@ -37,7 +37,6 @@ class MarketCapPairList(IPairList):
self._refresh_period = self._pairlistconfig.get("refresh_period", 86400)
self._categories = self._pairlistconfig.get("categories", [])
self._marketcap_cache: TTLCache = TTLCache(maxsize=1, ttl=self._refresh_period)
self._def_candletype = self._config["candle_type_def"]
_coingecko_config = self._config.get("coingecko", {})
@@ -191,7 +190,7 @@ class MarketCapPairList(IPairList):
if marketcap_list:
filtered_pairlist = []
market = self._config["trading_mode"]
market = self._exchange._config["trading_mode"]
pair_format = f"{self._stake_currency.upper()}"
if market == "futures":
pair_format += f":{self._stake_currency.upper()}"


@@ -91,7 +91,7 @@ class PercentChangePairList(IPairList):
)
candle_limit = self._exchange.ohlcv_candle_limit(
self._lookback_timeframe, self._config["candle_type_def"]
self._lookback_timeframe, self._def_candletype
)
if self._lookback_period > candle_limit:


@@ -40,7 +40,7 @@ class VolatilityFilter(IPairList):
self._pair_cache: TTLCache = TTLCache(maxsize=1000, ttl=self._refresh_period)
candle_limit = self._exchange.ohlcv_candle_limit("1d", self._config["candle_type_def"])
candle_limit = self._exchange.ohlcv_candle_limit("1d", self._def_candletype)
if self._days < 1:
raise OperationalException("VolatilityFilter requires lookback_days to be >= 1")
if self._days > candle_limit:


@@ -89,7 +89,7 @@ class VolumePairList(IPairList):
raise OperationalException(f"key {self._sort_key} not in {SORT_VALUES}")
candle_limit = self._exchange.ohlcv_candle_limit(
self._lookback_timeframe, self._config["candle_type_def"]
self._lookback_timeframe, self._def_candletype
)
if self._lookback_period < 0:
raise OperationalException("VolumeFilter requires lookback_period to be >= 0")


@@ -34,7 +34,7 @@ class RangeStabilityFilter(IPairList):
self._pair_cache: TTLCache = TTLCache(maxsize=1000, ttl=self._refresh_period)
candle_limit = self._exchange.ohlcv_candle_limit("1d", self._config["candle_type_def"])
candle_limit = self._exchange.ohlcv_candle_limit("1d", self._def_candletype)
if self._days < 1:
raise OperationalException("RangeStabilityFilter requires lookback_days to be >= 1")
if self._days > candle_limit:


@@ -54,6 +54,7 @@ def __run_pairlist(job_id: str, config_loc: Config):
with FtNoDBContext():
exchange = get_exchange(config_loc)
config_loc["candle_type_def"] = exchange._config["candle_type_def"]
pairlists = PairListManager(exchange, config_loc)
pairlists.refresh_pairlist()
ApiBG.jobs[job_id]["result"] = {