mirror of https://github.com/freqtrade/freqtrade.git
synced 2026-01-28 01:40:23 +00:00
Merge pull request #11154 from freqtrade/feat/zip_backtest
Zip backtest results
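In short: the stats JSON, the market-change feather and the analysis pickles are now bundled into a single `backtest-result-<datetime>.zip`. Only the `.meta.json` sidecar and the last-result pointer file remain as separate files on disk, and the REST API falls back from `.zip` to `.json` so older results stay readable.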
@@ -1,63 +1,23 @@
 import logging
-from pathlib import Path
 from typing import Any

 from freqtrade.enums import RunMode
-from freqtrade.exceptions import ConfigurationError, OperationalException


 logger = logging.getLogger(__name__)


-def setup_analyze_configuration(args: dict[str, Any], method: RunMode) -> dict[str, Any]:
-    """
-    Prepare the configuration for the entry/exit reason analysis module
-    :param args: Cli args from Arguments()
-    :param method: Bot running mode
-    :return: Configuration
-    """
-    from freqtrade.configuration import setup_utils_configuration
-
-    config = setup_utils_configuration(args, method)
-
-    no_unlimited_runmodes = {
-        RunMode.BACKTEST: "backtesting",
-    }
-    if method in no_unlimited_runmodes.keys():
-        from freqtrade.data.btanalysis import get_latest_backtest_filename
-
-        if "exportfilename" in config:
-            if config["exportfilename"].is_dir():
-                btfile = Path(get_latest_backtest_filename(config["exportfilename"]))
-                signals_file = f"{config['exportfilename']}/{btfile.stem}_signals.pkl"
-            else:
-                if config["exportfilename"].exists():
-                    btfile = Path(config["exportfilename"])
-                    signals_file = f"{btfile.parent}/{btfile.stem}_signals.pkl"
-                else:
-                    raise ConfigurationError(f"{config['exportfilename']} does not exist.")
-        else:
-            raise ConfigurationError("exportfilename not in config.")
-
-        if not Path(signals_file).exists():
-            raise OperationalException(
-                f"Cannot find latest backtest signals file: {signals_file}."
-                "Run backtesting with `--export signals`."
-            )
-
-    return config
-
-
 def start_analysis_entries_exits(args: dict[str, Any]) -> None:
     """
     Start analysis script
     :param args: Cli args from Arguments()
     :return: None
     """
+    from freqtrade.configuration import setup_utils_configuration
     from freqtrade.data.entryexitanalysis import process_entry_exit_reasons

     # Initialize configuration
-    config = setup_analyze_configuration(args, RunMode.BACKTEST)
+    config = setup_utils_configuration(args, RunMode.BACKTEST)

     logger.info("Starting freqtrade in analysis mode")
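With `setup_analyze_configuration` removed, the entry point reduces to configuration setup plus the processing call; the file-existence checks now happen at load time (see the `btanalysis` and `entryexitanalysis` hunks below). A minimal sketch of the resulting flow, with a hypothetical args dict:

```python
from freqtrade.configuration import setup_utils_configuration
from freqtrade.data.entryexitanalysis import process_entry_exit_reasons
from freqtrade.enums import RunMode

# Hypothetical CLI args, as Arguments() would produce them.
args = {"config": ["config.json"], "exportfilename": "user_data/backtest_results"}

config = setup_utils_configuration(args, RunMode.BACKTEST)
# Raises ConfigurationError if the export cannot be loaded (see below).
process_entry_exit_reasons(config)
```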
@@ -3,8 +3,10 @@ Helpers when analyzing backtest data
 """

 import logging
+import zipfile
 from copy import copy
 from datetime import datetime, timezone
+from io import BytesIO, StringIO
 from pathlib import Path
 from typing import Any, Literal

@@ -165,8 +167,16 @@ def load_backtest_stats(filename: Path | str) -> BacktestResultType:
     if not filename.is_file():
         raise ValueError(f"File {filename} does not exist.")
     logger.info(f"Loading backtest result from {filename}")
-    with filename.open() as file:
-        data = json_load(file)
+
+    if filename.suffix == ".zip":
+        data = json_load(
+            StringIO(
+                load_file_from_zip(filename, filename.with_suffix(".json").name).decode("utf-8")
+            )
+        )
+    else:
+        with filename.open() as file:
+            data = json_load(file)

     # Legacy list format does not contain metadata.
     if isinstance(data, dict):
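`load_backtest_stats` now accepts either format transparently; the zip branch expects a JSON member named after the archive itself (`filename.with_suffix(".json").name`). A usage sketch with a hypothetical result file:

```python
from pathlib import Path

from freqtrade.data.btanalysis import load_backtest_stats

# Hypothetical archive produced by a backtest run; it must contain
# backtest-result-2025-01-01_12-00-00.json as a member.
result = Path("user_data/backtest_results/backtest-result-2025-01-01_12-00-00.zip")
stats = load_backtest_stats(result)
print(stats["strategy_comparison"])
```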
@@ -194,8 +204,10 @@ def load_and_merge_backtest_result(strategy_name: str, filename: Path, results:


 def _get_backtest_files(dirname: Path) -> list[Path]:
-    # Weird glob expression here avoids including .meta.json files.
-    return list(reversed(sorted(dirname.glob("backtest-result-*-[0-9][0-9].json"))))
+    # Get both json and zip files separately and combine the results
+    json_files = dirname.glob("backtest-result-*-[0-9][0-9]*.json")
+    zip_files = dirname.glob("backtest-result-*-[0-9][0-9]*.zip")
+    return list(reversed(sorted(list(json_files) + list(zip_files))))


 def _extract_backtest_result(filename: Path) -> list[BacktestHistoryEntryType]:
@@ -267,7 +279,11 @@ def get_backtest_market_change(filename: Path, include_ts: bool = True) -> pd.DataFrame:
     """
     Read backtest market change file.
     """
-    df = pd.read_feather(filename)
+    if filename.suffix == ".zip":
+        data = load_file_from_zip(filename, f"{filename.stem}_market_change.feather")
+        df = pd.read_feather(BytesIO(data))
+    else:
+        df = pd.read_feather(filename)
     if include_ts:
         df.loc[:, "__date_ts"] = df.loc[:, "date"].astype(np.int64) // 1000 // 1000
     return df
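Because `pd.read_feather` needs a seekable binary buffer rather than raw bytes, the zip branch wraps the member in `BytesIO`. The same pattern stands alone (names hypothetical):

```python
import zipfile
from io import BytesIO

import pandas as pd

# Hypothetical archive and member, mirroring the naming scheme above.
with zipfile.ZipFile("backtest-result-example.zip") as zipf:
    raw = zipf.read("backtest-result-example_market_change.feather")

df = pd.read_feather(BytesIO(raw))  # wrap bytes in a seekable buffer
```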
@@ -388,6 +404,93 @@ def load_backtest_data(filename: Path | str, strategy: str | None = None) -> pd.DataFrame:
     return df


+def load_file_from_zip(zip_path: Path, filename: str) -> bytes:
+    """
+    Load a file from a zip file
+    :param zip_path: Path to the zip file
+    :param filename: Name of the file to load
+    :return: Bytes of the file
+    :raises: ValueError if loading goes wrong.
+    """
+    try:
+        with zipfile.ZipFile(zip_path) as zipf:
+            try:
+                with zipf.open(filename) as file:
+                    return file.read()
+            except KeyError:
+                logger.error(f"File {filename} not found in zip: {zip_path}")
+                raise ValueError(f"File {filename} not found in zip: {zip_path}") from None
+    except FileNotFoundError:
+        raise ValueError(f"Zip file {zip_path} not found.")
+    except zipfile.BadZipFile:
+        logger.error(f"Bad zip file: {zip_path}.")
+        raise ValueError(f"Bad zip file: {zip_path}.") from None
+
+
+def load_backtest_analysis_data(backtest_dir: Path, name: str):
+    """
+    Load backtest analysis data either from a pickle file or from within a zip file
+    :param backtest_dir: Directory containing backtest results
+    :param name: Name of the analysis data to load (signals, rejected, exited)
+    :return: Analysis data
+    """
+    import joblib
+
+    if backtest_dir.is_dir():
+        lbf = Path(get_latest_backtest_filename(backtest_dir))
+        zip_path = backtest_dir / lbf
+    else:
+        zip_path = backtest_dir
+
+    if zip_path.suffix == ".zip":
+        # Load from zip file
+        analysis_name = f"{zip_path.stem}_{name}.pkl"
+        data = load_file_from_zip(zip_path, analysis_name)
+        if not data:
+            return None
+        loaded_data = joblib.load(BytesIO(data))
+
+        logger.info(f"Loaded {name} candles from zip: {str(zip_path)}:{analysis_name}")
+        return loaded_data
+
+    else:
+        # Load from separate pickle file
+        if backtest_dir.is_dir():
+            scpf = Path(backtest_dir, f"{zip_path.stem}_{name}.pkl")
+        else:
+            scpf = Path(backtest_dir.parent / f"{backtest_dir.stem}_{name}.pkl")
+
+        try:
+            with scpf.open("rb") as scp:
+                loaded_data = joblib.load(scp)
+                logger.info(f"Loaded {name} candles: {str(scpf)}")
+                return loaded_data
+        except Exception:
+            logger.exception(f"Cannot load {name} data from pickled results.")
+            return None
+
+
+def load_rejected_signals(backtest_dir: Path):
+    """
+    Load rejected signals from backtest directory
+    """
+    return load_backtest_analysis_data(backtest_dir, "rejected")
+
+
+def load_signal_candles(backtest_dir: Path):
+    """
+    Load signal candles from backtest directory
+    """
+    return load_backtest_analysis_data(backtest_dir, "signals")
+
+
+def load_exit_signal_candles(backtest_dir: Path) -> dict[str, dict[str, pd.DataFrame]]:
+    """
+    Load exit signal candles from backtest directory
+    """
+    return load_backtest_analysis_data(backtest_dir, "exited")
+
+
 def analyze_trade_parallelism(results: pd.DataFrame, timeframe: str) -> pd.DataFrame:
     """
     Find overlapping trades by expanding each trade once per period it was open
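Taken together, these helpers let analysis code stay agnostic about the storage format: `load_file_from_zip` normalizes every failure mode (missing archive, corrupt archive, missing member) to `ValueError`, and the `load_*_signals`/`load_*_candles` wrappers resolve the latest result whether it is zipped or a legacy pickle. A hedged usage sketch (paths and names hypothetical):

```python
from pathlib import Path

from freqtrade.data.btanalysis import load_file_from_zip, load_signal_candles

results_dir = Path("user_data/backtest_results")  # hypothetical location

# Resolves the latest result in the directory, zipped or not.
signal_candles = load_signal_candles(results_dir)
if signal_candles is not None:
    print(list(signal_candles))  # strategy names

# Lower-level access: all failure modes surface as a single ValueError.
try:
    raw = load_file_from_zip(
        results_dir / "backtest-result-example.zip", "backtest-result-example.json"
    )
except ValueError as e:
    print(f"Could not read archive member: {e}")
```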
@@ -1,56 +1,25 @@
 import logging
 from pathlib import Path

-import joblib
 import pandas as pd

 from freqtrade.configuration import TimeRange
 from freqtrade.constants import Config
 from freqtrade.data.btanalysis import (
     BT_DATA_COLUMNS,
-    get_latest_backtest_filename,
     load_backtest_data,
     load_backtest_stats,
+    load_exit_signal_candles,
+    load_rejected_signals,
+    load_signal_candles,
 )
-from freqtrade.exceptions import OperationalException
+from freqtrade.exceptions import ConfigurationError, OperationalException
 from freqtrade.util import print_df_rich_table


 logger = logging.getLogger(__name__)


-def _load_backtest_analysis_data(backtest_dir: Path, name: str):
-    if backtest_dir.is_dir():
-        scpf = Path(
-            backtest_dir,
-            Path(get_latest_backtest_filename(backtest_dir)).stem + "_" + name + ".pkl",
-        )
-    else:
-        scpf = Path(backtest_dir.parent / f"{backtest_dir.stem}_{name}.pkl")
-
-    try:
-        with scpf.open("rb") as scp:
-            loaded_data = joblib.load(scp)
-            logger.info(f"Loaded {name} candles: {str(scpf)}")
-    except Exception as e:
-        logger.error(f"Cannot load {name} data from pickled results: ", e)
-        return None
-
-    return loaded_data
-
-
-def _load_rejected_signals(backtest_dir: Path):
-    return _load_backtest_analysis_data(backtest_dir, "rejected")
-
-
-def _load_signal_candles(backtest_dir: Path):
-    return _load_backtest_analysis_data(backtest_dir, "signals")
-
-
-def _load_exit_signal_candles(backtest_dir: Path) -> dict[str, dict[str, pd.DataFrame]]:
-    return _load_backtest_analysis_data(backtest_dir, "exited")
-
-
 def _process_candles_and_indicators(
     pairlist, strategy_name, trades, signal_candles, date_col: str = "open_date"
 ):

@@ -374,19 +343,21 @@ def process_entry_exit_reasons(config: Config):
     timerange = TimeRange.parse_timerange(
         None if config.get("timerange") is None else str(config.get("timerange"))
     )

-    backtest_stats = load_backtest_stats(config["exportfilename"])
+    try:
+        backtest_stats = load_backtest_stats(config["exportfilename"])
+    except ValueError as e:
+        raise ConfigurationError(e) from e

     for strategy_name, results in backtest_stats["strategy"].items():
         trades = load_backtest_data(config["exportfilename"], strategy_name)

         if trades is not None and not trades.empty:
-            signal_candles = _load_signal_candles(config["exportfilename"])
-            exit_signals = _load_exit_signal_candles(config["exportfilename"])
+            signal_candles = load_signal_candles(config["exportfilename"])
+            exit_signals = load_exit_signal_candles(config["exportfilename"])

             rej_df = None
             if do_rejected:
-                rejected_signals_dict = _load_rejected_signals(config["exportfilename"])
+                rejected_signals_dict = load_rejected_signals(config["exportfilename"])
                 rej_df = prepare_results(
                     rejected_signals_dict,
                     strategy_name,
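Wrapping the loader's `ValueError` in `ConfigurationError` keeps the user-facing error type uniform while `from e` preserves the original cause for debugging. The same pattern in isolation, with a stand-in exception class:

```python
class ConfigurationError(Exception):
    """Stand-in for freqtrade.exceptions.ConfigurationError."""


def load_stats(path):
    # Stand-in for load_backtest_stats(), which raises ValueError on bad input.
    raise ValueError(f"File {path} does not exist.")


def process(path):
    try:
        return load_stats(path)
    except ValueError as e:
        # Re-raise as the CLI-facing error type, keeping the cause chained.
        raise ConfigurationError(e) from e
```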
@@ -19,6 +19,15 @@ from freqtrade.enums import SignalTagType, SignalType
 logger = logging.getLogger(__name__)


+def dump_json_to_file(file_obj: TextIO, data: Any) -> None:
+    """
+    Dump JSON data into a file object
+    :param file_obj: File object to write to
+    :param data: JSON Data to save
+    """
+    rapidjson.dump(data, file_obj, default=str, number_mode=rapidjson.NM_NATIVE)
+
+
 def file_dump_json(filename: Path, data: Any, is_zip: bool = False, log: bool = True) -> None:
     """
     Dump JSON data into a file

@@ -35,32 +44,16 @@ def file_dump_json(filename: Path, data: Any, is_zip: bool = False, log: bool =
             logger.info(f'dumping json to "{filename}"')

         with gzip.open(filename, "wt", encoding="utf-8") as fpz:
-            rapidjson.dump(data, fpz, default=str, number_mode=rapidjson.NM_NATIVE)
+            dump_json_to_file(fpz, data)
     else:
         if log:
             logger.info(f'dumping json to "{filename}"')
         with filename.open("w") as fp:
-            rapidjson.dump(data, fp, default=str, number_mode=rapidjson.NM_NATIVE)
+            dump_json_to_file(fp, data)

     logger.debug(f'done json to "{filename}"')


-def file_dump_joblib(filename: Path, data: Any, log: bool = True) -> None:
-    """
-    Dump object data into a file
-    :param filename: file to create
-    :param data: Object data to save
-    :return:
-    """
-    import joblib
-
-    if log:
-        logger.info(f'dumping joblib to "{filename}"')
-    with filename.open("wb") as fp:
-        joblib.dump(data, fp)
-    logger.debug(f'done joblib dump to "{filename}"')
-
-
 def json_load(datafile: TextIO) -> Any:
     """
     load data with rapidjson
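Factoring the `rapidjson.dump` call into `dump_json_to_file` lets the same serializer write to real files, gzip streams, or in-memory buffers; the zip storage path below relies on the `StringIO` case:

```python
from io import StringIO

from freqtrade.misc import dump_json_to_file

buf = StringIO()
dump_json_to_file(buf, {"strategy": {}, "strategy_comparison": []})
payload = buf.getvalue()  # ready for ZipFile.writestr()
```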
@@ -40,4 +40,4 @@ def get_strategy_run_id(strategy) -> str:
 def get_backtest_metadata_filename(filename: Path | str) -> Path:
     """Return metadata filename for specified backtest results file."""
     filename = Path(filename)
-    return filename.parent / Path(f"{filename.stem}.meta{filename.suffix}")
+    return filename.parent / Path(f"{filename.stem}.meta.json")
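Hardcoding `.meta.json` gives zip results the same sidecar naming as json results, mirroring the tests at the end of this diff:

```python
from pathlib import Path

from freqtrade.optimize.backtest_caching import get_backtest_metadata_filename

assert get_backtest_metadata_filename("backtest_results.zip") == Path("backtest_results.meta.json")
assert get_backtest_metadata_filename("backtest_results.json") == Path("backtest_results.meta.json")
```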
@@ -1,18 +1,33 @@
 import logging
+from io import BytesIO, StringIO
 from pathlib import Path
+from typing import Any
+from zipfile import ZIP_DEFLATED, ZipFile

 from pandas import DataFrame

 from freqtrade.constants import LAST_BT_RESULT_FN
 from freqtrade.enums.runmode import RunMode
 from freqtrade.ft_types import BacktestResultType
-from freqtrade.misc import file_dump_joblib, file_dump_json
+from freqtrade.misc import dump_json_to_file, file_dump_json
 from freqtrade.optimize.backtest_caching import get_backtest_metadata_filename


 logger = logging.getLogger(__name__)


+def file_dump_joblib(file_obj: BytesIO, data: Any, log: bool = True) -> None:
+    """
+    Dump object data into a file
+    :param filename: file to create
+    :param data: Object data to save
+    :return:
+    """
+    import joblib
+
+    joblib.dump(data, file_obj)
+
+
 def _generate_filename(recordfilename: Path, appendix: str, suffix: str) -> Path:
     """
     Generates a filename based on the provided parameters.

@@ -39,72 +54,59 @@ def store_backtest_results(
     analysis_results: dict[str, dict[str, DataFrame]] | None = None,
 ) -> Path:
     """
-    Stores backtest results and analysis data
+    Stores backtest results and analysis data in a zip file, with metadata stored separately
+    for convenience.
     :param config: Configuration dictionary
     :param stats: Dataframe containing the backtesting statistics
     :param dtappendix: Datetime to use for the filename
     :param market_change_data: Dataframe containing market change data
     :param analysis_results: Dictionary containing analysis results
     """
-
     # Path object, which can either be a filename or a directory.
     # Filenames will be appended with a timestamp right before the suffix
     # while for directories, <directory>/backtest-result-<datetime>.json will be used as filename
     recordfilename: Path = config["exportfilename"]
-    filename = _generate_filename(recordfilename, dtappendix, ".json")
+    zip_filename = _generate_filename(recordfilename, dtappendix, ".zip")
+    base_filename = _generate_filename(recordfilename, dtappendix, "")
+    json_filename = _generate_filename(recordfilename, dtappendix, ".json")

-    # Store metadata separately.
-    file_dump_json(get_backtest_metadata_filename(filename), stats["metadata"])
-    # Don't mutate the original stats dict.
-    stats_copy = {
-        "strategy": stats["strategy"],
-        "strategy_comparison": stats["strategy_comparison"],
-    }
+    # Store metadata separately with .json extension
+    file_dump_json(get_backtest_metadata_filename(json_filename), stats["metadata"])

-    file_dump_json(filename, stats_copy)
+    # Store latest backtest info separately
+    latest_filename = Path.joinpath(zip_filename.parent, LAST_BT_RESULT_FN)
+    file_dump_json(latest_filename, {"latest_backtest": str(zip_filename.name)}, log=False)

-    latest_filename = Path.joinpath(filename.parent, LAST_BT_RESULT_FN)
-    file_dump_json(latest_filename, {"latest_backtest": str(filename.name)})
+    # Create zip file and add the files
+    with ZipFile(zip_filename, "w", ZIP_DEFLATED) as zipf:
+        # Store stats
+        stats_copy = {
+            "strategy": stats["strategy"],
+            "strategy_comparison": stats["strategy_comparison"],
+        }
+        stats_buf = StringIO()
+        dump_json_to_file(stats_buf, stats_copy)
+        zipf.writestr(json_filename.name, stats_buf.getvalue())

-    if market_change_data is not None:
-        filename_mc = _generate_filename(recordfilename, f"{dtappendix}_market_change", ".feather")
-        market_change_data.reset_index().to_feather(
-            filename_mc, compression_level=9, compression="lz4"
-        )
+        # Add market change data if present
+        if market_change_data is not None:
+            market_change_name = f"{base_filename.stem}_market_change.feather"
+            market_change_buf = BytesIO()
+            market_change_data.reset_index().to_feather(
+                market_change_buf, compression_level=9, compression="lz4"
+            )
+            market_change_buf.seek(0)
+            zipf.writestr(market_change_name, market_change_buf.getvalue())

-    if (
-        config.get("export", "none") == "signals"
-        and analysis_results is not None
-        and config.get("runmode", RunMode.OTHER) == RunMode.BACKTEST
-    ):
-        _store_backtest_analysis_data(
-            recordfilename, analysis_results["signals"], dtappendix, "signals"
-        )
-        _store_backtest_analysis_data(
-            recordfilename, analysis_results["rejected"], dtappendix, "rejected"
-        )
-        _store_backtest_analysis_data(
-            recordfilename, analysis_results["exited"], dtappendix, "exited"
-        )
+        # Add analysis results if present and running in backtest mode
+        if (
+            config.get("export", "none") == "signals"
+            and analysis_results is not None
+            and config.get("runmode", RunMode.OTHER) == RunMode.BACKTEST
+        ):
+            for name in ["signals", "rejected", "exited"]:
+                if name in analysis_results:
+                    analysis_name = f"{base_filename.stem}_{name}.pkl"
+                    analysis_buf = BytesIO()
+                    file_dump_joblib(analysis_buf, analysis_results[name])
+                    analysis_buf.seek(0)
+                    zipf.writestr(analysis_name, analysis_buf.getvalue())

-    return filename
-
-
-def _store_backtest_analysis_data(
-    recordfilename: Path, data: dict[str, dict], dtappendix: str, name: str
-) -> Path:
-    """
-    Stores backtest trade candles for analysis
-    :param recordfilename: Path object, which can either be a filename or a directory.
-        Filenames will be appended with a timestamp right before the suffix
-        while for directories, <directory>/backtest-result-<datetime>_<name>.pkl will be used
-        as filename
-    :param candles: Dict containing the backtesting data for analysis
-    :param dtappendix: Datetime to use for the filename
-    :param name: Name to use for the file, e.g. signals, rejected
-    """
-    filename = _generate_filename(recordfilename, f"{dtappendix}_{name}", ".pkl")
-
-    file_dump_joblib(filename, data)
-
-    return filename
+    return zip_filename
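The storage rewrite serializes every artifact into an in-memory buffer and adds it to one archive via `writestr`, so a run produces a single result file instead of several loose ones. The pattern in isolation (all names hypothetical):

```python
import json
from io import BytesIO
from zipfile import ZIP_DEFLATED, ZipFile

import pandas as pd

df = pd.DataFrame({"date": pd.date_range("2024-01-01", periods=3), "mean": [1.0, 1.1, 0.9]})

with ZipFile("example-result.zip", "w", ZIP_DEFLATED) as zipf:
    # Text artifacts go straight in as string members.
    zipf.writestr("example-result.json", json.dumps({"strategy": {}}))
    # Binary artifacts are staged through a BytesIO buffer first.
    buf = BytesIO()
    df.to_feather(buf)
    zipf.writestr("example-result_market_change.feather", buf.getvalue())
```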
@@ -273,15 +273,18 @@ def api_backtest_history(config=Depends(get_config)):
 def api_backtest_history_result(filename: str, strategy: str, config=Depends(get_config)):
     # Get backtest result history, read from metadata files
     bt_results_base: Path = config["user_data_dir"] / "backtest_results"
-    fn = (bt_results_base / filename).with_suffix(".json")
+    for ext in [".zip", ".json"]:
+        fn = (bt_results_base / filename).with_suffix(ext)
+        if is_file_in_dir(fn, bt_results_base):
+            break
+    else:
+        raise HTTPException(status_code=404, detail="File not found.")

     results: dict[str, Any] = {
         "metadata": {},
         "strategy": {},
         "strategy_comparison": [],
     }
-    if not is_file_in_dir(fn, bt_results_base):
-        raise HTTPException(status_code=404, detail="File not found.")
     load_and_merge_backtest_result(strategy, fn, results)
     return {
         "status": "ended",

@@ -301,9 +304,12 @@ def api_backtest_history_result(filename: str, strategy: str, config=Depends(get_config)):
 def api_delete_backtest_history_entry(file: str, config=Depends(get_config)):
     # Get backtest result history, read from metadata files
     bt_results_base: Path = config["user_data_dir"] / "backtest_results"
-    file_abs = (bt_results_base / file).with_suffix(".json")
-    # Ensure file is in backtest_results directory
-    if not is_file_in_dir(file_abs, bt_results_base):
+    for ext in [".zip", ".json"]:
+        file_abs = (bt_results_base / file).with_suffix(ext)
+        # Ensure file is in backtest_results directory
+        if is_file_in_dir(file_abs, bt_results_base):
+            break
+    else:
         raise HTTPException(status_code=404, detail="File not found.")

     delete_backtest_result(file_abs)

@@ -320,10 +326,14 @@ def api_update_backtest_history_entry(
 ):
     # Get backtest result history, read from metadata files
     bt_results_base: Path = config["user_data_dir"] / "backtest_results"
-    file_abs = (bt_results_base / file).with_suffix(".json")
-    # Ensure file is in backtest_results directory
-    if not is_file_in_dir(file_abs, bt_results_base):
+    for ext in [".zip", ".json"]:
+        file_abs = (bt_results_base / file).with_suffix(ext)
+        # Ensure file is in backtest_results directory
+        if is_file_in_dir(file_abs, bt_results_base):
+            break
+    else:
         raise HTTPException(status_code=404, detail="File not found.")

     content = {"notes": body.notes}
     try:
         update_backtest_metadata(file_abs, body.strategy, content)

@@ -340,10 +350,17 @@ def api_update_backtest_history_entry(
 )
 def api_get_backtest_market_change(file: str, config=Depends(get_config)):
     bt_results_base: Path = config["user_data_dir"] / "backtest_results"
-    file_abs = (bt_results_base / f"{file}_market_change").with_suffix(".feather")
-    # Ensure file is in backtest_results directory
-    if not is_file_in_dir(file_abs, bt_results_base):
+    for fn in (
+        Path(file).with_suffix(".zip"),
+        Path(f"{file}_market_change").with_suffix(".feather"),
+    ):
+        file_abs = bt_results_base / fn
+        # Ensure file is in backtest_results directory
+        if is_file_in_dir(file_abs, bt_results_base):
+            break
+    else:
        raise HTTPException(status_code=404, detail="File not found.")

     df = get_backtest_market_change(file_abs)

     return {
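All four endpoints rely on the same Python `for`/`else` idiom: the `else` suite runs only if the loop finished without `break`, i.e. when no candidate extension matched. In isolation:

```python
def resolve(candidates, exists):
    """Return the first candidate that exists, mimicking the endpoint lookup."""
    for name in candidates:
        if exists(name):
            break
    else:
        raise FileNotFoundError("no candidate matched")
    return name


print(resolve(["result.zip", "result.json"], lambda n: n.endswith(".json")))
```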
@@ -1,6 +1,7 @@
 from datetime import datetime, timedelta, timezone
 from pathlib import Path
 from unittest.mock import MagicMock
+from zipfile import ZipFile

 import pytest
 from pandas import DataFrame, DateOffset, Timestamp, to_datetime

@@ -15,6 +16,7 @@ from freqtrade.data.btanalysis import (
     get_latest_hyperopt_file,
     load_backtest_data,
     load_backtest_metadata,
+    load_file_from_zip,
     load_trades,
     load_trades_from_db,
 )

@@ -569,3 +571,22 @@ def test_calculate_max_drawdown_abs(profits, relative, highd, lowdays, result, r
     assert drawdown.high_value > drawdown.low_value
     assert drawdown.drawdown_abs == result
     assert pytest.approx(drawdown.relative_account_drawdown) == result_rel
+
+
+def test_load_file_from_zip(tmp_path):
+    with pytest.raises(ValueError, match=r"Zip file .* not found\."):
+        load_file_from_zip(tmp_path / "test.zip", "testfile.txt")
+
+    (tmp_path / "testfile.zip").touch()
+    with pytest.raises(ValueError, match=r"Bad zip file.*"):
+        load_file_from_zip(tmp_path / "testfile.zip", "testfile.txt")
+
+    zip_file = tmp_path / "testfile2.zip"
+    with ZipFile(zip_file, "w") as zipf:
+        zipf.writestr("testfile.txt", "testfile content")
+
+    content = load_file_from_zip(zip_file, "testfile.txt")
+    assert content.decode("utf-8") == "testfile content"
+
+    with pytest.raises(ValueError, match=r"File .* not found in zip.*"):
+        load_file_from_zip(zip_file, "testfile55.txt")
@@ -2660,7 +2660,7 @@ def test_get_backtest_metadata_filename():

     # Test with a string file path with no extension
     filename = "/path/to/backtest_results"
-    expected = Path("/path/to/backtest_results.meta")
+    expected = Path("/path/to/backtest_results.meta.json")
     assert get_backtest_metadata_filename(filename) == expected

     # Test with a string file path with multiple dots in the name

@@ -2672,3 +2672,8 @@ def test_get_backtest_metadata_filename():
     filename = "backtest_results.json"
     expected = Path("backtest_results.meta.json")
     assert get_backtest_metadata_filename(filename) == expected
+
+    # Test with a string file path with no parent directory
+    filename = "backtest_results_zip.zip"
+    expected = Path("backtest_results_zip.meta.json")
+    assert get_backtest_metadata_filename(filename) == expected
@@ -1,7 +1,9 @@
+import json
 import re
 from datetime import timedelta
 from pathlib import Path
 from shutil import copyfile
+from zipfile import ZipFile

 import joblib
 import pandas as pd

@@ -229,13 +231,14 @@ def test_generate_backtest_stats(default_conf, testdatadir, tmp_path):

     store_backtest_results(default_conf, stats, "2022_01_01_15_05_13")

-    # get real Filename (it's btresult-<date>.json)
+    # get real Filename (it's btresult-<date>.zip)
     last_fn = get_latest_backtest_filename(filename_last.parent)
-    assert re.match(r"btresult-.*\.json", last_fn)
+    assert re.match(r"btresult-.*\.zip", last_fn)

     filename1 = tmp_path / last_fn
     assert filename1.is_file()
-    content = filename1.read_text()
+
+    content = json.dumps(load_backtest_stats(filename1))
     assert "max_drawdown_account" in content
     assert "strategy" in content
     assert "pairlist" in content

@@ -248,19 +251,22 @@ def test_generate_backtest_stats(default_conf, testdatadir, tmp_path):

 def test_store_backtest_results(testdatadir, mocker):
     dump_mock = mocker.patch("freqtrade.optimize.optimize_reports.bt_storage.file_dump_json")
-
+    zip_mock = mocker.patch("freqtrade.optimize.optimize_reports.bt_storage.ZipFile")
     data = {"metadata": {}, "strategy": {}, "strategy_comparison": []}

     store_backtest_results({"exportfilename": testdatadir}, data, "2022_01_01_15_05_13")

-    assert dump_mock.call_count == 3
+    assert dump_mock.call_count == 2
+    assert zip_mock.call_count == 1
     assert isinstance(dump_mock.call_args_list[0][0][0], Path)
     assert str(dump_mock.call_args_list[0][0][0]).startswith(str(testdatadir / "backtest-result"))

     dump_mock.reset_mock()
+    zip_mock.reset_mock()
     filename = testdatadir / "testresult.json"
     store_backtest_results({"exportfilename": filename}, data, "2022_01_01_15_05_13")
-    assert dump_mock.call_count == 3
+    assert dump_mock.call_count == 2
+    assert zip_mock.call_count == 1
     assert isinstance(dump_mock.call_args_list[0][0][0], Path)
     # result will be testdatadir / testresult-<timestamp>.json
     assert str(dump_mock.call_args_list[0][0][0]).startswith(str(testdatadir / "testresult"))

@@ -270,67 +276,33 @@ def test_store_backtest_results_real(tmp_path):
     data = {"metadata": {}, "strategy": {}, "strategy_comparison": []}
     store_backtest_results({"exportfilename": tmp_path}, data, "2022_01_01_15_05_13")

-    assert (tmp_path / "backtest-result-2022_01_01_15_05_13.json").is_file()
+    zip_file = tmp_path / "backtest-result-2022_01_01_15_05_13.zip"
+    assert zip_file.is_file()
     assert (tmp_path / "backtest-result-2022_01_01_15_05_13.meta.json").is_file()
-    assert not (tmp_path / "backtest-result-2022_01_01_15_05_13_market_change.feather").is_file()
+    with ZipFile(zip_file, "r") as zipf:
+        assert "backtest-result-2022_01_01_15_05_13.json" in zipf.namelist()
+        assert "backtest-result-2022_01_01_15_05_13_market_change.feather" not in zipf.namelist()
     assert (tmp_path / LAST_BT_RESULT_FN).is_file()
     fn = get_latest_backtest_filename(tmp_path)
-    assert fn == "backtest-result-2022_01_01_15_05_13.json"
+    assert fn == "backtest-result-2022_01_01_15_05_13.zip"

     store_backtest_results(
         {"exportfilename": tmp_path}, data, "2024_01_01_15_05_25", market_change_data=pd.DataFrame()
     )
-    assert (tmp_path / "backtest-result-2024_01_01_15_05_25.json").is_file()
+    zip_file = tmp_path / "backtest-result-2024_01_01_15_05_25.zip"
+    assert zip_file.is_file()
     assert (tmp_path / "backtest-result-2024_01_01_15_05_25.meta.json").is_file()
-    assert (tmp_path / "backtest-result-2024_01_01_15_05_25_market_change.feather").is_file()
+    assert not (tmp_path / "backtest-result-2024_01_01_15_05_25_market_change.feather").is_file()
+
+    with ZipFile(zip_file, "r") as zipf:
+        assert "backtest-result-2024_01_01_15_05_25.json" in zipf.namelist()
+        assert "backtest-result-2024_01_01_15_05_25_market_change.feather" in zipf.namelist()
     assert (tmp_path / LAST_BT_RESULT_FN).is_file()

     # Last file reference should be updated
     fn = get_latest_backtest_filename(tmp_path)
-    assert fn == "backtest-result-2024_01_01_15_05_25.json"
-
-
-def test_store_backtest_candles(tmp_path, mocker):
-    mocker.patch("freqtrade.optimize.optimize_reports.bt_storage.file_dump_json")
-    dump_mock = mocker.patch("freqtrade.optimize.optimize_reports.bt_storage.file_dump_joblib")
-
-    candle_dict = {"DefStrat": {"UNITTEST/BTC": pd.DataFrame()}}
-    bt_results = {"metadata": {}, "strategy": {}, "strategy_comparison": []}
-
-    mock_conf = {
-        "exportfilename": tmp_path,
-        "export": "signals",
-        "runmode": "backtest",
-    }
-
-    # mock directory exporting
-    data = {
-        "signals": candle_dict,
-        "rejected": {},
-        "exited": {},
-    }
-
-    store_backtest_results(mock_conf, bt_results, "2022_01_01_15_05_13", analysis_results=data)
-
-    assert dump_mock.call_count == 3
-    assert isinstance(dump_mock.call_args_list[0][0][0], Path)
-    assert str(dump_mock.call_args_list[0][0][0]).endswith("_signals.pkl")
-    assert str(dump_mock.call_args_list[1][0][0]).endswith("_rejected.pkl")
-    assert str(dump_mock.call_args_list[2][0][0]).endswith("_exited.pkl")
-
-    dump_mock.reset_mock()
-    # mock file exporting
-    filename = Path(tmp_path / "testresult")
-    mock_conf["exportfilename"] = filename
-    store_backtest_results(mock_conf, bt_results, "2022_01_01_15_05_13", analysis_results=data)
-    assert dump_mock.call_count == 3
-    assert isinstance(dump_mock.call_args_list[0][0][0], Path)
-    # result will be tmp_path / testresult-<timestamp>_signals.pkl
-    assert str(dump_mock.call_args_list[0][0][0]).endswith("_signals.pkl")
-    assert str(dump_mock.call_args_list[1][0][0]).endswith("_rejected.pkl")
-    assert str(dump_mock.call_args_list[2][0][0]).endswith("_exited.pkl")
-
-    dump_mock.reset_mock()
+    assert fn == "backtest-result-2024_01_01_15_05_25.zip"


 def test_write_read_backtest_candles(tmp_path):

@@ -350,9 +322,21 @@ def test_write_read_backtest_candles(tmp_path):
         "exited": {},
     }
     store_backtest_results(mock_conf, bt_results, sample_date, analysis_results=data)
-    stored_file = tmp_path / f"backtest-result-{sample_date}_signals.pkl"
-    with stored_file.open("rb") as scp:
-        pickled_signal_candles = joblib.load(scp)
+    stored_file = tmp_path / f"backtest-result-{sample_date}.zip"
+    signals_pkl = f"backtest-result-{sample_date}_signals.pkl"
+    rejected_pkl = f"backtest-result-{sample_date}_rejected.pkl"
+    exited_pkl = f"backtest-result-{sample_date}_exited.pkl"
+    assert not (tmp_path / signals_pkl).is_file()
+    assert stored_file.is_file()
+
+    with ZipFile(stored_file, "r") as zipf:
+        assert signals_pkl in zipf.namelist()
+        assert rejected_pkl in zipf.namelist()
+        assert exited_pkl in zipf.namelist()
+
+        # open and read the file
+        with zipf.open(signals_pkl) as scp:
+            pickled_signal_candles = joblib.load(scp)

     assert pickled_signal_candles.keys() == candle_dict.keys()
     assert pickled_signal_candles["DefStrat"].keys() == pickled_signal_candles["DefStrat"].keys()

@@ -366,14 +350,25 @@ def test_write_read_backtest_candles(tmp_path):
     filename = tmp_path / "testresult"
     mock_conf["exportfilename"] = filename
     store_backtest_results(mock_conf, bt_results, sample_date, analysis_results=data)
-    stored_file = tmp_path / f"testresult-{sample_date}_signals.pkl"
-    with stored_file.open("rb") as scp:
-        pickled_signal_candles = joblib.load(scp)
+    stored_file = tmp_path / f"testresult-{sample_date}.zip"
+    signals_pkl = f"testresult-{sample_date}_signals.pkl"
+    rejected_pkl = f"testresult-{sample_date}_rejected.pkl"
+    exited_pkl = f"testresult-{sample_date}_exited.pkl"
+    assert not (tmp_path / signals_pkl).is_file()
+    assert stored_file.is_file()

-    assert pickled_signal_candles.keys() == candle_dict.keys()
-    assert pickled_signal_candles["DefStrat"].keys() == pickled_signal_candles["DefStrat"].keys()
-    assert pickled_signal_candles["DefStrat"]["UNITTEST/BTC"].equals(
-        pickled_signal_candles["DefStrat"]["UNITTEST/BTC"]
+    with ZipFile(stored_file, "r") as zipf:
+        assert signals_pkl in zipf.namelist()
+        assert rejected_pkl in zipf.namelist()
+        assert exited_pkl in zipf.namelist()
+
+        with zipf.open(signals_pkl) as scp:
+            pickled_signal_candles2 = joblib.load(scp)
+
+    assert pickled_signal_candles2.keys() == candle_dict.keys()
+    assert pickled_signal_candles2["DefStrat"].keys() == pickled_signal_candles2["DefStrat"].keys()
+    assert pickled_signal_candles2["DefStrat"]["UNITTEST/BTC"].equals(
+        pickled_signal_candles2["DefStrat"]["UNITTEST/BTC"]
     )

     _clean_test_file(stored_file)