mirror of
https://github.com/freqtrade/freqtrade.git
synced 2026-02-08 07:10:28 +00:00
Merge branch 'freqtrade:develop' into develop

.github/workflows/ci.yml (vendored)
@@ -57,7 +57,7 @@ jobs:
       - name: Installation - *nix
         if: runner.os == 'Linux'
         run: |
-          python -m pip install --upgrade pip==23.0.1 wheel==0.38.4
+          python -m pip install --upgrade pip wheel
           export LD_LIBRARY_PATH=${HOME}/dependencies/lib:$LD_LIBRARY_PATH
           export TA_LIBRARY_PATH=${HOME}/dependencies/lib
           export TA_INCLUDE_PATH=${HOME}/dependencies/include
@@ -163,7 +163,7 @@ jobs:
          rm /usr/local/bin/python3.11-config || true

          brew install hdf5 c-blosc
-          python -m pip install --upgrade pip==23.0.1 wheel==0.38.4
+          python -m pip install --upgrade pip wheel
          export LD_LIBRARY_PATH=${HOME}/dependencies/lib:$LD_LIBRARY_PATH
          export TA_LIBRARY_PATH=${HOME}/dependencies/lib
          export TA_INCLUDE_PATH=${HOME}/dependencies/include
@@ -352,7 +352,7 @@ jobs:
       - name: Installation - *nix
         if: runner.os == 'Linux'
         run: |
-          python -m pip install --upgrade pip==23.0.1 wheel==0.38.4
+          python -m pip install --upgrade pip wheel
           export LD_LIBRARY_PATH=${HOME}/dependencies/lib:$LD_LIBRARY_PATH
           export TA_LIBRARY_PATH=${HOME}/dependencies/lib
           export TA_INCLUDE_PATH=${HOME}/dependencies/include
@@ -15,10 +15,10 @@ repos:
         additional_dependencies:
           - types-cachetools==5.3.0.5
           - types-filelock==3.2.7
-          - types-requests==2.28.11.17
+          - types-requests==2.29.0.0
           - types-tabulate==0.9.0.2
           - types-python-dateutil==2.8.19.12
-          - SQLAlchemy==2.0.10
+          - SQLAlchemy==2.0.12
         # stages: [push]

   - repo: https://github.com/pycqa/isort
@@ -30,7 +30,7 @@ repos:

   - repo: https://github.com/charliermarsh/ruff-pre-commit
     # Ruff version.
-    rev: 'v0.0.255'
+    rev: 'v0.0.263'
     hooks:
       - id: ruff
@@ -25,7 +25,7 @@ FROM base as python-deps
 RUN apt-get update \
     && apt-get -y install build-essential libssl-dev git libffi-dev libgfortran5 pkg-config cmake gcc \
     && apt-get clean \
-    && pip install --upgrade pip==23.0.1 wheel==0.38.4
+    && pip install --upgrade pip wheel

 # Install TA-lib
 COPY build_helpers/* /tmp/
@@ -1,7 +1,7 @@
 # Downloads don't work automatically, since the URL is regenerated via javascript.
 # Downloaded from https://www.lfd.uci.edu/~gohlke/pythonlibs/#ta-lib

-python -m pip install --upgrade pip==23.0.1 wheel==0.38.4
+python -m pip install --upgrade pip wheel

 $pyv = python -c "import sys; print(f'{sys.version_info.major}.{sys.version_info.minor}')"
@@ -29,7 +29,7 @@ If all goes well, you should now see a `backtest-result-{timestamp}_signals.pkl`
 `user_data/backtest_results` folder.

 To analyze the entry/exit tags, we now need to use the `freqtrade backtesting-analysis` command
-with `--analysis-groups` option provided with space-separated arguments (default `0 1 2`):
+with `--analysis-groups` option provided with space-separated arguments:

 ``` bash
 freqtrade backtesting-analysis -c <config.json> --analysis-groups 0 1 2 3 4 5
@@ -39,6 +39,7 @@ This command will read from the last backtesting results. The `--analysis-groups`
 used to specify the various tabular outputs showing the profit of each group or trade,
 ranging from the simplest (0) to the most detailed per pair, per buy and per sell tag (4):

 * 0: overall winrate and profit summary by enter_tag
 * 1: profit summaries grouped by enter_tag
 * 2: profit summaries grouped by enter_tag and exit_tag
 * 3: profit summaries grouped by pair and enter_tag
@@ -115,3 +116,38 @@ For example, if your backtest timerange was `20220101-20221231` but you only want
 ```bash
 freqtrade backtesting-analysis -c <config.json> --timerange 20220101-20220201
 ```
+
+### Printing out rejected signals
+
+Use the `--rejected-signals` option to print out rejected signals.
+
+```bash
+freqtrade backtesting-analysis -c <config.json> --rejected-signals
+```
+
+### Writing tables to CSV
+
+Some of the tabular outputs can become large, so printing them out to the terminal is not preferable.
+Use the `--analysis-to-csv` option to disable printing out of tables to standard out and write them to CSV files.
+
+```bash
+freqtrade backtesting-analysis -c <config.json> --analysis-to-csv
+```
+
+By default this will write one file per output table you specified in the `backtesting-analysis` command, e.g.
+
+```bash
+freqtrade backtesting-analysis -c <config.json> --analysis-to-csv --rejected-signals --analysis-groups 0 1
+```
+
+This will write to `user_data/backtest_results`:
+
+* rejected_signals.csv
+* group_0.csv
+* group_1.csv
+
+To override where the files will be written, also specify the `--analysis-csv-path` option.
+
+```bash
+freqtrade backtesting-analysis -c <config.json> --analysis-to-csv --analysis-csv-path another/data/path/
+```
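The CSV files written by `--analysis-to-csv` are ordinary tables, so they can be post-processed directly. A minimal sketch, assuming the default `user_data/backtest_results` location and the `group_0.csv` name listed above:

```python
from pathlib import Path

import pandas as pd

# Default output directory used by --analysis-to-csv (see above).
group0 = pd.read_csv(Path("user_data/backtest_results") / "group_0.csv")
print(group0.head())
```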
@@ -30,12 +30,6 @@ The easiest way to install and run Freqtrade is to clone the bot Github repository
 !!! Warning "Up-to-date clock"
     The clock on the system running the bot must be accurate, synchronized to a NTP server frequently enough to avoid problems with communication to the exchanges.

-!!! Error "Running setup.py install for gym did not run successfully."
-    If you get an error related with gym we suggest you to downgrade setuptools it to version 65.5.0 you can do it with the following command:
-    ```bash
-    pip install setuptools==65.5.0
-    ```
-
 ------

 ## Requirements
@@ -242,6 +236,7 @@ source .env/bin/activate

 ```bash
+python3 -m pip install --upgrade pip
 python3 -m pip install -r requirements.txt
 python3 -m pip install -e .
 ```
||||
@@ -1,6 +1,6 @@
 markdown==3.3.7
 mkdocs==1.4.2
-mkdocs-material==9.1.7
+mkdocs-material==9.1.8
 mdx_truly_sane_lists==1.3
 pymdown-extensions==9.11
 jinja2==3.1.2
||||
@@ -227,8 +227,8 @@ for val in self.buy_ema_short.range:
         f'ema_short_{val}': ta.EMA(dataframe, timeperiod=val)
     }))

-# Append columns to existing dataframe
-merged_frame = pd.concat(frames, axis=1)
+# Combine all dataframes, and reassign the original dataframe column
+dataframe = pd.concat(frames, axis=1)
 ```

 Freqtrade does however also counter this by running `dataframe.copy()` on the dataframe right after the `populate_indicators()` method - so performance implications of this should be low to non-existent.
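The hunk above shows only the tail of the column-collection idiom the docs describe. A minimal self-contained sketch of the full pattern, with a hypothetical range standing in for `self.buy_ema_short.range` (TA-Lib's abstract API assumed for `ta.EMA`, and `dataframe` assumed to carry a `close` column):

```python
import pandas as pd
import talib.abstract as ta


def populate_emas(dataframe: pd.DataFrame) -> pd.DataFrame:
    # Collect new indicator columns in a list of small frames instead of
    # assigning them to the dataframe one by one (which fragments it).
    frames = [dataframe]
    for val in range(3, 9):  # stand-in for self.buy_ema_short.range
        frames.append(pd.DataFrame({
            f'ema_short_{val}': ta.EMA(dataframe, timeperiod=val)
        }))

    # Combine all dataframes, and reassign the original dataframe column
    return pd.concat(frames, axis=1)
```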
@@ -723,6 +723,9 @@ usage: freqtrade backtesting-analysis [-h] [-v] [--logfile FILE] [-V]
                                       [--exit-reason-list EXIT_REASON_LIST [EXIT_REASON_LIST ...]]
                                       [--indicator-list INDICATOR_LIST [INDICATOR_LIST ...]]
                                       [--timerange YYYYMMDD-[YYYYMMDD]]
+                                      [--rejected]
+                                      [--analysis-to-csv]
+                                      [--analysis-csv-path PATH]

 optional arguments:
   -h, --help            show this help message and exit
@@ -736,19 +739,27 @@ optional arguments:
                         pair and enter_tag, 4: by pair, enter_ and exit_tag
                         (this can get quite large)
   --enter-reason-list ENTER_REASON_LIST [ENTER_REASON_LIST ...]
-                        Comma separated list of entry signals to analyse.
-                        Default: all. e.g. 'entry_tag_a,entry_tag_b'
+                        Space separated list of entry signals to analyse.
+                        Default: all. e.g. 'entry_tag_a entry_tag_b'
   --exit-reason-list EXIT_REASON_LIST [EXIT_REASON_LIST ...]
-                        Comma separated list of exit signals to analyse.
+                        Space separated list of exit signals to analyse.
                         Default: all. e.g.
-                        'exit_tag_a,roi,stop_loss,trailing_stop_loss'
+                        'exit_tag_a roi stop_loss trailing_stop_loss'
   --indicator-list INDICATOR_LIST [INDICATOR_LIST ...]
-                        Comma separated list of indicators to analyse. e.g.
-                        'close,rsi,bb_lowerband,profit_abs'
+                        Space separated list of indicators to analyse. e.g.
+                        'close rsi bb_lowerband profit_abs'
   --timerange YYYYMMDD-[YYYYMMDD]
                         Timerange to filter trades for analysis,
                         start inclusive, end exclusive. e.g.
                         20220101-20220201
+  --rejected
+                        Print out rejected trades table
+  --analysis-to-csv
+                        Write out tables to individual CSVs, by default to
+                        'user_data/backtest_results' unless '--analysis-csv-path' is given.
+  --analysis-csv-path [PATH]
+                        Optional path where individual CSVs will be written. If not used,
+                        CSVs will be written to 'user_data/backtest_results'.

 Common arguments:
   -v, --verbose         Verbose mode (-vv for more, -vvv to get all messages).
||||
@@ -106,7 +106,8 @@ ARGS_HYPEROPT_SHOW = ["hyperopt_list_best", "hyperopt_list_profitable", "hyperopt
                       "disableparamexport", "backtest_breakdown"]

 ARGS_ANALYZE_ENTRIES_EXITS = ["exportfilename", "analysis_groups", "enter_reason_list",
-                              "exit_reason_list", "indicator_list", "timerange"]
+                              "exit_reason_list", "indicator_list", "timerange",
+                              "analysis_rejected", "analysis_to_csv", "analysis_csv_path"]

 NO_CONF_REQURIED = ["convert-data", "convert-trade-data", "download-data", "list-timeframes",
                     "list-markets", "list-pairs", "list-strategies", "list-freqaimodels",
||||
@@ -636,30 +636,45 @@ AVAILABLE_CLI_OPTIONS = {
               "4: by pair, enter_ and exit_tag (this can get quite large), "
+              "5: by exit_tag"),
         nargs='+',
-        default=['0', '1', '2'],
+        default=[],
+        choices=['0', '1', '2', '3', '4', '5'],
     ),
     "enter_reason_list": Arg(
         "--enter-reason-list",
-        help=("Comma separated list of entry signals to analyse. Default: all. "
-              "e.g. 'entry_tag_a,entry_tag_b'"),
+        help=("Space separated list of entry signals to analyse. Default: all. "
+              "e.g. 'entry_tag_a entry_tag_b'"),
         nargs='+',
         default=['all'],
     ),
     "exit_reason_list": Arg(
         "--exit-reason-list",
-        help=("Comma separated list of exit signals to analyse. Default: all. "
-              "e.g. 'exit_tag_a,roi,stop_loss,trailing_stop_loss'"),
+        help=("Space separated list of exit signals to analyse. Default: all. "
+              "e.g. 'exit_tag_a roi stop_loss trailing_stop_loss'"),
         nargs='+',
         default=['all'],
     ),
     "indicator_list": Arg(
         "--indicator-list",
-        help=("Comma separated list of indicators to analyse. "
-              "e.g. 'close,rsi,bb_lowerband,profit_abs'"),
+        help=("Space separated list of indicators to analyse. "
+              "e.g. 'close rsi bb_lowerband profit_abs'"),
         nargs='+',
         default=[],
     ),
+    "analysis_rejected": Arg(
+        '--rejected-signals',
+        help='Analyse rejected signals',
+        action='store_true',
+    ),
+    "analysis_to_csv": Arg(
+        '--analysis-to-csv',
+        help='Save selected analysis tables to individual CSVs',
+        action='store_true',
+    ),
+    "analysis_csv_path": Arg(
+        '--analysis-csv-path',
+        help=("Specify a path to save the analysis CSVs "
+              "if --analysis-to-csv is enabled. Default: user_data/basktesting_results/"),
+    ),
     "freqaimodel": Arg(
         '--freqaimodel',
         help='Specify a custom freqaimodels.',
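The comma-to-space change in the help strings falls straight out of `nargs='+'`: argparse already collects whitespace-separated tokens into a list, so no comma parsing is involved. A minimal sketch of that behavior with a hypothetical parser (not freqtrade's):

```python
import argparse

parser = argparse.ArgumentParser()
# nargs='+' consumes one or more whitespace-separated tokens into a list.
parser.add_argument("--indicator-list", nargs="+", default=[])

args = parser.parse_args(["--indicator-list", "close", "rsi", "bb_lowerband"])
print(args.indicator_list)  # ['close', 'rsi', 'bb_lowerband']
```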
@@ -465,6 +465,15 @@ class Configuration:
         self._args_to_config(config, argname='timerange',
                              logstring='Filter trades by timerange: {}')

+        self._args_to_config(config, argname='analysis_rejected',
+                             logstring='Analyse rejected signals: {}')
+
+        self._args_to_config(config, argname='analysis_to_csv',
+                             logstring='Store analysis tables to CSV: {}')
+
+        self._args_to_config(config, argname='analysis_csv_path',
+                             logstring='Path to store analysis CSVs: {}')
+
     def _process_runmode(self, config: Config) -> None:

         self._args_to_config(config, argname='dry_run',
||||
@@ -1,5 +1,6 @@
 import logging
 from pathlib import Path
+from typing import List

 import joblib
 import pandas as pd
@@ -15,22 +16,31 @@ from freqtrade.exceptions import OperationalException
 logger = logging.getLogger(__name__)


-def _load_signal_candles(backtest_dir: Path):
+def _load_backtest_analysis_data(backtest_dir: Path, name: str):
     if backtest_dir.is_dir():
         scpf = Path(backtest_dir,
-                    Path(get_latest_backtest_filename(backtest_dir)).stem + "_signals.pkl"
+                    Path(get_latest_backtest_filename(backtest_dir)).stem + "_" + name + ".pkl"
                     )
     else:
-        scpf = Path(backtest_dir.parent / f"{backtest_dir.stem}_signals.pkl")
+        scpf = Path(backtest_dir.parent / f"{backtest_dir.stem}_{name}.pkl")

     try:
         with scpf.open("rb") as scp:
-            signal_candles = joblib.load(scp)
-            logger.info(f"Loaded signal candles: {str(scpf)}")
+            loaded_data = joblib.load(scp)
+            logger.info(f"Loaded {name} candles: {str(scpf)}")
     except Exception as e:
-        logger.error("Cannot load signal candles from pickled results: ", e)
+        logger.error(f"Cannot load {name} data from pickled results: ", e)
         return None

-    return signal_candles
+    return loaded_data
+
+
+def _load_rejected_signals(backtest_dir: Path):
+    return _load_backtest_analysis_data(backtest_dir, "rejected")
+
+
+def _load_signal_candles(backtest_dir: Path):
+    return _load_backtest_analysis_data(backtest_dir, "signals")


 def _process_candles_and_indicators(pairlist, strategy_name, trades, signal_candles):
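The refactored loader relies on the writer and reader agreeing on the `<stem>_<name>.pkl` convention (`signals` and `rejected`). A minimal round-trip sketch with joblib, using a hypothetical filename (the export side goes through `file_dump_joblib`, which I take to wrap `joblib.dump`):

```python
from pathlib import Path

import joblib

data = {"MyStrategy": {"BTC/USDT": [1, 2, 3]}}  # stand-in for a dict of DataFrames
path = Path("backtest-result-2023_05_01_12_00_00_signals.pkl")  # hypothetical name

joblib.dump(data, path)     # roughly what the export side does
loaded = joblib.load(path)  # what _load_backtest_analysis_data does
assert loaded.keys() == data.keys()
```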
@@ -43,9 +53,7 @@ def _process_candles_and_indicators(pairlist, strategy_name, trades, signal_cand
     for pair in pairlist:
         if pair in signal_candles[strategy_name]:
             analysed_trades_dict[strategy_name][pair] = _analyze_candles_and_indicators(
-                pair,
-                trades,
-                signal_candles[strategy_name][pair])
+                pair, trades, signal_candles[strategy_name][pair])
     except Exception as e:
         print(f"Cannot process entry/exit reasons for {strategy_name}: ", e)
@@ -85,7 +93,7 @@ def _analyze_candles_and_indicators(pair, trades: pd.DataFrame, signal_candles:
         return pd.DataFrame()


-def _do_group_table_output(bigdf, glist):
+def _do_group_table_output(bigdf, glist, csv_path: Path, to_csv=False, ):
     for g in glist:
         # 0: summary wins/losses grouped by enter tag
         if g == "0":
@@ -116,7 +124,8 @@ def _do_group_table_output(bigdf, glist):

             sortcols = ['total_num_buys']

-            _print_table(new, sortcols, show_index=True)
+            _print_table(new, sortcols, show_index=True, name="Group 0:",
+                         to_csv=to_csv, csv_path=csv_path)

         else:
             agg_mask = {'profit_abs': ['count', 'sum', 'median', 'mean'],
@@ -154,11 +163,24 @@ def _do_group_table_output(bigdf, glist):
                 new['mean_profit_pct'] = new['mean_profit_pct'] * 100
                 new['total_profit_pct'] = new['total_profit_pct'] * 100

-                _print_table(new, sortcols)
+                _print_table(new, sortcols, name=f"Group {g}:",
+                             to_csv=to_csv, csv_path=csv_path)
             else:
                 logger.warning("Invalid group mask specified.")


+def _do_rejected_signals_output(rejected_signals_df: pd.DataFrame,
+                                to_csv: bool = False, csv_path=None) -> None:
+    cols = ['pair', 'date', 'enter_tag']
+    sortcols = ['date', 'pair', 'enter_tag']
+    _print_table(rejected_signals_df[cols],
+                 sortcols,
+                 show_index=False,
+                 name="Rejected Signals:",
+                 to_csv=to_csv,
+                 csv_path=csv_path)
+
+
 def _select_rows_within_dates(df, timerange=None, df_date_col: str = 'date'):
     if timerange:
         if timerange.starttype == 'date':
@@ -192,38 +214,64 @@ def prepare_results(analysed_trades, stratname,
     return res_df


-def print_results(res_df, analysis_groups, indicator_list):
+def print_results(res_df: pd.DataFrame, analysis_groups: List[str], indicator_list: List[str],
+                  csv_path: Path, rejected_signals=None, to_csv=False):
     if res_df.shape[0] > 0:
         if analysis_groups:
-            _do_group_table_output(res_df, analysis_groups)
+            _do_group_table_output(res_df, analysis_groups, to_csv=to_csv, csv_path=csv_path)
+
+        if rejected_signals is not None:
+            if rejected_signals.empty:
+                print("There were no rejected signals.")
+            else:
+                _do_rejected_signals_output(rejected_signals, to_csv=to_csv, csv_path=csv_path)

         # NB this can be large for big dataframes!
         if "all" in indicator_list:
-            print(res_df)
-        elif indicator_list is not None:
+            _print_table(res_df,
+                         show_index=False,
+                         name="Indicators:",
+                         to_csv=to_csv,
+                         csv_path=csv_path)
+        elif indicator_list is not None and indicator_list:
             available_inds = []
             for ind in indicator_list:
                 if ind in res_df:
                     available_inds.append(ind)
             ilist = ["pair", "enter_reason", "exit_reason"] + available_inds
-            _print_table(res_df[ilist], sortcols=['exit_reason'], show_index=False)
+            _print_table(res_df[ilist],
+                         sortcols=['exit_reason'],
+                         show_index=False,
+                         name="Indicators:",
+                         to_csv=to_csv,
+                         csv_path=csv_path)
     else:
         print("\\No trades to show")


-def _print_table(df, sortcols=None, show_index=False):
+def _print_table(df: pd.DataFrame, sortcols=None, *, show_index=False, name=None,
+                 to_csv=False, csv_path: Path):
     if (sortcols is not None):
         data = df.sort_values(sortcols)
     else:
         data = df

-    print(
-        tabulate(
-            data,
-            headers='keys',
-            tablefmt='psql',
-            showindex=show_index
-        )
-    )
+    if to_csv:
+        safe_name = Path(csv_path, name.lower().replace(" ", "_").replace(":", "") + ".csv")
+        data.to_csv(safe_name)
+        print(f"Saved {name} to {safe_name}")
+    else:
+        if name is not None:
+            print(name)
+
+        print(
+            tabulate(
+                data,
+                headers='keys',
+                tablefmt='psql',
+                showindex=show_index
+            )
+        )


 def process_entry_exit_reasons(config: Config):
@@ -232,6 +280,11 @@ def process_entry_exit_reasons(config: Config):
     enter_reason_list = config.get('enter_reason_list', ["all"])
     exit_reason_list = config.get('exit_reason_list', ["all"])
     indicator_list = config.get('indicator_list', [])
+    do_rejected = config.get('analysis_rejected', False)
+    to_csv = config.get('analysis_to_csv', False)
+    csv_path = Path(config.get('analysis_csv_path', config['exportfilename']))
+    if to_csv and not csv_path.is_dir():
+        raise OperationalException(f"Specified directory {csv_path} does not exist.")

     timerange = TimeRange.parse_timerange(None if config.get(
         'timerange') is None else str(config.get('timerange')))
@@ -241,8 +294,16 @@ def process_entry_exit_reasons(config: Config):
     for strategy_name, results in backtest_stats['strategy'].items():
         trades = load_backtest_data(config['exportfilename'], strategy_name)

-        if not trades.empty:
+        if trades is not None and not trades.empty:
             signal_candles = _load_signal_candles(config['exportfilename'])

+            rej_df = None
+            if do_rejected:
+                rejected_signals_dict = _load_rejected_signals(config['exportfilename'])
+                rej_df = prepare_results(rejected_signals_dict, strategy_name,
+                                         enter_reason_list, exit_reason_list,
+                                         timerange=timerange)
+
             analysed_trades_dict = _process_candles_and_indicators(
                 config['exchange']['pair_whitelist'], strategy_name,
                 trades, signal_candles)
@@ -253,7 +314,10 @@ def process_entry_exit_reasons(config: Config):

             print_results(res_df,
                           analysis_groups,
-                          indicator_list)
+                          indicator_list,
+                          rejected_signals=rej_df,
+                          to_csv=to_csv,
+                          csv_path=csv_path)

     except ValueError as e:
         raise OperationalException(e) from e
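The CSV filenames produced by `_print_table` above are derived from the human-readable table names; a quick check of that sanitization in isolation, which also explains the `group_0.csv` / `rejected_signals.csv` names in the docs:

```python
from pathlib import Path


def csv_name(csv_path: Path, name: str) -> Path:
    # Same transform as _print_table: lowercase, spaces to underscores,
    # colons stripped, '.csv' appended.
    return Path(csv_path, name.lower().replace(" ", "_").replace(":", "") + ".csv")


print(csv_name(Path("user_data/backtest_results"), "Group 0:"))
# user_data/backtest_results/group_0.csv
print(csv_name(Path("user_data/backtest_results"), "Rejected Signals:"))
# user_data/backtest_results/rejected_signals.csv
```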
@@ -2900,8 +2900,8 @@ class Exchange:
                 if nominal_value >= tier['minNotional']:
                     return (tier['maintenanceMarginRate'], tier['maintAmt'])

-            raise OperationalException("nominal value can not be lower than 0")
+            raise ExchangeError("nominal value can not be lower than 0")
             # The lowest notional_floor for any pair in fetch_leverage_tiers is always 0 because it
             # describes the min amt for a tier, and the lowest tier will always go down to 0
         else:
-            raise OperationalException(f"Cannot get maintenance ratio using {self.name}")
+            raise ExchangeError(f"Cannot get maintenance ratio using {self.name}")
@@ -1,7 +1,7 @@
 import logging
 from enum import Enum

-from gym import spaces
+from gymnasium import spaces

 from freqtrade.freqai.RL.BaseEnvironment import BaseEnvironment, Positions

@@ -94,9 +94,12 @@ class Base3ActionRLEnv(BaseEnvironment):

         observation = self._get_observation()

+        # user can play with time if they want
+        truncated = False
+
         self._update_history(info)

-        return observation, step_reward, self._done, info
+        return observation, step_reward, self._done, truncated, info

     def is_tradesignal(self, action: int) -> bool:
         """

@@ -1,7 +1,7 @@
 import logging
 from enum import Enum

-from gym import spaces
+from gymnasium import spaces

 from freqtrade.freqai.RL.BaseEnvironment import BaseEnvironment, Positions

@@ -96,9 +96,12 @@ class Base4ActionRLEnv(BaseEnvironment):

         observation = self._get_observation()

+        # user can play with time if they want
+        truncated = False
+
         self._update_history(info)

-        return observation, step_reward, self._done, info
+        return observation, step_reward, self._done, truncated, info

     def is_tradesignal(self, action: int) -> bool:
         """

@@ -1,7 +1,7 @@
 import logging
 from enum import Enum

-from gym import spaces
+from gymnasium import spaces

 from freqtrade.freqai.RL.BaseEnvironment import BaseEnvironment, Positions

@@ -101,10 +101,12 @@ class Base5ActionRLEnv(BaseEnvironment):
             )

         observation = self._get_observation()
+        # user can play with time if they want
+        truncated = False

         self._update_history(info)

-        return observation, step_reward, self._done, info
+        return observation, step_reward, self._done, truncated, info

     def is_tradesignal(self, action: int) -> bool:
         """
@@ -4,11 +4,11 @@ from abc import abstractmethod
 from enum import Enum
 from typing import Optional, Type, Union

-import gym
+import gymnasium as gym
 import numpy as np
 import pandas as pd
-from gym import spaces
-from gym.utils import seeding
+from gymnasium import spaces
+from gymnasium.utils import seeding
 from pandas import DataFrame


@@ -127,6 +127,14 @@ class BaseEnvironment(gym.Env):
         self.history: dict = {}
         self.trade_history: list = []

+    def get_attr(self, attr: str):
+        """
+        Returns the attribute of the environment
+        :param attr: attribute to return
+        :return: attribute
+        """
+        return getattr(self, attr)
+
     @abstractmethod
     def set_action_space(self):
         """
@@ -203,7 +211,7 @@ class BaseEnvironment(gym.Env):
         self.close_trade_profit = []
         self._total_unrealized_profit = 1

-        return self._get_observation()
+        return self._get_observation(), self.history

     @abstractmethod
     def step(self, action: int):
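These hunks track the gym-to-gymnasium API change: `step()` now returns a five-tuple with a separate `truncated` flag, and `reset()` returns an `(observation, info)` pair rather than a bare observation. A minimal sketch of the new contract against a stock environment (gymnasium assumed installed; not freqtrade code):

```python
import gymnasium as gym

env = gym.make("CartPole-v1")

# gymnasium reset(): (observation, info) instead of a bare observation.
observation, info = env.reset(seed=42)

# gymnasium step(): terminated and truncated are reported separately.
observation, reward, terminated, truncated, info = env.step(env.action_space.sample())
done = terminated or truncated
env.close()
```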
@@ -6,7 +6,7 @@ from datetime import datetime, timezone
 from pathlib import Path
 from typing import Any, Callable, Dict, Optional, Tuple, Type, Union

-import gym
+import gymnasium as gym
 import numpy as np
 import numpy.typing as npt
 import pandas as pd
@@ -16,13 +16,13 @@ from pandas import DataFrame
 from stable_baselines3.common.callbacks import EvalCallback
 from stable_baselines3.common.monitor import Monitor
 from stable_baselines3.common.utils import set_random_seed
-from stable_baselines3.common.vec_env import SubprocVecEnv
+from stable_baselines3.common.vec_env import SubprocVecEnv, VecMonitor

 from freqtrade.exceptions import OperationalException
 from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
 from freqtrade.freqai.freqai_interface import IFreqaiModel
 from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Base5ActionRLEnv
-from freqtrade.freqai.RL.BaseEnvironment import BaseActions, Positions
+from freqtrade.freqai.RL.BaseEnvironment import BaseActions, BaseEnvironment, Positions
 from freqtrade.freqai.RL.TensorboardCallback import TensorboardCallback
 from freqtrade.persistence import Trade

@@ -46,8 +46,8 @@ class BaseReinforcementLearningModel(IFreqaiModel):
             'cpu_count', 1), max(int(self.max_system_threads / 2), 1))
         th.set_num_threads(self.max_threads)
         self.reward_params = self.freqai_info['rl_config']['model_reward_parameters']
-        self.train_env: Union[SubprocVecEnv, Type[gym.Env]] = gym.Env()
-        self.eval_env: Union[SubprocVecEnv, Type[gym.Env]] = gym.Env()
+        self.train_env: Union[VecMonitor, SubprocVecEnv, gym.Env] = gym.Env()
+        self.eval_env: Union[VecMonitor, SubprocVecEnv, gym.Env] = gym.Env()
         self.eval_callback: Optional[EvalCallback] = None
         self.model_type = self.freqai_info['rl_config']['model_type']
         self.rl_config = self.freqai_info['rl_config']
@@ -431,9 +431,8 @@ class BaseReinforcementLearningModel(IFreqaiModel):
         return 0.


-def make_env(MyRLEnv: Type[gym.Env], env_id: str, rank: int,
+def make_env(MyRLEnv: Type[BaseEnvironment], env_id: str, rank: int,
              seed: int, train_df: DataFrame, price: DataFrame,
-             monitor: bool = False,
              env_info: Dict[str, Any] = {}) -> Callable:
     """
     Utility function for multiprocessed env.
@@ -450,8 +449,7 @@ def make_env(MyRLEnv: Type[gym.Env], env_id: str, rank: int,

         env = MyRLEnv(df=train_df, prices=price, id=env_id, seed=seed + rank,
                       **env_info)
-        if monitor:
-            env = Monitor(env)

         return env
     set_random_seed(seed)
     return _init
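With the per-env `Monitor` wrapping removed from `make_env`, monitoring moves to a single `VecMonitor` around the vectorized env (see the `ReinforcementLearner_multiproc` hunk further down). A minimal sketch of the closure pattern `make_env` relies on, with a stock env standing in for `MyRLEnv`:

```python
from typing import Callable

import gymnasium as gym
from stable_baselines3.common.utils import set_random_seed
from stable_baselines3.common.vec_env import SubprocVecEnv, VecMonitor


def make_env(env_id: str, rank: int, seed: int = 0) -> Callable[[], gym.Env]:
    # SubprocVecEnv wants a zero-argument factory per worker process,
    # so the env is built lazily inside a closure.
    def _init() -> gym.Env:
        return gym.make(env_id)
    set_random_seed(seed + rank)
    return _init


if __name__ == "__main__":
    # One VecMonitor around the vectorized env replaces per-env Monitor wrappers.
    vec_env = VecMonitor(SubprocVecEnv([make_env("CartPole-v1", i) for i in range(4)]))
```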
@@ -3,8 +3,9 @@ from typing import Any, Dict, Type, Union

 from stable_baselines3.common.callbacks import BaseCallback
 from stable_baselines3.common.logger import HParam
+from stable_baselines3.common.vec_env import VecEnv

-from freqtrade.freqai.RL.BaseEnvironment import BaseActions, BaseEnvironment
+from freqtrade.freqai.RL.BaseEnvironment import BaseActions


 class TensorboardCallback(BaseCallback):
@@ -12,11 +13,13 @@ class TensorboardCallback(BaseCallback):
     Custom callback for plotting additional values in tensorboard and
     episodic summary reports.
     """
+    # Override training_env type to fix type errors
+    training_env: Union[VecEnv, None] = None
+
     def __init__(self, verbose=1, actions: Type[Enum] = BaseActions):
         super().__init__(verbose)
         self.model: Any = None
-        self.logger = None  # type: Any
-        self.training_env: BaseEnvironment = None  # type: ignore
+        self.logger: Any = None
         self.actions: Type[Enum] = actions

     def _on_training_start(self) -> None:
@@ -44,6 +47,8 @@ class TensorboardCallback(BaseCallback):
     def _on_step(self) -> bool:

         local_info = self.locals["infos"][0]
+        if self.training_env is None:
+            return True
         tensorboard_metrics = self.training_env.get_attr("tensorboard_metrics")[0]

         for metric in local_info:
@@ -45,6 +45,7 @@ class BasePyTorchClassifier(BasePyTorchModel):
     ) -> Tuple[DataFrame, npt.NDArray[np.int_]]:
         """
         Filter the prediction features data and predict with it.
+        :param dk: dk: The datakitchen object
         :param unfiltered_df: Full dataframe for the current backtest period.
         :return:
         :pred_df: dataframe containing the predictions
@@ -78,7 +79,9 @@ class BasePyTorchClassifier(BasePyTorchModel):
         probs = F.softmax(logits, dim=-1)
         predicted_classes = torch.argmax(probs, dim=-1)
         predicted_classes_str = self.decode_class_names(predicted_classes)
-        pred_df_prob = DataFrame(probs.detach().numpy(), columns=class_names)
+        # used .tolist to convert probs into an iterable, in this way Tensors
+        # are automatically moved to the CPU first if necessary.
+        pred_df_prob = DataFrame(probs.detach().tolist(), columns=class_names)
         pred_df = DataFrame(predicted_classes_str, columns=[dk.label_list[0]])
         pred_df = pd.concat([pred_df, pred_df_prob], axis=1)
         return (pred_df, dk.do_predict)
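The comment in the hunk is the whole point of the change: `Tensor.tolist()` copies GPU tensors to the CPU implicitly, whereas `.numpy()` raises on a CUDA tensor unless `.cpu()` is called first. A small CPU-only illustration (column names are hypothetical):

```python
import torch
from pandas import DataFrame

probs = torch.tensor([[0.7, 0.3], [0.2, 0.8]])

# .numpy() needs a CPU tensor; on CUDA you must call .cpu() explicitly first.
df_numpy = DataFrame(probs.detach().cpu().numpy(), columns=["up", "down"])

# .tolist() moves data to the CPU automatically if necessary.
df_list = DataFrame(probs.detach().tolist(), columns=["up", "down"])

assert (df_numpy.values == df_list.values).all()
```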
@@ -45,6 +45,5 @@ class BasePyTorchRegressor(BasePyTorchModel):
             device=self.device
         )
         y = self.model.model(x)
-        y = y.cpu()
-        pred_df = DataFrame(y.detach().numpy(), columns=[dk.label_list[0]])
+        pred_df = DataFrame(y.detach().tolist(), columns=[dk.label_list[0]])
         return (pred_df, dk.do_predict)
@@ -242,8 +242,8 @@ class IFreqaiModel(ABC):
                     new_trained_timerange, pair, strategy, dk, data_load_timerange
                 )
             except Exception as msg:
-                logger.warning(f"Training {pair} raised exception {msg.__class__.__name__}. "
-                               f"Message: {msg}, skipping.")
+                logger.exception(f"Training {pair} raised exception {msg.__class__.__name__}. "
+                                 f"Message: {msg}, skipping.")

             self.train_timer('stop', pair)
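The `warning`-to-`exception` swap matters because `logger.exception` logs at ERROR level and appends the full traceback of the exception currently being handled, which `warning` silently drops. A minimal comparison:

```python
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

try:
    raise ValueError("boom")
except Exception as msg:
    # warning(): message only - the traceback is lost.
    logger.warning(f"Training raised exception {msg.__class__.__name__}.")
    # exception(): ERROR level, with the active traceback appended.
    logger.exception(f"Training raised exception {msg.__class__.__name__}.")
```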
@@ -1,11 +1,12 @@
 import logging
 from pathlib import Path
-from typing import Any, Dict
+from typing import Any, Dict, Type

 import torch as th

 from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
 from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Base5ActionRLEnv, Positions
+from freqtrade.freqai.RL.BaseEnvironment import BaseEnvironment
 from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel


@@ -84,7 +85,9 @@ class ReinforcementLearner(BaseReinforcementLearningModel):

         return model

-    class MyRLEnv(Base5ActionRLEnv):
+    MyRLEnv: Type[BaseEnvironment]
+
+    class MyRLEnv(Base5ActionRLEnv):  # type: ignore[no-redef]
         """
         User can override any function in BaseRLEnv and gym.Env. Here the user
         sets a custom reward based on profit and trade duration.
@@ -3,7 +3,7 @@ from typing import Any, Dict

 from pandas import DataFrame
 from stable_baselines3.common.callbacks import EvalCallback
-from stable_baselines3.common.vec_env import SubprocVecEnv
+from stable_baselines3.common.vec_env import SubprocVecEnv, VecMonitor

 from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
 from freqtrade.freqai.prediction_models.ReinforcementLearner import ReinforcementLearner
@@ -41,22 +41,25 @@ class ReinforcementLearner_multiproc(ReinforcementLearner):

         env_info = self.pack_env_dict(dk.pair)

+        eval_freq = len(train_df) // self.max_threads
+
         env_id = "train_env"
-        self.train_env = SubprocVecEnv([make_env(self.MyRLEnv, env_id, i, 1,
-                                                 train_df, prices_train,
-                                                 monitor=True,
-                                                 env_info=env_info) for i
-                                        in range(self.max_threads)])
+        self.train_env = VecMonitor(SubprocVecEnv([make_env(self.MyRLEnv, env_id, i, 1,
+                                                   train_df, prices_train,
+                                                   env_info=env_info) for i
+                                                   in range(self.max_threads)]))

         eval_env_id = 'eval_env'
-        self.eval_env = SubprocVecEnv([make_env(self.MyRLEnv, eval_env_id, i, 1,
-                                                test_df, prices_test,
-                                                monitor=True,
-                                                env_info=env_info) for i
-                                       in range(self.max_threads)])
+        self.eval_env = VecMonitor(SubprocVecEnv([make_env(self.MyRLEnv, eval_env_id, i, 1,
+                                                  test_df, prices_test,
+                                                  env_info=env_info) for i
+                                                  in range(self.max_threads)]))

         self.eval_callback = EvalCallback(self.eval_env, deterministic=True,
-                                          render=False, eval_freq=len(train_df),
+                                          render=False, eval_freq=eval_freq,
                                           best_model_save_path=str(dk.data_path))

+        # TENSORBOARD CALLBACK DOES NOT RECOMMENDED TO USE WITH MULTIPLE ENVS,
+        # IT WILL RETURN FALSE INFORMATIONS, NEVERTHLESS NOT THREAD SAFE WITH SB3!!!
         actions = self.train_env.env_method("get_actions")[0]
         self.tensorboard_callback = TensorboardCallback(verbose=1, actions=actions)
@@ -1721,10 +1721,8 @@ class FreqtradeBot(LoggingMixin):
         else:
             trade.exit_order_status = reason

-        order = trade.select_order_by_order_id(order_id)
-        if not order:
-            raise DependencyException(
-                f"Order_obj not found for {order_id}. This should not have happened.")
+        order_or_none = trade.select_order_by_order_id(order_id)
+        order = self.order_obj_or_raise(order_id, order_or_none)

         profit_rate: float = trade.safe_close_rate
         profit_trade = trade.calc_profit(rate=profit_rate)
@@ -1765,6 +1763,12 @@ class FreqtradeBot(LoggingMixin):
         # Send the message
         self.rpc.send_msg(msg)

+    def order_obj_or_raise(self, order_id: str, order_obj: Optional[Order]) -> Order:
+        if not order_obj:
+            raise DependencyException(
+                f"Order_obj not found for {order_id}. This should not have happened.")
+        return order_obj
+
     #
     # Common update trade state methods
     #
@@ -1803,10 +1807,8 @@ class FreqtradeBot(LoggingMixin):
             # Handling of this will happen in check_handle_timedout.
             return True

-        order_obj = trade.select_order_by_order_id(order_id)
-        if not order_obj:
-            raise DependencyException(
-                f"Order_obj not found for {order_id}. This should not have happened.")
+        order_obj_or_none = trade.select_order_by_order_id(order_id)
+        order_obj = self.order_obj_or_raise(order_id, order_obj_or_none)

         self.handle_order_fee(trade, order_obj, order)

@@ -1824,16 +1826,18 @@ class FreqtradeBot(LoggingMixin):
             # Must also run for partial exits
             # TODO: Margin will need to use interest_rate as well.
             # interest_rate = self.exchange.get_interest_rate()
-            trade.set_liquidation_price(self.exchange.get_liquidation_price(
-                pair=trade.pair,
-                open_rate=trade.open_rate,
-                is_short=trade.is_short,
-                amount=trade.amount,
-                stake_amount=trade.stake_amount,
-                leverage=trade.leverage,
-                wallet_balance=trade.stake_amount,
-            ))
+            try:
+                trade.set_liquidation_price(self.exchange.get_liquidation_price(
+                    pair=trade.pair,
+                    open_rate=trade.open_rate,
+                    is_short=trade.is_short,
+                    amount=trade.amount,
+                    stake_amount=trade.stake_amount,
+                    leverage=trade.leverage,
+                    wallet_balance=trade.stake_amount,
+                ))
+            except DependencyException:
+                logger.warning('Unable to calculate liquidation price')
             # Updating wallets when order is closed
             self.wallets.update()
             Trade.commit()
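Beyond removing duplication, `order_obj_or_raise` gives the type checker a single spot where `Optional[Order]` is narrowed to `Order`, so callers downstream see a non-optional value. A minimal sketch of the narrowing pattern with stand-in types:

```python
from typing import Optional


class Order:  # stand-in for freqtrade.persistence.Order
    pass


class DependencyException(Exception):
    pass


def order_obj_or_raise(order_id: str, order_obj: Optional[Order]) -> Order:
    # The Optional is resolved here once; every caller gets a plain Order.
    if not order_obj:
        raise DependencyException(
            f"Order_obj not found for {order_id}. This should not have happened.")
    return order_obj
```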
@@ -9,7 +9,6 @@ from copy import deepcopy
 from datetime import datetime, timedelta, timezone
 from typing import Any, Dict, List, Optional, Tuple

-import pandas as pd
 from numpy import nan
 from pandas import DataFrame

@@ -28,8 +27,10 @@ from freqtrade.exchange import (amount_to_contract_precision, price_to_precision
 from freqtrade.mixins import LoggingMixin
 from freqtrade.optimize.backtest_caching import get_strategy_run_id
 from freqtrade.optimize.bt_progress import BTProgress
-from freqtrade.optimize.optimize_reports import (generate_backtest_stats, show_backtest_results,
-                                                 store_backtest_signal_candles,
+from freqtrade.optimize.optimize_reports import (generate_backtest_stats, generate_rejected_signals,
+                                                 generate_trade_signal_candles,
+                                                 show_backtest_results,
+                                                 store_backtest_analysis_results,
                                                  store_backtest_stats)
 from freqtrade.persistence import LocalTrade, Order, PairLocks, Trade
 from freqtrade.plugins.pairlistmanager import PairListManager
@@ -84,6 +85,8 @@ class Backtesting:
         self.strategylist: List[IStrategy] = []
         self.all_results: Dict[str, Dict] = {}
         self.processed_dfs: Dict[str, Dict] = {}
+        self.rejected_dict: Dict[str, List] = {}
+        self.rejected_df: Dict[str, Dict] = {}

         self._exchange_name = self.config['exchange']['name']
         self.exchange = ExchangeResolver.load_exchange(
@@ -1056,6 +1059,18 @@ class Backtesting:
             return None
         return row

+    def _collate_rejected(self, pair, row):
+        """
+        Temporarily store rejected signal information for downstream use in backtesting_analysis
+        """
+        # It could be fun to enable hyperopt mode to write
+        # a loss function to reduce rejected signals
+        if (self.config.get('export', 'none') == 'signals' and
+                self.dataprovider.runmode == RunMode.BACKTEST):
+            if pair not in self.rejected_dict:
+                self.rejected_dict[pair] = []
+            self.rejected_dict[pair].append([row[DATE_IDX], row[ENTER_TAG_IDX]])
+
     def backtest_loop(
             self, row: Tuple, pair: str, current_time: datetime, end_date: datetime,
             open_trade_count_start: int, trade_dir: Optional[LongShort],
@@ -1081,20 +1096,22 @@ class Backtesting:
         if (
             (self._position_stacking or len(LocalTrade.bt_trades_open_pp[pair]) == 0)
             and is_first
-            and self.trade_slot_available(open_trade_count_start)
             and current_time != end_date
             and trade_dir is not None
             and not PairLocks.is_pair_locked(pair, row[DATE_IDX], trade_dir)
         ):
-            trade = self._enter_trade(pair, row, trade_dir)
-            if trade:
-                # TODO: hacky workaround to avoid opening > max_open_trades
-                # This emulates previous behavior - not sure if this is correct
-                # Prevents entering if the trade-slot was freed in this candle
-                open_trade_count_start += 1
-                # logger.debug(f"{pair} - Emulate creation of new trade: {trade}.")
-                LocalTrade.add_bt_trade(trade)
-                self.wallets.update()
+            if (self.trade_slot_available(open_trade_count_start)):
+                trade = self._enter_trade(pair, row, trade_dir)
+                if trade:
+                    # TODO: hacky workaround to avoid opening > max_open_trades
+                    # This emulates previous behavior - not sure if this is correct
+                    # Prevents entering if the trade-slot was freed in this candle
+                    open_trade_count_start += 1
+                    # logger.debug(f"{pair} - Emulate creation of new trade: {trade}.")
+                    LocalTrade.add_bt_trade(trade)
+                    self.wallets.update()
+            else:
+                self._collate_rejected(pair, row)

         for trade in list(LocalTrade.bt_trades_open_pp[pair]):
             # 3. Process entry orders.
@@ -1236,8 +1253,8 @@ class Backtesting:
     def backtest_one_strategy(self, strat: IStrategy, data: Dict[str, DataFrame],
                               timerange: TimeRange):
         self.progress.init_step(BacktestState.ANALYZE, 0)
-        logger.info(f"Running backtesting for Strategy {strat.get_strategy_name()}")
+        strategy_name = strat.get_strategy_name()
+        logger.info(f"Running backtesting for Strategy {strategy_name}")
         backtest_start_time = datetime.now(timezone.utc)
         self._set_strategy(strat)
@@ -1272,37 +1289,21 @@ class Backtesting:
         )
         backtest_end_time = datetime.now(timezone.utc)
         results.update({
-            'run_id': self.run_ids.get(strat.get_strategy_name(), ''),
+            'run_id': self.run_ids.get(strategy_name, ''),
             'backtest_start_time': int(backtest_start_time.timestamp()),
             'backtest_end_time': int(backtest_end_time.timestamp()),
         })
-        self.all_results[self.strategy.get_strategy_name()] = results
+        self.all_results[strategy_name] = results

         if (self.config.get('export', 'none') == 'signals' and
                 self.dataprovider.runmode == RunMode.BACKTEST):
-            self._generate_trade_signal_candles(preprocessed_tmp, results)
+            self.processed_dfs[strategy_name] = generate_trade_signal_candles(
+                preprocessed_tmp, results)
+            self.rejected_df[strategy_name] = generate_rejected_signals(
+                preprocessed_tmp, self.rejected_dict)

         return min_date, max_date

-    def _generate_trade_signal_candles(self, preprocessed_df, bt_results):
-        signal_candles_only = {}
-        for pair in preprocessed_df.keys():
-            signal_candles_only_df = DataFrame()
-
-            pairdf = preprocessed_df[pair]
-            resdf = bt_results['results']
-            pairresults = resdf.loc[(resdf["pair"] == pair)]
-
-            if pairdf.shape[0] > 0:
-                for t, v in pairresults.open_date.items():
-                    allinds = pairdf.loc[(pairdf['date'] < v)]
-                    signal_inds = allinds.iloc[[-1]]
-                    signal_candles_only_df = pd.concat([signal_candles_only_df, signal_inds])
-
-                signal_candles_only[pair] = signal_candles_only_df
-
-            self.processed_dfs[self.strategy.get_strategy_name()] = signal_candles_only
-
     def _get_min_cached_backtest_date(self):
         min_backtest_date = None
         backtest_cache_age = self.config.get('backtest_cache', constants.BACKTEST_CACHE_DEFAULT)
@@ -1365,8 +1366,9 @@ class Backtesting:

         if (self.config.get('export', 'none') == 'signals' and
                 self.dataprovider.runmode == RunMode.BACKTEST):
-            store_backtest_signal_candles(
-                self.config['exportfilename'], self.processed_dfs, dt_appendix)
+            store_backtest_analysis_results(
+                self.config['exportfilename'], self.processed_dfs, self.rejected_df,
+                dt_appendix)

         # Results may be mixed up now. Sort them so they follow --strategy-list order.
         if 'strategy_list' in self.config and len(self.results) > 0:
@@ -4,7 +4,7 @@ from datetime import datetime, timedelta, timezone
 from pathlib import Path
 from typing import Any, Dict, List, Union

-from pandas import DataFrame, to_datetime
+from pandas import DataFrame, concat, to_datetime
 from tabulate import tabulate

 from freqtrade.constants import (BACKTEST_BREAKDOWNS, DATETIME_PRINT_FORMAT, LAST_BT_RESULT_FN,
@@ -46,29 +46,80 @@ def store_backtest_stats(
     file_dump_json(latest_filename, {'latest_backtest': str(filename.name)})


-def store_backtest_signal_candles(
-        recordfilename: Path, candles: Dict[str, Dict], dtappendix: str) -> Path:
+def _store_backtest_analysis_data(
+        recordfilename: Path, data: Dict[str, Dict],
+        dtappendix: str, name: str) -> Path:
     """
-    Stores backtest trade signal candles
+    Stores backtest trade candles for analysis
     :param recordfilename: Path object, which can either be a filename or a directory.
         Filenames will be appended with a timestamp right before the suffix
-        while for directories, <directory>/backtest-result-<datetime>_signals.pkl will be used
+        while for directories, <directory>/backtest-result-<datetime>_<name>.pkl will be used
         as filename
-    :param stats: Dict containing the backtesting signal candles
+    :param candles: Dict containing the backtesting data for analysis
     :param dtappendix: Datetime to use for the filename
+    :param name: Name to use for the file, e.g. signals, rejected
     """
     if recordfilename.is_dir():
-        filename = (recordfilename / f'backtest-result-{dtappendix}_signals.pkl')
+        filename = (recordfilename / f'backtest-result-{dtappendix}_{name}.pkl')
     else:
         filename = Path.joinpath(
-            recordfilename.parent, f'{recordfilename.stem}-{dtappendix}_signals.pkl'
+            recordfilename.parent, f'{recordfilename.stem}-{dtappendix}_{name}.pkl'
         )

-    file_dump_joblib(filename, candles)
+    file_dump_joblib(filename, data)

     return filename


+def store_backtest_analysis_results(
+        recordfilename: Path, candles: Dict[str, Dict], trades: Dict[str, Dict],
+        dtappendix: str) -> None:
+    _store_backtest_analysis_data(recordfilename, candles, dtappendix, "signals")
+    _store_backtest_analysis_data(recordfilename, trades, dtappendix, "rejected")
+
+
+def generate_trade_signal_candles(preprocessed_df: Dict[str, DataFrame],
+                                  bt_results: Dict[str, Any]) -> DataFrame:
+    signal_candles_only = {}
+    for pair in preprocessed_df.keys():
+        signal_candles_only_df = DataFrame()
+
+        pairdf = preprocessed_df[pair]
+        resdf = bt_results['results']
+        pairresults = resdf.loc[(resdf["pair"] == pair)]
+
+        if pairdf.shape[0] > 0:
+            for t, v in pairresults.open_date.items():
+                allinds = pairdf.loc[(pairdf['date'] < v)]
+                signal_inds = allinds.iloc[[-1]]
+                signal_candles_only_df = concat([
+                    signal_candles_only_df.infer_objects(),
+                    signal_inds.infer_objects()])
+
+            signal_candles_only[pair] = signal_candles_only_df
+    return signal_candles_only
+
+
+def generate_rejected_signals(preprocessed_df: Dict[str, DataFrame],
+                              rejected_dict: Dict[str, DataFrame]) -> Dict[str, DataFrame]:
+    rejected_candles_only = {}
+    for pair, signals in rejected_dict.items():
+        rejected_signals_only_df = DataFrame()
+        pairdf = preprocessed_df[pair]
+
+        for t in signals:
+            data_df_row = pairdf.loc[(pairdf['date'] == t[0])].copy()
+            data_df_row['pair'] = pair
+            data_df_row['enter_tag'] = t[1]
+
+            rejected_signals_only_df = concat([
+                rejected_signals_only_df.infer_objects(),
+                data_df_row.infer_objects()])
+
+        rejected_candles_only[pair] = rejected_signals_only_df
+    return rejected_candles_only
+
+
 def _get_line_floatfmt(stake_currency: str) -> List[str]:
     """
     Generate floatformat (goes in line with _generate_result_line())
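`generate_trade_signal_candles` selects, for each backtested trade, the last candle strictly before the trade's `open_date` - i.e. the candle the entry signal fired on. A stripped-down sketch of that selection with hypothetical data:

```python
import pandas as pd

candles = pd.DataFrame({
    'date': pd.to_datetime(['2023-01-01 00:00', '2023-01-01 00:05', '2023-01-01 00:10']),
    'close': [100.0, 101.0, 102.0],
})
open_date = pd.Timestamp('2023-01-01 00:10')

# Last candle strictly before the trade opened - the signal candle.
allinds = candles.loc[candles['date'] < open_date]
signal_candle = allinds.iloc[[-1]]
print(signal_candle)  # the 00:05 row
```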
@@ -247,14 +247,17 @@ def pair_candles(

 @router.get('/pair_history', response_model=PairHistory, tags=['candle data'])
 def pair_history(pair: str, timeframe: str, timerange: str, strategy: str,
+                 freqaimodel: Optional[str] = None,
                  config=Depends(get_config), exchange=Depends(get_exchange)):
+    # The initial call to this endpoint can be slow, as it may need to initialize
+    # the exchange class.
     config = deepcopy(config)
     config.update({
         'strategy': strategy,
         'timerange': timerange,
+        'freqaimodel': freqaimodel if freqaimodel else config.get('freqaimodel'),
     })
-    return RPC._rpc_analysed_history_full(config, pair, timeframe, timerange, exchange)
+    return RPC._rpc_analysed_history_full(config, pair, timeframe, exchange)


 @router.get('/plot_config', response_model=PlotConfig, tags=['candle data'])
@@ -420,16 +420,15 @@ class RPC:
         else:
             return 'draws'
         trades = Trade.get_trades([Trade.is_open.is_(False)], include_orders=False)
-        # Sell reason
+        # Duration
+        dur: Dict[str, List[float]] = {'wins': [], 'draws': [], 'losses': []}
+        # Exit reason
         exit_reasons = {}
         for trade in trades:
             if trade.exit_reason not in exit_reasons:
                 exit_reasons[trade.exit_reason] = {'wins': 0, 'losses': 0, 'draws': 0}
             exit_reasons[trade.exit_reason][trade_win_loss(trade)] += 1

-        # Duration
-        dur: Dict[str, List[float]] = {'wins': [], 'draws': [], 'losses': []}
-        for trade in trades:
             if trade.close_date is not None and trade.open_date is not None:
                 trade_dur = (trade.close_date - trade.open_date).total_seconds()
                 dur[trade_win_loss(trade)].append(trade_dur)
@@ -1216,8 +1215,8 @@ class RPC:

     @staticmethod
     def _rpc_analysed_history_full(config: Config, pair: str, timeframe: str,
-                                   timerange: str, exchange) -> Dict[str, Any]:
-        timerange_parsed = TimeRange.parse_timerange(timerange)
+                                   exchange) -> Dict[str, Any]:
+        timerange_parsed = TimeRange.parse_timerange(config.get('timerange'))

         _data = load_data(
             datadir=config["datadir"],
@@ -1228,7 +1227,8 @@ class RPC:
             candle_type=config.get('candle_type_def', CandleType.SPOT)
         )
         if pair not in _data:
-            raise RPCException(f"No data for {pair}, {timeframe} in {timerange} found.")
+            raise RPCException(
+                f"No data for {pair}, {timeframe} in {config.get('timerange')} found.")
         from freqtrade.data.dataprovider import DataProvider
         from freqtrade.resolvers.strategy_resolver import StrategyResolver
         strategy = StrategyResolver.load_strategy(config)
@@ -1,5 +1,5 @@
 [build-system]
-requires = ["setuptools >= 46.4.0", "wheel"]
+requires = ["setuptools >= 64.0.0", "wheel"]
 build-backend = "setuptools.build_meta"

 [tool.black]
@@ -7,7 +7,7 @@
 -r docs/requirements-docs.txt

 coveralls==3.3.1
-ruff==0.0.262
+ruff==0.0.263
 mypy==1.2.0
 pre-commit==3.2.2
 pytest==7.3.1
@@ -25,6 +25,6 @@ nbconvert==7.3.1
 # mypy types
 types-cachetools==5.3.0.5
 types-filelock==3.2.7
-types-requests==2.28.11.17
+types-requests==2.29.0.0
 types-tabulate==0.9.0.2
 types-python-dateutil==2.8.19.12
@@ -3,10 +3,9 @@

 # Required for freqai-rl
 torch==1.13.1; python_version < '3.11'
-stable-baselines3==1.7.0; python_version < '3.11'
-sb3-contrib==1.7.0; python_version < '3.11'
-# Gym is forced to this version by stable-baselines3.
-setuptools==65.5.1 # Should be removed when gym is fixed.
-gym==0.21; python_version < '3.11'
+#until these branches will be released we can use this
+gymnasium==0.28.1
+stable_baselines3==2.0.0a5
+sb3_contrib>=2.0.0a4
 # Progress bar for stable-baselines3 and sb3-contrib
-tqdm==4.65.0; python_version < '3.11'
+tqdm==4.65.0
@@ -2,16 +2,16 @@ numpy==1.24.3
 pandas==2.0.1
 pandas-ta==0.3.14b

-ccxt==3.0.75
+ccxt==3.0.85
 cryptography==40.0.2
 aiohttp==3.8.4
-SQLAlchemy==2.0.10
+SQLAlchemy==2.0.12
 python-telegram-bot==20.2
 # can't be hard-pinned due to telegram-bot pinning httpx with ~
 httpx>=0.23.3
 arrow==1.2.3
 cachetools==4.2.2
-requests==2.28.2
+requests==2.29.0
 urllib3==1.26.15
 jsonschema==4.17.3
 TA-Lib==0.4.26
@@ -22,7 +22,7 @@ jinja2==3.1.2
 tables==3.8.0
 blosc==1.11.1
 joblib==1.2.0
-rich==13.3.4
+rich==13.3.5
 pyarrow==11.0.0; platform_machine != 'armv7l'

 # find first, C search in arrays
@@ -31,7 +31,7 @@ py_find_1st==1.1.5
 # Load ticker files 30% faster
 python-rapidjson==1.10
 # Properly format api responses
-orjson==3.8.10
+orjson==3.8.11

 # Notify systemd
 sdnotify==0.3.2
@@ -39,7 +39,7 @@ sdnotify==0.3.2
 # API Server
 fastapi==0.95.1
 pydantic==1.10.7
-uvicorn==0.21.1
+uvicorn==0.22.0
 pyjwt==2.6.0
 aiofiles==23.1.0
 psutil==5.9.5
@@ -60,3 +60,4 @@ websockets==11.0.2
 janus==1.0.0

 ast-comments==1.0.1
+packaging==23.1
@@ -348,12 +348,13 @@ class FtRestClient():
             params['limit'] = limit
         return self._get("pair_candles", params=params)

-    def pair_history(self, pair, timeframe, strategy, timerange=None):
+    def pair_history(self, pair, timeframe, strategy, timerange=None, freqaimodel=None):
         """Return historic, analyzed dataframe

         :param pair: Pair to get data for
         :param timeframe: Only pairs with this timeframe available.
         :param strategy: Strategy to analyze and get values for
+        :param freqaimodel: FreqAI model to use for analysis
         :param timerange: Timerange to get data for (same format than --timerange endpoints)
         :return: json object
         """
@@ -361,6 +362,7 @@ class FtRestClient():
             "pair": pair,
             "timeframe": timeframe,
             "strategy": strategy,
+            "freqaimodel": freqaimodel,
             "timerange": timerange if timerange else '',
         })
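Through the script, the new parameter simply rides along in the query string. A hypothetical invocation (server address, credentials, and model name are assumptions):

```python
from rest_client import FtRestClient  # scripts/rest_client.py on the path

client = FtRestClient("http://127.0.0.1:8080", "freqtrader", "password")
data = client.pair_history("BTC/USDT", "5m", "MyStrategy",
                           timerange="20230101-20230201",
                           freqaimodel="LightGBMRegressor")
```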
setup.py
@@ -12,16 +12,19 @@ hyperopt = [

 freqai = [
     'scikit-learn',
     'joblib',
     'catboost; platform_machine != "aarch64"',
     'lightgbm',
-    'xgboost'
+    'xgboost',
+    'tensorboard'
 ]

 freqai_rl = [
     'torch',
+    'gymnasium',
     'stable-baselines3',
-    'gym==0.21',
-    'sb3-contrib'
+    'sb3-contrib',
+    'tqdm'
 ]

 hdf5 = [
@@ -32,11 +35,20 @@ hdf5 = [
 develop = [
     'coveralls',
     'mypy',
     'ruff',
     'pre-commit',
     'pytest',
     'pytest-asyncio',
     'pytest-cov',
     'pytest-mock',
     'pytest-random-order',
     'isort',
     'time-machine',
     'types-cachetools',
     'types-filelock',
     'types-requests',
     'types-tabulate',
     'types-python-dateutil'
 ]

 jupyter = [
@@ -91,7 +103,13 @@ setup(
         'aiofiles',
         'schedule',
         'websockets',
-        'janus'
+        'janus',
+        'ast-comments',
+        'aiohttp',
+        'cryptography',
+        'httpx',
+        'python-dateutil',
+        'packaging',
     ],
     extras_require={
         'dev': all_extra,
setup.sh
@@ -49,8 +49,7 @@ function updateenv() {
     source .env/bin/activate
     SYS_ARCH=$(uname -m)
     echo "pip install in-progress. Please wait..."
-    # Setuptools 65.5.0 is the last version that can install gym==0.21.0
-    ${PYTHON} -m pip install --upgrade pip==23.0.1 wheel==0.38.4 setuptools==65.5.1
+    ${PYTHON} -m pip install --upgrade pip wheel setuptools
     REQUIREMENTS_HYPEROPT=""
     REQUIREMENTS_PLOT=""
     REQUIREMENTS_FREQAI=""
@@ -200,8 +200,17 @@ def test_backtest_analysis_nomock(default_conf, mocker, caplog, testdatadir, tmp
     assert 'trailing_stop_loss' in captured.out

     # test date filtering
-    args = get_args(base_args + ['--timerange', "20180129-20180130"])
+    args = get_args(base_args +
+                    ['--analysis-groups', "0", "1", "2",
+                     '--timerange', "20180129-20180130"]
+                    )
     start_analysis_entries_exits(args)
     captured = capsys.readouterr()
     assert 'enter_tag_long_a' in captured.out
     assert 'enter_tag_long_b' not in captured.out

+    # Due to the backtest mock, there's no rejected signals generated.
+    args = get_args(base_args + ['--rejected-signals'])
+    start_analysis_entries_exits(args)
+    captured = capsys.readouterr()
+    assert 'no rejected signals' in captured.out
@@ -4932,7 +4932,7 @@ def test_get_maintenance_ratio_and_amt_exceptions(mocker, default_conf, leverage

     exchange._leverage_tiers = leverage_tiers
     with pytest.raises(
-        OperationalException,
+        DependencyException,
         match='nominal value can not be lower than 0',
     ):
         exchange.get_maintenance_ratio_and_amt('1000SHIB/USDT:USDT', -1)
@@ -354,7 +354,7 @@ def test_backtesting_start(default_conf, mocker, caplog) -> None:
     mocker.patch('freqtrade.optimize.backtesting.generate_backtest_stats')
     mocker.patch('freqtrade.optimize.backtesting.show_backtest_results')
     sbs = mocker.patch('freqtrade.optimize.backtesting.store_backtest_stats')
-    sbc = mocker.patch('freqtrade.optimize.backtesting.store_backtest_signal_candles')
+    sbc = mocker.patch('freqtrade.optimize.backtesting.store_backtest_analysis_results')
     mocker.patch('freqtrade.plugins.pairlistmanager.PairListManager.whitelist',
                  PropertyMock(return_value=['UNITTEST/BTC']))
@@ -21,7 +21,7 @@ from freqtrade.optimize.optimize_reports import (_get_resample_from_period, gene
                                                  generate_periodic_breakdown_stats,
                                                  generate_strategy_comparison,
                                                  generate_trading_stats, show_sorted_pairlist,
-                                                 store_backtest_signal_candles,
+                                                 store_backtest_analysis_results,
                                                  store_backtest_stats, text_table_bt_results,
                                                  text_table_exit_reason, text_table_strategy)
 from freqtrade.resolvers.strategy_resolver import StrategyResolver
@@ -232,17 +232,17 @@ def test_store_backtest_candles(testdatadir, mocker):
     candle_dict = {'DefStrat': {'UNITTEST/BTC': pd.DataFrame()}}

     # mock directory exporting
-    store_backtest_signal_candles(testdatadir, candle_dict, '2022_01_01_15_05_13')
+    store_backtest_analysis_results(testdatadir, candle_dict, {}, '2022_01_01_15_05_13')

-    assert dump_mock.call_count == 1
+    assert dump_mock.call_count == 2
     assert isinstance(dump_mock.call_args_list[0][0][0], Path)
     assert str(dump_mock.call_args_list[0][0][0]).endswith('_signals.pkl')

     dump_mock.reset_mock()
     # mock file exporting
     filename = Path(testdatadir / 'testresult')
-    store_backtest_signal_candles(filename, candle_dict, '2022_01_01_15_05_13')
-    assert dump_mock.call_count == 1
+    store_backtest_analysis_results(filename, candle_dict, {}, '2022_01_01_15_05_13')
+    assert dump_mock.call_count == 2
     assert isinstance(dump_mock.call_args_list[0][0][0], Path)
     # result will be testdatadir / testresult-<timestamp>_signals.pkl
     assert str(dump_mock.call_args_list[0][0][0]).endswith('_signals.pkl')
@@ -254,10 +254,11 @@ def test_write_read_backtest_candles(tmpdir):
     candle_dict = {'DefStrat': {'UNITTEST/BTC': pd.DataFrame()}}

     # test directory exporting
-    stored_file = store_backtest_signal_candles(Path(tmpdir), candle_dict, '2022_01_01_15_05_13')
-    scp = stored_file.open("rb")
-    pickled_signal_candles = joblib.load(scp)
-    scp.close()
+    sample_date = '2022_01_01_15_05_13'
+    store_backtest_analysis_results(Path(tmpdir), candle_dict, {}, sample_date)
+    stored_file = Path(tmpdir / f'backtest-result-{sample_date}_signals.pkl')
+    with stored_file.open("rb") as scp:
+        pickled_signal_candles = joblib.load(scp)

     assert pickled_signal_candles.keys() == candle_dict.keys()
     assert pickled_signal_candles['DefStrat'].keys() == pickled_signal_candles['DefStrat'].keys()
@@ -268,10 +269,10 @@ def test_write_read_backtest_candles(tmpdir):

     # test file exporting
     filename = Path(tmpdir / 'testresult')
-    stored_file = store_backtest_signal_candles(filename, candle_dict, '2022_01_01_15_05_13')
-    scp = stored_file.open("rb")
-    pickled_signal_candles = joblib.load(scp)
-    scp.close()
+    store_backtest_analysis_results(filename, candle_dict, {}, sample_date)
+    stored_file = Path(tmpdir / f'testresult-{sample_date}_signals.pkl')
+    with stored_file.open("rb") as scp:
+        pickled_signal_candles = joblib.load(scp)

     assert pickled_signal_candles.keys() == candle_dict.keys()
     assert pickled_signal_candles['DefStrat'].keys() == pickled_signal_candles['DefStrat'].keys()
@@ -825,6 +825,9 @@ async def test_telegram_stats(default_conf, update, ticker, fee, mocker, is_short
     assert 'Exit Reason' in msg_mock.call_args_list[-1][0][0]
     assert 'ROI' in msg_mock.call_args_list[-1][0][0]
     assert 'Avg. Duration' in msg_mock.call_args_list[-1][0][0]
+    # Duration is not only N/A
+    assert '0:19:00' in msg_mock.call_args_list[-1][0][0]
+    assert 'N/A' in msg_mock.call_args_list[-1][0][0]
     msg_mock.reset_mock()