diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 0393b5cb9..8ceac4a7f 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -160,7 +160,8 @@ jobs:
- name: Installation - macOS
if: runner.os == 'macOS'
run: |
- brew update
+ # brew update
+ # TODO: Should be brew upgrade
# homebrew fails to update python due to unlinking failures
# https://github.com/actions/runner-images/issues/6817
rm /usr/local/bin/2to3 || true
@@ -460,7 +461,7 @@ jobs:
python setup.py sdist bdist_wheel
- name: Publish to PyPI (Test)
- uses: pypa/gh-action-pypi-publish@v1.8.6
+ uses: pypa/gh-action-pypi-publish@v1.8.7
if: (github.event_name == 'release')
with:
user: __token__
@@ -468,7 +469,7 @@ jobs:
repository_url: https://test.pypi.org/legacy/
- name: Publish to PyPI
- uses: pypa/gh-action-pypi-publish@v1.8.6
+ uses: pypa/gh-action-pypi-publish@v1.8.7
if: (github.event_name == 'release')
with:
user: __token__
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 476d63847..56c8a6010 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -18,7 +18,7 @@ repos:
- types-requests==2.31.0.1
- types-tabulate==0.9.0.2
- types-python-dateutil==2.8.19.13
- - SQLAlchemy==2.0.16
+ - SQLAlchemy==2.0.17
# stages: [push]
- repo: https://github.com/pycqa/isort
diff --git a/docs/advanced-hyperopt.md b/docs/advanced-hyperopt.md
index ff0521f4f..eb8bf3f84 100644
--- a/docs/advanced-hyperopt.md
+++ b/docs/advanced-hyperopt.md
@@ -136,7 +136,7 @@ class MyAwesomeStrategy(IStrategy):
### Dynamic parameters
-Parameters can also be defined dynamically, but must be available to the instance once the * [`bot_start()` callback](strategy-callbacks.md#bot-start) has been called.
+Parameters can also be defined dynamically, but must be available to the instance once the [`bot_start()` callback](strategy-callbacks.md#bot-start) has been called.
``` python
diff --git a/docs/developer.md b/docs/developer.md
index 2782f0117..4784e5352 100644
--- a/docs/developer.md
+++ b/docs/developer.md
@@ -453,7 +453,13 @@ Once the PR against stable is merged (best right after merging):
* Use the button "Draft a new release" in the Github UI (subsection releases).
* Use the version-number specified as tag.
* Use "stable" as reference (this step comes after the above PR is merged).
-* Use the above changelog as release comment (as codeblock)
+* Use the above changelog as release comment (as codeblock).
+* Use the snippet below for the new release.
+
+??? Tip "Release template"
+ ````
+ --8<-- "includes/release_template.md"
+ ````
## Releases
diff --git a/docs/freqai-configuration.md b/docs/freqai-configuration.md
index 6e2ed8379..090fa8415 100644
--- a/docs/freqai-configuration.md
+++ b/docs/freqai-configuration.md
@@ -160,7 +160,7 @@ Below are the values you can expect to include/use inside a typical strategy dat
|------------|-------------|
| `df['&*']` | Any dataframe column prepended with `&` in `set_freqai_targets()` is treated as a training target (label) inside FreqAI (typically following the naming convention `&-s*`). For example, to predict the close price 40 candles into the future, you would set `df['&-s_close'] = df['close'].shift(-self.freqai_info["feature_parameters"]["label_period_candles"])` with `"label_period_candles": 40` in the config. FreqAI makes the predictions and gives them back under the same key (`df['&-s_close']`) to be used in `populate_entry/exit_trend()`.
**Datatype:** Depends on the output of the model.
| `df['&*_std/mean']` | Standard deviation and mean values of the defined labels during training (or live tracking with `fit_live_predictions_candles`). Commonly used to understand the rarity of a prediction (use the z-score as shown in `templates/FreqaiExampleStrategy.py` and explained [here](#creating-a-dynamic-target-threshold) to evaluate how often a particular prediction was observed during training or historically with `fit_live_predictions_candles`).
**Datatype:** Float.
-| `df['do_predict']` | Indication of an outlier data point. The return value is integer between -2 and 2, which lets you know if the prediction is trustworthy or not. `do_predict==1` means that the prediction is trustworthy. If the Dissimilarity Index (DI, see details [here](freqai-feature-engineering.md#identifying-outliers-with-the-dissimilarity-index-di)) of the input data point is above the threshold defined in the config, FreqAI will subtract 1 from `do_predict`, resulting in `do_predict==0`. If `use_SVM_to_remove_outliers()` is active, the Support Vector Machine (SVM, see details [here](freqai-feature-engineering.md#identifying-outliers-using-a-support-vector-machine-svm)) may also detect outliers in training and prediction data. In this case, the SVM will also subtract 1 from `do_predict`. If the input data point was considered an outlier by the SVM but not by the DI, or vice versa, the result will be `do_predict==0`. If both the DI and the SVM considers the input data point to be an outlier, the result will be `do_predict==-1`. As with the SVM, if `use_DBSCAN_to_remove_outliers` is active, DBSCAN (see details [here](freqai-feature-engineering.md#identifying-outliers-with-dbscan)) may also detect outliers and subtract 1 from `do_predict`. Hence, if both the SVM and DBSCAN are active and identify a datapoint that was above the DI threshold as an outlier, the result will be `do_predict==-2`. A particular case is when `do_predict == 2`, which means that the model has expired due to exceeding `expired_hours`.
**Datatype:** Integer between -2 and 2.
+| `df['do_predict']` | Indication of an outlier data point. The return value is an integer between -2 and 2, which lets you know if the prediction is trustworthy or not. `do_predict==1` means that the prediction is trustworthy. If the Dissimilarity Index (DI, see details [here](freqai-feature-engineering.md#identifying-outliers-with-the-dissimilarity-index-di)) of the input data point is above the threshold defined in the config, FreqAI will subtract 1 from `do_predict`, resulting in `do_predict==0`. If `use_SVM_to_remove_outliers` is active, the Support Vector Machine (SVM, see details [here](freqai-feature-engineering.md#identifying-outliers-using-a-support-vector-machine-svm)) may also detect outliers in training and prediction data. In this case, the SVM will also subtract 1 from `do_predict`. If the input data point was considered an outlier by the SVM but not by the DI, or vice versa, the result will be `do_predict==0`. If both the DI and the SVM consider the input data point to be an outlier, the result will be `do_predict==-1`. As with the SVM, if `use_DBSCAN_to_remove_outliers` is active, DBSCAN (see details [here](freqai-feature-engineering.md#identifying-outliers-with-dbscan)) may also detect outliers and subtract 1 from `do_predict`. Hence, if both the SVM and DBSCAN are active and identify a datapoint that was above the DI threshold as an outlier, the result will be `do_predict==-2`. A particular case is when `do_predict == 2`, which means that the model has expired due to exceeding `expired_hours`.
**Datatype:** Integer between -2 and 2.
| `df['DI_values']` | Dissimilarity Index (DI) values are proxies for the level of confidence FreqAI has in the prediction. A lower DI means the prediction is close to the training data, i.e., higher prediction confidence. See details about the DI [here](freqai-feature-engineering.md#identifying-outliers-with-the-dissimilarity-index-di).
**Datatype:** Float.
| `df['%*']` | Any dataframe column prepended with `%` in `feature_engineering_*()` is treated as a training feature. For example, you can include the RSI in the training feature set (similar to in `templates/FreqaiExampleStrategy.py`) by setting `df['%-rsi']`. See more details on how this is done [here](freqai-feature-engineering.md).
**Note:** Since the number of features prepended with `%` can multiply very quickly (10s of thousands of features are easily engineered using the multiplicative functionality of, e.g., `include_shifted_candles` and `include_timeframes` as described in the [parameter table](freqai-parameter-table.md)), these features are removed from the dataframe that is returned from FreqAI to the strategy. To keep a particular type of feature for plotting purposes, you would prepend it with `%%`.
**Datatype:** Depends on the output of the model.
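For orientation only (not part of this diff, and assuming the `&-s_close` label plus the `_mean`/`_std` suffixes used in `templates/FreqaiExampleStrategy.py`), a minimal sketch of how the columns described above are typically combined inside `populate_entry_trend()`:

```python
import pandas as pd


def mark_long_entries(df: pd.DataFrame) -> pd.DataFrame:
    # Only act on predictions FreqAI flags as trustworthy (do_predict == 1).
    trustworthy = df["do_predict"] == 1
    # Dynamic target threshold: the prediction must exceed its training mean
    # by one standard deviation (a simple z-score style filter).
    strong_signal = df["&-s_close"] > df["&-s_close_mean"] + df["&-s_close_std"]
    df["enter_long"] = (trustworthy & strong_signal).astype(int)
    return df
```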
diff --git a/docs/includes/release_template.md b/docs/includes/release_template.md
new file mode 100644
index 000000000..87a3564da
--- /dev/null
+++ b/docs/includes/release_template.md
@@ -0,0 +1,37 @@
+## Highlighted changes
+
+- ...
+
+### How to update
+
+As always, you can update your bot using one of the following commands:
+
+#### docker-compose
+
+```bash
+docker-compose pull
+docker-compose up -d
+```
+
+#### Installation via setup script
+
+```bash
+# Deactivate venv and run
+./setup.sh --update
+```
+
+#### Plain native installation
+
+```bash
+git pull
+pip install -U -r requirements.txt
+```
+
+
+Expand full changelog
+
+```
+
+```
+
+
diff --git a/docs/requirements-docs.txt b/docs/requirements-docs.txt
index 5c936a868..d6ee2fd14 100644
--- a/docs/requirements-docs.txt
+++ b/docs/requirements-docs.txt
@@ -1,6 +1,6 @@
markdown==3.3.7
mkdocs==1.4.3
-mkdocs-material==9.1.16
+mkdocs-material==9.1.17
mdx_truly_sane_lists==1.3
pymdown-extensions==10.0.1
jinja2==3.1.2
diff --git a/docs/strategy_migration.md b/docs/strategy_migration.md
index 353da0ccb..d00349d1d 100644
--- a/docs/strategy_migration.md
+++ b/docs/strategy_migration.md
@@ -800,8 +800,8 @@ class MyCoolFreqaiModel(BaseRegressionModel):
if self.freqai_info.get("DI_threshold", 0) > 0:
dk.DI_values = dk.feature_pipeline["di"].di_values
else:
- dk.DI_values = np.zeros(len(outliers.index))
- dk.do_predict = outliers.to_numpy()
+ dk.DI_values = np.zeros(outliers.shape[0])
+ dk.do_predict = outliers
# ... your custom code
return (pred_df, dk.do_predict)
diff --git a/freqtrade/__init__.py b/freqtrade/__init__.py
index 8f7717dd2..2c8dde56d 100644
--- a/freqtrade/__init__.py
+++ b/freqtrade/__init__.py
@@ -1,5 +1,5 @@
""" Freqtrade bot """
-__version__ = '2023.6.dev'
+__version__ = '2023.7.dev'
if 'dev' in __version__:
from pathlib import Path
diff --git a/freqtrade/commands/list_commands.py b/freqtrade/commands/list_commands.py
index dcb102ce5..84f237f77 100644
--- a/freqtrade/commands/list_commands.py
+++ b/freqtrade/commands/list_commands.py
@@ -33,11 +33,11 @@ def start_list_exchanges(args: Dict[str, Any]) -> None:
else:
headers = {
'name': 'Exchange name',
- 'valid': 'Valid',
'supported': 'Supported',
'trade_modes': 'Markets',
'comment': 'Reason',
}
+ headers.update({'valid': 'Valid'} if args['list_exchanges_all'] else {})
def build_entry(exchange: ValidExchangesType, valid: bool):
valid_entry = {'valid': exchange['valid']} if valid else {}
diff --git a/freqtrade/constants.py b/freqtrade/constants.py
index ffcd87744..de1e7aa51 100644
--- a/freqtrade/constants.py
+++ b/freqtrade/constants.py
@@ -112,6 +112,8 @@ MINIMAL_CONFIG = {
}
}
+__MESSAGE_TYPE_DICT: Dict[str, Dict[str, str]] = {x: {'type': 'object'} for x in RPCMessageType}
+
# Required json-schema for user specified config
CONF_SCHEMA = {
'type': 'object',
@@ -354,7 +356,8 @@ CONF_SCHEMA = {
'format': {'type': 'string', 'enum': WEBHOOK_FORMAT_OPTIONS, 'default': 'form'},
'retries': {'type': 'integer', 'minimum': 0},
'retry_delay': {'type': 'number', 'minimum': 0},
- **dict([(x, {'type': 'object'}) for x in RPCMessageType]),
+ **__MESSAGE_TYPE_DICT,
+ # **{x: {'type': 'object'} for x in RPCMessageType},
# Below -> Deprecated
'webhookentry': {'type': 'object'},
'webhookentrycancel': {'type': 'object'},
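As a rough standalone sketch (placeholder message-type names; the real keys come from `RPCMessageType`), the fragment that `__MESSAGE_TYPE_DICT` merges into the webhook schema looks like this:

```python
# Hypothetical message-type names for illustration only.
message_types = ['status', 'entry', 'entry_fill', 'exit', 'exit_fill']
message_type_dict = {x: {'type': 'object'} for x in message_types}
# -> {'status': {'type': 'object'}, 'entry': {'type': 'object'}, ...}
```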
diff --git a/freqtrade/data/history/history_utils.py b/freqtrade/data/history/history_utils.py
index e61f59cfa..cca2cfa72 100644
--- a/freqtrade/data/history/history_utils.py
+++ b/freqtrade/data/history/history_utils.py
@@ -14,8 +14,8 @@ from freqtrade.data.history.idatahandler import IDataHandler, get_datahandler
from freqtrade.enums import CandleType
from freqtrade.exceptions import OperationalException
from freqtrade.exchange import Exchange
-from freqtrade.misc import format_ms_time
from freqtrade.plugins.pairlist.pairlist_helpers import dynamic_expand_pairlist
+from freqtrade.util import format_ms_time
from freqtrade.util.binance_mig import migrate_binance_futures_data
@@ -354,7 +354,7 @@ def _download_trades_history(exchange: Exchange,
trades = []
if not since:
- since = int((datetime.now() - timedelta(days=-new_pairs_days)).timestamp()) * 1000
+ since = int((datetime.now() - timedelta(days=new_pairs_days)).timestamp()) * 1000
from_id = trades[-1][1] if trades else None
if trades and since < trades[-1][0]:
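A quick standalone sketch (not part of this patch) of why the sign flip above matters: subtracting a negative `timedelta` pushes `since` into the future, so the intended `new_pairs_days` look-back never happened.

```python
from datetime import datetime, timedelta

new_pairs_days = 30
now = datetime.now()
buggy_since = now - timedelta(days=-new_pairs_days)  # now + 30 days: a start time in the future
fixed_since = now - timedelta(days=new_pairs_days)   # now - 30 days: the intended look-back
assert fixed_since < now < buggy_since
```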
diff --git a/freqtrade/freqai/base_models/BaseClassifierModel.py b/freqtrade/freqai/base_models/BaseClassifierModel.py
index f35b07e66..42b5c1a0e 100644
--- a/freqtrade/freqai/base_models/BaseClassifierModel.py
+++ b/freqtrade/freqai/base_models/BaseClassifierModel.py
@@ -120,7 +120,7 @@ class BaseClassifierModel(IFreqaiModel):
if dk.feature_pipeline["di"]:
dk.DI_values = dk.feature_pipeline["di"].di_values
else:
- dk.DI_values = np.zeros(len(outliers.index))
- dk.do_predict = outliers.to_numpy()
+ dk.DI_values = np.zeros(outliers.shape[0])
+ dk.do_predict = outliers
return (pred_df, dk.do_predict)
diff --git a/freqtrade/freqai/base_models/BasePyTorchClassifier.py b/freqtrade/freqai/base_models/BasePyTorchClassifier.py
index c47c5069a..4780af818 100644
--- a/freqtrade/freqai/base_models/BasePyTorchClassifier.py
+++ b/freqtrade/freqai/base_models/BasePyTorchClassifier.py
@@ -94,8 +94,8 @@ class BasePyTorchClassifier(BasePyTorchModel):
if dk.feature_pipeline["di"]:
dk.DI_values = dk.feature_pipeline["di"].di_values
else:
- dk.DI_values = np.zeros(len(outliers.index))
- dk.do_predict = outliers.to_numpy()
+ dk.DI_values = np.zeros(outliers.shape[0])
+ dk.do_predict = outliers
return (pred_df, dk.do_predict)
diff --git a/freqtrade/freqai/base_models/BasePyTorchRegressor.py b/freqtrade/freqai/base_models/BasePyTorchRegressor.py
index 325743134..83fea4ef9 100644
--- a/freqtrade/freqai/base_models/BasePyTorchRegressor.py
+++ b/freqtrade/freqai/base_models/BasePyTorchRegressor.py
@@ -55,8 +55,8 @@ class BasePyTorchRegressor(BasePyTorchModel):
if dk.feature_pipeline["di"]:
dk.DI_values = dk.feature_pipeline["di"].di_values
else:
- dk.DI_values = np.zeros(len(outliers.index))
- dk.do_predict = outliers.to_numpy()
+ dk.DI_values = np.zeros(outliers.shape[0])
+ dk.do_predict = outliers
return (pred_df, dk.do_predict)
def train(
diff --git a/freqtrade/freqai/base_models/BaseRegressionModel.py b/freqtrade/freqai/base_models/BaseRegressionModel.py
index 2e07d3fb7..179e4be87 100644
--- a/freqtrade/freqai/base_models/BaseRegressionModel.py
+++ b/freqtrade/freqai/base_models/BaseRegressionModel.py
@@ -114,7 +114,7 @@ class BaseRegressionModel(IFreqaiModel):
if dk.feature_pipeline["di"]:
dk.DI_values = dk.feature_pipeline["di"].di_values
else:
- dk.DI_values = np.zeros(len(outliers.index))
- dk.do_predict = outliers.to_numpy()
+ dk.DI_values = np.zeros(outliers.shape[0])
+ dk.do_predict = outliers
return (pred_df, dk.do_predict)
diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py
index 4ca5467b6..36c94130c 100644
--- a/freqtrade/freqai/freqai_interface.py
+++ b/freqtrade/freqai/freqai_interface.py
@@ -515,7 +515,7 @@ class IFreqaiModel(ABC):
]
if ft_params.get("principal_component_analysis", False):
- pipe_steps.append(('pca', ds.PCA()))
+ pipe_steps.append(('pca', ds.PCA(n_components=0.999)))
pipe_steps.append(('post-pca-scaler',
SKLearnWrapper(MinMaxScaler(feature_range=(-1, 1)))))
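Assuming datasieve's `PCA` follows scikit-learn semantics, a float `n_components` keeps the smallest number of components that explain that fraction of the variance, rather than a fixed component count. A minimal scikit-learn sketch with illustrative data (not part of this patch):

```python
import numpy as np
from sklearn.decomposition import PCA

rng = np.random.default_rng(42)
base = rng.normal(size=(500, 5))
# 20 features built from 5 latent factors plus tiny noise.
X = base @ rng.normal(size=(5, 20)) + 1e-3 * rng.normal(size=(500, 20))

pca = PCA(n_components=0.999)  # keep the fewest components explaining 99.9% of variance
pca.fit(X)
print(pca.n_components_)  # roughly 5 rather than 20
```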
@@ -1012,6 +1012,6 @@ class IFreqaiModel(ABC):
if self.freqai_info.get("DI_threshold", 0) > 0:
dk.DI_values = dk.feature_pipeline["di"].di_values
else:
- dk.DI_values = np.zeros(len(outliers.index))
- dk.do_predict = outliers.to_numpy()
+ dk.DI_values = np.zeros(outliers.shape[0])
+ dk.do_predict = outliers
return
diff --git a/freqtrade/freqai/prediction_models/PyTorchTransformerRegressor.py b/freqtrade/freqai/prediction_models/PyTorchTransformerRegressor.py
index bf78488ff..a76bab05c 100644
--- a/freqtrade/freqai/prediction_models/PyTorchTransformerRegressor.py
+++ b/freqtrade/freqai/prediction_models/PyTorchTransformerRegressor.py
@@ -136,8 +136,8 @@ class PyTorchTransformerRegressor(BasePyTorchRegressor):
if self.freqai_info.get("DI_threshold", 0) > 0:
dk.DI_values = dk.feature_pipeline["di"].di_values
else:
- dk.DI_values = np.zeros(len(outliers.index))
- dk.do_predict = outliers.to_numpy()
+ dk.DI_values = np.zeros(outliers.shape[0])
+ dk.do_predict = outliers
if x.shape[1] > 1:
zeros_df = pd.DataFrame(np.zeros((x.shape[1] - len(pred_df), len(pred_df.columns))),
diff --git a/freqtrade/misc.py b/freqtrade/misc.py
index 1e84bba87..350ac5eef 100644
--- a/freqtrade/misc.py
+++ b/freqtrade/misc.py
@@ -3,7 +3,6 @@ Various tool function for Freqtrade and scripts
"""
import gzip
import logging
-from datetime import datetime
from pathlib import Path
from typing import Any, Dict, Iterator, List, Mapping, Optional, TextIO, Union
from urllib.parse import urlparse
@@ -123,14 +122,6 @@ def pair_to_filename(pair: str) -> str:
return pair
-def format_ms_time(date: int) -> str:
- """
- convert MS date to readable format.
- : epoch-string in ms
- """
- return datetime.fromtimestamp(date / 1000.0).strftime('%Y-%m-%dT%H:%M:%S')
-
-
def deep_merge_dicts(source, destination, allow_null_overrides: bool = True):
"""
Values from Source override destination, destination is returned (and modified!!)
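The relocated helper now lives in `freqtrade.util` instead of `freqtrade.misc`; a minimal usage sketch (assuming freqtrade is installed):

```python
from freqtrade.util import format_ms_time

# Converts a millisecond epoch to a readable timestamp (local time, per datetime.fromtimestamp).
print(format_ms_time(1_688_169_600_000))  # e.g. '2023-07-01T00:00:00' when running under UTC
```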
diff --git a/freqtrade/mixins/logging_mixin.py b/freqtrade/mixins/logging_mixin.py
index 06935d5f6..31b49ba55 100644
--- a/freqtrade/mixins/logging_mixin.py
+++ b/freqtrade/mixins/logging_mixin.py
@@ -3,7 +3,7 @@ from typing import Callable
from cachetools import TTLCache, cached
-class LoggingMixin():
+class LoggingMixin:
"""
Logging Mixin
Shows similar messages only once every `refresh_period`.
diff --git a/freqtrade/optimize/hyperopt_tools.py b/freqtrade/optimize/hyperopt_tools.py
index 1e7befdf6..bc5b85309 100644
--- a/freqtrade/optimize/hyperopt_tools.py
+++ b/freqtrade/optimize/hyperopt_tools.py
@@ -35,7 +35,7 @@ def hyperopt_serializer(x):
return str(x)
-class HyperoptStateContainer():
+class HyperoptStateContainer:
""" Singleton class to track state of hyperopt"""
state: HyperoptState = HyperoptState.OPTIMIZE
@@ -44,7 +44,7 @@ class HyperoptStateContainer():
cls.state = value
-class HyperoptTools():
+class HyperoptTools:
@staticmethod
def get_strategy_filename(config: Config, strategy_name: str) -> Optional[Path]:
diff --git a/freqtrade/optimize/optimize_reports/__init__.py b/freqtrade/optimize/optimize_reports/__init__.py
new file mode 100644
index 000000000..68e222d00
--- /dev/null
+++ b/freqtrade/optimize/optimize_reports/__init__.py
@@ -0,0 +1,18 @@
+# flake8: noqa: F401
+from freqtrade.optimize.optimize_reports.bt_output import (generate_edge_table,
+ show_backtest_result,
+ show_backtest_results,
+ show_sorted_pairlist,
+ text_table_add_metrics,
+ text_table_bt_results,
+ text_table_exit_reason,
+ text_table_periodic_breakdown,
+ text_table_strategy, text_table_tags)
+from freqtrade.optimize.optimize_reports.bt_storage import (store_backtest_analysis_results,
+ store_backtest_stats)
+from freqtrade.optimize.optimize_reports.optimize_reports import (
+ generate_all_periodic_breakdown_stats, generate_backtest_stats, generate_daily_stats,
+ generate_exit_reason_stats, generate_pair_metrics, generate_periodic_breakdown_stats,
+ generate_rejected_signals, generate_strategy_comparison, generate_strategy_stats,
+ generate_tag_metrics, generate_trade_signal_candles, generate_trading_stats,
+ generate_wins_draws_losses)
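These re-exports keep the module split transparent to callers; a minimal sketch of imports that continue to resolve unchanged (assuming freqtrade is installed):

```python
# The implementation moved into optimize_reports/{bt_output,bt_storage,optimize_reports}.py,
# but the old import path still works thanks to the package __init__ above.
from freqtrade.optimize.optimize_reports import (generate_backtest_stats, show_backtest_results,
                                                 store_backtest_stats)
```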
diff --git a/freqtrade/optimize/optimize_reports/bt_output.py b/freqtrade/optimize/optimize_reports/bt_output.py
new file mode 100644
index 000000000..1fd1f7a34
--- /dev/null
+++ b/freqtrade/optimize/optimize_reports/bt_output.py
@@ -0,0 +1,405 @@
+import logging
+from typing import Any, Dict, List
+
+from tabulate import tabulate
+
+from freqtrade.constants import UNLIMITED_STAKE_AMOUNT, Config
+from freqtrade.misc import decimals_per_coin, round_coin_value
+from freqtrade.optimize.optimize_reports.optimize_reports import (generate_periodic_breakdown_stats,
+ generate_wins_draws_losses)
+
+
+logger = logging.getLogger(__name__)
+
+
+def _get_line_floatfmt(stake_currency: str) -> List[str]:
+ """
+ Generate floatformat (goes in line with _generate_result_line())
+ """
+ return ['s', 'd', '.2f', '.2f', f'.{decimals_per_coin(stake_currency)}f',
+ '.2f', 'd', 's', 's']
+
+
+def _get_line_header(first_column: str, stake_currency: str,
+ direction: str = 'Entries') -> List[str]:
+ """
+ Generate header lines (goes in line with _generate_result_line())
+ """
+ return [first_column, direction, 'Avg Profit %', 'Cum Profit %',
+ f'Tot Profit {stake_currency}', 'Tot Profit %', 'Avg Duration',
+ 'Win Draw Loss Win%']
+
+
+def text_table_bt_results(pair_results: List[Dict[str, Any]], stake_currency: str) -> str:
+ """
+ Generates and returns a text table for the given backtest data and the results dataframe
+ :param pair_results: List of Dictionaries - one entry per pair + final TOTAL row
+ :param stake_currency: stake-currency - used to correctly name headers
+ :return: pretty printed table with tabulate as string
+ """
+
+ headers = _get_line_header('Pair', stake_currency)
+ floatfmt = _get_line_floatfmt(stake_currency)
+ output = [[
+ t['key'], t['trades'], t['profit_mean_pct'], t['profit_sum_pct'], t['profit_total_abs'],
+ t['profit_total_pct'], t['duration_avg'],
+ generate_wins_draws_losses(t['wins'], t['draws'], t['losses'])
+ ] for t in pair_results]
+ # Ignore type as floatfmt does allow tuples but mypy does not know that
+ return tabulate(output, headers=headers,
+ floatfmt=floatfmt, tablefmt="orgtbl", stralign="right")
+
+
+def text_table_exit_reason(exit_reason_stats: List[Dict[str, Any]], stake_currency: str) -> str:
+ """
+ Generate small table outlining Backtest results
+ :param exit_reason_stats: Exit reason metrics
+ :param stake_currency: Stakecurrency used
+ :return: pretty printed table with tabulate as string
+ """
+ headers = [
+ 'Exit Reason',
+ 'Exits',
+ 'Win Draws Loss Win%',
+ 'Avg Profit %',
+ 'Cum Profit %',
+ f'Tot Profit {stake_currency}',
+ 'Tot Profit %',
+ ]
+
+ output = [[
+ t.get('exit_reason', t.get('sell_reason')), t['trades'],
+ generate_wins_draws_losses(t['wins'], t['draws'], t['losses']),
+ t['profit_mean_pct'], t['profit_sum_pct'],
+ round_coin_value(t['profit_total_abs'], stake_currency, False),
+ t['profit_total_pct'],
+ ] for t in exit_reason_stats]
+ return tabulate(output, headers=headers, tablefmt="orgtbl", stralign="right")
+
+
+def text_table_tags(tag_type: str, tag_results: List[Dict[str, Any]], stake_currency: str) -> str:
+ """
+ Generates and returns a text table for the given backtest data and the results dataframe
+ :param pair_results: List of Dictionaries - one entry per pair + final TOTAL row
+ :param stake_currency: stake-currency - used to correctly name headers
+ :return: pretty printed table with tabulate as string
+ """
+ if (tag_type == "enter_tag"):
+ headers = _get_line_header("TAG", stake_currency)
+ else:
+ headers = _get_line_header("TAG", stake_currency, 'Exits')
+ floatfmt = _get_line_floatfmt(stake_currency)
+ output = [
+ [
+ t['key'] if t['key'] is not None and len(
+ t['key']) > 0 else "OTHER",
+ t['trades'],
+ t['profit_mean_pct'],
+ t['profit_sum_pct'],
+ t['profit_total_abs'],
+ t['profit_total_pct'],
+ t['duration_avg'],
+ generate_wins_draws_losses(
+ t['wins'],
+ t['draws'],
+ t['losses'])] for t in tag_results]
+ # Ignore type as floatfmt does allow tuples but mypy does not know that
+ return tabulate(output, headers=headers,
+ floatfmt=floatfmt, tablefmt="orgtbl", stralign="right")
+
+
+def text_table_periodic_breakdown(days_breakdown_stats: List[Dict[str, Any]],
+ stake_currency: str, period: str) -> str:
+ """
+ Generate small table with Backtest results by days
+ :param days_breakdown_stats: Days breakdown metrics
+ :param stake_currency: Stakecurrency used
+ :return: pretty printed table with tabulate as string
+ """
+ headers = [
+ period.capitalize(),
+ f'Tot Profit {stake_currency}',
+ 'Wins',
+ 'Draws',
+ 'Losses',
+ ]
+ output = [[
+ d['date'], round_coin_value(d['profit_abs'], stake_currency, False),
+ d['wins'], d['draws'], d['loses'],
+ ] for d in days_breakdown_stats]
+ return tabulate(output, headers=headers, tablefmt="orgtbl", stralign="right")
+
+
+def text_table_strategy(strategy_results, stake_currency: str) -> str:
+ """
+ Generate summary table per strategy
+ :param strategy_results: Dict of containing results for all strategies
+ :param stake_currency: stake-currency - used to correctly name headers
+ :return: pretty printed table with tabulate as string
+ """
+ floatfmt = _get_line_floatfmt(stake_currency)
+ headers = _get_line_header('Strategy', stake_currency)
+ # _get_line_header() is also used for per-pair summary. Per-pair drawdown is mostly useless
+ # therefore we slip this column in only for strategy summary here.
+ headers.append('Drawdown')
+
+ # Align drawdown string on the center two space separator.
+ if 'max_drawdown_account' in strategy_results[0]:
+ drawdown = [f'{t["max_drawdown_account"] * 100:.2f}' for t in strategy_results]
+ else:
+ # Support for prior backtest results
+ drawdown = [f'{t["max_drawdown_per"]:.2f}' for t in strategy_results]
+
+ dd_pad_abs = max([len(t['max_drawdown_abs']) for t in strategy_results])
+ dd_pad_per = max([len(dd) for dd in drawdown])
+ drawdown = [f'{t["max_drawdown_abs"]:>{dd_pad_abs}} {stake_currency} {dd:>{dd_pad_per}}%'
+ for t, dd in zip(strategy_results, drawdown)]
+
+ output = [[
+ t['key'], t['trades'], t['profit_mean_pct'], t['profit_sum_pct'], t['profit_total_abs'],
+ t['profit_total_pct'], t['duration_avg'],
+ generate_wins_draws_losses(t['wins'], t['draws'], t['losses']), drawdown]
+ for t, drawdown in zip(strategy_results, drawdown)]
+ # Ignore type as floatfmt does allow tuples but mypy does not know that
+ return tabulate(output, headers=headers,
+ floatfmt=floatfmt, tablefmt="orgtbl", stralign="right")
+
+
+def text_table_add_metrics(strat_results: Dict) -> str:
+ if len(strat_results['trades']) > 0:
+ best_trade = max(strat_results['trades'], key=lambda x: x['profit_ratio'])
+ worst_trade = min(strat_results['trades'], key=lambda x: x['profit_ratio'])
+
+ short_metrics = [
+ ('', ''), # Empty line to improve readability
+ ('Long / Short',
+ f"{strat_results.get('trade_count_long', 'total_trades')} / "
+ f"{strat_results.get('trade_count_short', 0)}"),
+ ('Total profit Long %', f"{strat_results['profit_total_long']:.2%}"),
+ ('Total profit Short %', f"{strat_results['profit_total_short']:.2%}"),
+ ('Absolute profit Long', round_coin_value(strat_results['profit_total_long_abs'],
+ strat_results['stake_currency'])),
+ ('Absolute profit Short', round_coin_value(strat_results['profit_total_short_abs'],
+ strat_results['stake_currency'])),
+ ] if strat_results.get('trade_count_short', 0) > 0 else []
+
+ drawdown_metrics = []
+ if 'max_relative_drawdown' in strat_results:
+ # Compatibility to show old hyperopt results
+ drawdown_metrics.append(
+ ('Max % of account underwater', f"{strat_results['max_relative_drawdown']:.2%}")
+ )
+ drawdown_metrics.extend([
+ ('Absolute Drawdown (Account)', f"{strat_results['max_drawdown_account']:.2%}")
+ if 'max_drawdown_account' in strat_results else (
+ 'Drawdown', f"{strat_results['max_drawdown']:.2%}"),
+ ('Absolute Drawdown', round_coin_value(strat_results['max_drawdown_abs'],
+ strat_results['stake_currency'])),
+ ('Drawdown high', round_coin_value(strat_results['max_drawdown_high'],
+ strat_results['stake_currency'])),
+ ('Drawdown low', round_coin_value(strat_results['max_drawdown_low'],
+ strat_results['stake_currency'])),
+ ('Drawdown Start', strat_results['drawdown_start']),
+ ('Drawdown End', strat_results['drawdown_end']),
+ ])
+
+ entry_adjustment_metrics = [
+ ('Canceled Trade Entries', strat_results.get('canceled_trade_entries', 'N/A')),
+ ('Canceled Entry Orders', strat_results.get('canceled_entry_orders', 'N/A')),
+ ('Replaced Entry Orders', strat_results.get('replaced_entry_orders', 'N/A')),
+ ] if strat_results.get('canceled_entry_orders', 0) > 0 else []
+
+ # Newly added fields should be ignored if they are missing in strat_results. hyperopt-show
+ # command stores these results and newer version of freqtrade must be able to handle old
+ # results with missing new fields.
+ metrics = [
+ ('Backtesting from', strat_results['backtest_start']),
+ ('Backtesting to', strat_results['backtest_end']),
+ ('Max open trades', strat_results['max_open_trades']),
+ ('', ''), # Empty line to improve readability
+ ('Total/Daily Avg Trades',
+ f"{strat_results['total_trades']} / {strat_results['trades_per_day']}"),
+
+ ('Starting balance', round_coin_value(strat_results['starting_balance'],
+ strat_results['stake_currency'])),
+ ('Final balance', round_coin_value(strat_results['final_balance'],
+ strat_results['stake_currency'])),
+ ('Absolute profit ', round_coin_value(strat_results['profit_total_abs'],
+ strat_results['stake_currency'])),
+ ('Total profit %', f"{strat_results['profit_total']:.2%}"),
+ ('CAGR %', f"{strat_results['cagr']:.2%}" if 'cagr' in strat_results else 'N/A'),
+ ('Sortino', f"{strat_results['sortino']:.2f}" if 'sortino' in strat_results else 'N/A'),
+ ('Sharpe', f"{strat_results['sharpe']:.2f}" if 'sharpe' in strat_results else 'N/A'),
+ ('Calmar', f"{strat_results['calmar']:.2f}" if 'calmar' in strat_results else 'N/A'),
+ ('Profit factor', f'{strat_results["profit_factor"]:.2f}' if 'profit_factor'
+ in strat_results else 'N/A'),
+ ('Expectancy', f"{strat_results['expectancy']:.2f}" if 'expectancy'
+ in strat_results else 'N/A'),
+ ('Trades per day', strat_results['trades_per_day']),
+ ('Avg. daily profit %',
+ f"{(strat_results['profit_total'] / strat_results['backtest_days']):.2%}"),
+ ('Avg. stake amount', round_coin_value(strat_results['avg_stake_amount'],
+ strat_results['stake_currency'])),
+ ('Total trade volume', round_coin_value(strat_results['total_volume'],
+ strat_results['stake_currency'])),
+ *short_metrics,
+ ('', ''), # Empty line to improve readability
+ ('Best Pair', f"{strat_results['best_pair']['key']} "
+ f"{strat_results['best_pair']['profit_sum']:.2%}"),
+ ('Worst Pair', f"{strat_results['worst_pair']['key']} "
+ f"{strat_results['worst_pair']['profit_sum']:.2%}"),
+ ('Best trade', f"{best_trade['pair']} {best_trade['profit_ratio']:.2%}"),
+ ('Worst trade', f"{worst_trade['pair']} "
+ f"{worst_trade['profit_ratio']:.2%}"),
+
+ ('Best day', round_coin_value(strat_results['backtest_best_day_abs'],
+ strat_results['stake_currency'])),
+ ('Worst day', round_coin_value(strat_results['backtest_worst_day_abs'],
+ strat_results['stake_currency'])),
+ ('Days win/draw/lose', f"{strat_results['winning_days']} / "
+ f"{strat_results['draw_days']} / {strat_results['losing_days']}"),
+ ('Avg. Duration Winners', f"{strat_results['winner_holding_avg']}"),
+ ('Avg. Duration Loser', f"{strat_results['loser_holding_avg']}"),
+ ('Rejected Entry signals', strat_results.get('rejected_signals', 'N/A')),
+ ('Entry/Exit Timeouts',
+ f"{strat_results.get('timedout_entry_orders', 'N/A')} / "
+ f"{strat_results.get('timedout_exit_orders', 'N/A')}"),
+ *entry_adjustment_metrics,
+ ('', ''), # Empty line to improve readability
+
+ ('Min balance', round_coin_value(strat_results['csum_min'],
+ strat_results['stake_currency'])),
+ ('Max balance', round_coin_value(strat_results['csum_max'],
+ strat_results['stake_currency'])),
+
+ *drawdown_metrics,
+ ('Market change', f"{strat_results['market_change']:.2%}"),
+ ]
+
+ return tabulate(metrics, headers=["Metric", "Value"], tablefmt="orgtbl")
+ else:
+ start_balance = round_coin_value(strat_results['starting_balance'],
+ strat_results['stake_currency'])
+ stake_amount = round_coin_value(
+ strat_results['stake_amount'], strat_results['stake_currency']
+ ) if strat_results['stake_amount'] != UNLIMITED_STAKE_AMOUNT else 'unlimited'
+
+ message = ("No trades made. "
+ f"Your starting balance was {start_balance}, "
+ f"and your stake was {stake_amount}."
+ )
+ return message
+
+
+def show_backtest_result(strategy: str, results: Dict[str, Any], stake_currency: str,
+ backtest_breakdown=[]):
+ """
+ Print results for one strategy
+ """
+ # Print results
+ print(f"Result for strategy {strategy}")
+ table = text_table_bt_results(results['results_per_pair'], stake_currency=stake_currency)
+ if isinstance(table, str):
+ print(' BACKTESTING REPORT '.center(len(table.splitlines()[0]), '='))
+ print(table)
+
+ table = text_table_bt_results(results['left_open_trades'], stake_currency=stake_currency)
+ if isinstance(table, str) and len(table) > 0:
+ print(' LEFT OPEN TRADES REPORT '.center(len(table.splitlines()[0]), '='))
+ print(table)
+
+ if (results.get('results_per_enter_tag') is not None
+ or results.get('results_per_buy_tag') is not None):
+ # results_per_buy_tag is deprecated and should be removed 2 versions after short golive.
+ table = text_table_tags(
+ "enter_tag",
+ results.get('results_per_enter_tag', results.get('results_per_buy_tag')),
+ stake_currency=stake_currency)
+
+ if isinstance(table, str) and len(table) > 0:
+ print(' ENTER TAG STATS '.center(len(table.splitlines()[0]), '='))
+ print(table)
+
+ exit_reasons = results.get('exit_reason_summary', results.get('sell_reason_summary'))
+ table = text_table_exit_reason(exit_reason_stats=exit_reasons,
+ stake_currency=stake_currency)
+ if isinstance(table, str) and len(table) > 0:
+ print(' EXIT REASON STATS '.center(len(table.splitlines()[0]), '='))
+ print(table)
+
+ for period in backtest_breakdown:
+ if period in results.get('periodic_breakdown', {}):
+ days_breakdown_stats = results['periodic_breakdown'][period]
+ else:
+ days_breakdown_stats = generate_periodic_breakdown_stats(
+ trade_list=results['trades'], period=period)
+ table = text_table_periodic_breakdown(days_breakdown_stats=days_breakdown_stats,
+ stake_currency=stake_currency, period=period)
+ if isinstance(table, str) and len(table) > 0:
+ print(f' {period.upper()} BREAKDOWN '.center(len(table.splitlines()[0]), '='))
+ print(table)
+
+ table = text_table_add_metrics(results)
+ if isinstance(table, str) and len(table) > 0:
+ print(' SUMMARY METRICS '.center(len(table.splitlines()[0]), '='))
+ print(table)
+
+ if isinstance(table, str) and len(table) > 0:
+ print('=' * len(table.splitlines()[0]))
+
+ print()
+
+
+def show_backtest_results(config: Config, backtest_stats: Dict):
+ stake_currency = config['stake_currency']
+
+ for strategy, results in backtest_stats['strategy'].items():
+ show_backtest_result(
+ strategy, results, stake_currency,
+ config.get('backtest_breakdown', []))
+
+ if len(backtest_stats['strategy']) > 0:
+ # Print Strategy summary table
+
+ table = text_table_strategy(backtest_stats['strategy_comparison'], stake_currency)
+ print(f"Backtested {results['backtest_start']} -> {results['backtest_end']} |"
+ f" Max open trades : {results['max_open_trades']}")
+ print(' STRATEGY SUMMARY '.center(len(table.splitlines()[0]), '='))
+ print(table)
+ print('=' * len(table.splitlines()[0]))
+ print('\nFor more details, please look at the detail tables above')
+
+
+def show_sorted_pairlist(config: Config, backtest_stats: Dict):
+ if config.get('backtest_show_pair_list', False):
+ for strategy, results in backtest_stats['strategy'].items():
+ print(f"Pairs for Strategy {strategy}: \n[")
+ for result in results['results_per_pair']:
+ if result["key"] != 'TOTAL':
+ print(f'"{result["key"]}", // {result["profit_mean"]:.2%}')
+ print("]")
+
+
+def generate_edge_table(results: dict) -> str:
+ floatfmt = ('s', '.10g', '.2f', '.2f', '.2f', '.2f', 'd', 'd', 'd')
+ tabular_data = []
+ headers = ['Pair', 'Stoploss', 'Win Rate', 'Risk Reward Ratio',
+ 'Required Risk Reward', 'Expectancy', 'Total Number of Trades',
+ 'Average Duration (min)']
+
+ for result in results.items():
+ if result[1].nb_trades > 0:
+ tabular_data.append([
+ result[0],
+ result[1].stoploss,
+ result[1].winrate,
+ result[1].risk_reward_ratio,
+ result[1].required_risk_reward,
+ result[1].expectancy,
+ result[1].nb_trades,
+ round(result[1].avg_trade_duration)
+ ])
+
+ # Ignore type as floatfmt does allow tuples but mypy does not know that
+ return tabulate(tabular_data, headers=headers,
+ floatfmt=floatfmt, tablefmt="orgtbl", stralign="right")
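For orientation, a minimal sketch (hypothetical result data, not part of this patch) of calling one of the relocated table helpers directly:

```python
from freqtrade.optimize.optimize_reports import text_table_bt_results

# One row per pair plus a final TOTAL row, using the keys the function reads above.
pair_results = [{
    'key': 'BTC/USDT', 'trades': 3, 'profit_mean_pct': 1.25, 'profit_sum_pct': 3.75,
    'profit_total_abs': 37.5, 'profit_total_pct': 3.75, 'duration_avg': '2:30:00',
    'wins': 2, 'draws': 0, 'losses': 1,
}]
print(text_table_bt_results(pair_results, stake_currency='USDT'))
```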
diff --git a/freqtrade/optimize/optimize_reports/bt_storage.py b/freqtrade/optimize/optimize_reports/bt_storage.py
new file mode 100644
index 000000000..af97753e3
--- /dev/null
+++ b/freqtrade/optimize/optimize_reports/bt_storage.py
@@ -0,0 +1,71 @@
+import logging
+from pathlib import Path
+from typing import Dict
+
+from pandas import DataFrame
+
+from freqtrade.constants import LAST_BT_RESULT_FN
+from freqtrade.misc import file_dump_joblib, file_dump_json
+from freqtrade.optimize.backtest_caching import get_backtest_metadata_filename
+
+
+logger = logging.getLogger(__name__)
+
+
+def store_backtest_stats(
+ recordfilename: Path, stats: Dict[str, DataFrame], dtappendix: str) -> None:
+ """
+ Stores backtest results
+ :param recordfilename: Path object, which can either be a filename or a directory.
+ Filenames will be appended with a timestamp right before the suffix
+ while for directories, /backtest-result-.json will be used as filename
+ :param stats: Dataframe containing the backtesting statistics
+ :param dtappendix: Datetime to use for the filename
+ """
+ if recordfilename.is_dir():
+ filename = (recordfilename / f'backtest-result-{dtappendix}.json')
+ else:
+ filename = Path.joinpath(
+ recordfilename.parent, f'{recordfilename.stem}-{dtappendix}'
+ ).with_suffix(recordfilename.suffix)
+
+ # Store metadata separately.
+ file_dump_json(get_backtest_metadata_filename(filename), stats['metadata'])
+ del stats['metadata']
+
+ file_dump_json(filename, stats)
+
+ latest_filename = Path.joinpath(filename.parent, LAST_BT_RESULT_FN)
+ file_dump_json(latest_filename, {'latest_backtest': str(filename.name)})
+
+
+def _store_backtest_analysis_data(
+ recordfilename: Path, data: Dict[str, Dict],
+ dtappendix: str, name: str) -> Path:
+ """
+ Stores backtest trade candles for analysis
+ :param recordfilename: Path object, which can either be a filename or a directory.
+ Filenames will be appended with a timestamp right before the suffix
+ while for directories, /backtest-result-_.pkl will be used
+ as filename
+ :param candles: Dict containing the backtesting data for analysis
+ :param dtappendix: Datetime to use for the filename
+ :param name: Name to use for the file, e.g. signals, rejected
+ """
+ if recordfilename.is_dir():
+ filename = (recordfilename / f'backtest-result-{dtappendix}_{name}.pkl')
+ else:
+ filename = Path.joinpath(
+ recordfilename.parent, f'{recordfilename.stem}-{dtappendix}_{name}.pkl'
+ )
+
+ file_dump_joblib(filename, data)
+
+ return filename
+
+
+def store_backtest_analysis_results(
+ recordfilename: Path, candles: Dict[str, Dict], trades: Dict[str, Dict],
+ dtappendix: str) -> None:
+ _store_backtest_analysis_data(recordfilename, candles, dtappendix, "signals")
+ _store_backtest_analysis_data(recordfilename, trades, dtappendix, "rejected")
diff --git a/freqtrade/optimize/optimize_reports.py b/freqtrade/optimize/optimize_reports/optimize_reports.py
similarity index 51%
rename from freqtrade/optimize/optimize_reports.py
rename to freqtrade/optimize/optimize_reports/optimize_reports.py
index e60047a79..015f163e3 100644
--- a/freqtrade/optimize/optimize_reports.py
+++ b/freqtrade/optimize/optimize_reports/optimize_reports.py
@@ -1,83 +1,20 @@
import logging
from copy import deepcopy
from datetime import datetime, timedelta, timezone
-from pathlib import Path
from typing import Any, Dict, List, Union
from pandas import DataFrame, concat, to_datetime
-from tabulate import tabulate
-from freqtrade.constants import (BACKTEST_BREAKDOWNS, DATETIME_PRINT_FORMAT, LAST_BT_RESULT_FN,
- UNLIMITED_STAKE_AMOUNT, Config, IntOrInf)
+from freqtrade.constants import BACKTEST_BREAKDOWNS, DATETIME_PRINT_FORMAT, IntOrInf
from freqtrade.data.metrics import (calculate_cagr, calculate_calmar, calculate_csum,
calculate_expectancy, calculate_market_change,
calculate_max_drawdown, calculate_sharpe, calculate_sortino)
-from freqtrade.misc import decimals_per_coin, file_dump_joblib, file_dump_json, round_coin_value
-from freqtrade.optimize.backtest_caching import get_backtest_metadata_filename
+from freqtrade.misc import decimals_per_coin, round_coin_value
logger = logging.getLogger(__name__)
-def store_backtest_stats(
- recordfilename: Path, stats: Dict[str, DataFrame], dtappendix: str) -> None:
- """
- Stores backtest results
- :param recordfilename: Path object, which can either be a filename or a directory.
- Filenames will be appended with a timestamp right before the suffix
- while for directories, /backtest-result-.json will be used as filename
- :param stats: Dataframe containing the backtesting statistics
- :param dtappendix: Datetime to use for the filename
- """
- if recordfilename.is_dir():
- filename = (recordfilename / f'backtest-result-{dtappendix}.json')
- else:
- filename = Path.joinpath(
- recordfilename.parent, f'{recordfilename.stem}-{dtappendix}'
- ).with_suffix(recordfilename.suffix)
-
- # Store metadata separately.
- file_dump_json(get_backtest_metadata_filename(filename), stats['metadata'])
- del stats['metadata']
-
- file_dump_json(filename, stats)
-
- latest_filename = Path.joinpath(filename.parent, LAST_BT_RESULT_FN)
- file_dump_json(latest_filename, {'latest_backtest': str(filename.name)})
-
-
-def _store_backtest_analysis_data(
- recordfilename: Path, data: Dict[str, Dict],
- dtappendix: str, name: str) -> Path:
- """
- Stores backtest trade candles for analysis
- :param recordfilename: Path object, which can either be a filename or a directory.
- Filenames will be appended with a timestamp right before the suffix
- while for directories, /backtest-result-_.pkl will be used
- as filename
- :param candles: Dict containing the backtesting data for analysis
- :param dtappendix: Datetime to use for the filename
- :param name: Name to use for the file, e.g. signals, rejected
- """
- if recordfilename.is_dir():
- filename = (recordfilename / f'backtest-result-{dtappendix}_{name}.pkl')
- else:
- filename = Path.joinpath(
- recordfilename.parent, f'{recordfilename.stem}-{dtappendix}_{name}.pkl'
- )
-
- file_dump_joblib(filename, data)
-
- return filename
-
-
-def store_backtest_analysis_results(
- recordfilename: Path, candles: Dict[str, Dict], trades: Dict[str, Dict],
- dtappendix: str) -> None:
- _store_backtest_analysis_data(recordfilename, candles, dtappendix, "signals")
- _store_backtest_analysis_data(recordfilename, trades, dtappendix, "rejected")
-
-
def generate_trade_signal_candles(preprocessed_df: Dict[str, DataFrame],
bt_results: Dict[str, Any]) -> DataFrame:
signal_candles_only = {}
@@ -120,24 +57,6 @@ def generate_rejected_signals(preprocessed_df: Dict[str, DataFrame],
return rejected_candles_only
-def _get_line_floatfmt(stake_currency: str) -> List[str]:
- """
- Generate floatformat (goes in line with _generate_result_line())
- """
- return ['s', 'd', '.2f', '.2f', f'.{decimals_per_coin(stake_currency)}f',
- '.2f', 'd', 's', 's']
-
-
-def _get_line_header(first_column: str, stake_currency: str,
- direction: str = 'Entries') -> List[str]:
- """
- Generate header lines (goes in line with _generate_result_line())
- """
- return [first_column, direction, 'Avg Profit %', 'Cum Profit %',
- f'Tot Profit {stake_currency}', 'Tot Profit %', 'Avg Duration',
- 'Win Draw Loss Win%']
-
-
def generate_wins_draws_losses(wins, draws, losses):
if wins > 0 and losses == 0:
wl_ratio = '100'
@@ -295,31 +214,6 @@ def generate_strategy_comparison(bt_stats: Dict) -> List[Dict]:
return tabular_data
-def generate_edge_table(results: dict) -> str:
- floatfmt = ('s', '.10g', '.2f', '.2f', '.2f', '.2f', 'd', 'd', 'd')
- tabular_data = []
- headers = ['Pair', 'Stoploss', 'Win Rate', 'Risk Reward Ratio',
- 'Required Risk Reward', 'Expectancy', 'Total Number of Trades',
- 'Average Duration (min)']
-
- for result in results.items():
- if result[1].nb_trades > 0:
- tabular_data.append([
- result[0],
- result[1].stoploss,
- result[1].winrate,
- result[1].risk_reward_ratio,
- result[1].required_risk_reward,
- result[1].expectancy,
- result[1].nb_trades,
- round(result[1].avg_trade_duration)
- ])
-
- # Ignore type as floatfmt does allow tuples but mypy does not know that
- return tabulate(tabular_data, headers=headers,
- floatfmt=floatfmt, tablefmt="orgtbl", stralign="right")
-
-
def _get_resample_from_period(period: str) -> str:
if period == 'day':
return '1d'
@@ -652,357 +546,3 @@ def generate_backtest_stats(btdata: Dict[str, DataFrame],
result['strategy_comparison'] = strategy_results
return result
-
-
-###
-# Start output section
-###
-
-def text_table_bt_results(pair_results: List[Dict[str, Any]], stake_currency: str) -> str:
- """
- Generates and returns a text table for the given backtest data and the results dataframe
- :param pair_results: List of Dictionaries - one entry per pair + final TOTAL row
- :param stake_currency: stake-currency - used to correctly name headers
- :return: pretty printed table with tabulate as string
- """
-
- headers = _get_line_header('Pair', stake_currency)
- floatfmt = _get_line_floatfmt(stake_currency)
- output = [[
- t['key'], t['trades'], t['profit_mean_pct'], t['profit_sum_pct'], t['profit_total_abs'],
- t['profit_total_pct'], t['duration_avg'],
- generate_wins_draws_losses(t['wins'], t['draws'], t['losses'])
- ] for t in pair_results]
- # Ignore type as floatfmt does allow tuples but mypy does not know that
- return tabulate(output, headers=headers,
- floatfmt=floatfmt, tablefmt="orgtbl", stralign="right")
-
-
-def text_table_exit_reason(exit_reason_stats: List[Dict[str, Any]], stake_currency: str) -> str:
- """
- Generate small table outlining Backtest results
- :param sell_reason_stats: Exit reason metrics
- :param stake_currency: Stakecurrency used
- :return: pretty printed table with tabulate as string
- """
- headers = [
- 'Exit Reason',
- 'Exits',
- 'Win Draws Loss Win%',
- 'Avg Profit %',
- 'Cum Profit %',
- f'Tot Profit {stake_currency}',
- 'Tot Profit %',
- ]
-
- output = [[
- t.get('exit_reason', t.get('sell_reason')), t['trades'],
- generate_wins_draws_losses(t['wins'], t['draws'], t['losses']),
- t['profit_mean_pct'], t['profit_sum_pct'],
- round_coin_value(t['profit_total_abs'], stake_currency, False),
- t['profit_total_pct'],
- ] for t in exit_reason_stats]
- return tabulate(output, headers=headers, tablefmt="orgtbl", stralign="right")
-
-
-def text_table_tags(tag_type: str, tag_results: List[Dict[str, Any]], stake_currency: str) -> str:
- """
- Generates and returns a text table for the given backtest data and the results dataframe
- :param pair_results: List of Dictionaries - one entry per pair + final TOTAL row
- :param stake_currency: stake-currency - used to correctly name headers
- :return: pretty printed table with tabulate as string
- """
- if (tag_type == "enter_tag"):
- headers = _get_line_header("TAG", stake_currency)
- else:
- headers = _get_line_header("TAG", stake_currency, 'Exits')
- floatfmt = _get_line_floatfmt(stake_currency)
- output = [
- [
- t['key'] if t['key'] is not None and len(
- t['key']) > 0 else "OTHER",
- t['trades'],
- t['profit_mean_pct'],
- t['profit_sum_pct'],
- t['profit_total_abs'],
- t['profit_total_pct'],
- t['duration_avg'],
- generate_wins_draws_losses(
- t['wins'],
- t['draws'],
- t['losses'])] for t in tag_results]
- # Ignore type as floatfmt does allow tuples but mypy does not know that
- return tabulate(output, headers=headers,
- floatfmt=floatfmt, tablefmt="orgtbl", stralign="right")
-
-
-def text_table_periodic_breakdown(days_breakdown_stats: List[Dict[str, Any]],
- stake_currency: str, period: str) -> str:
- """
- Generate small table with Backtest results by days
- :param days_breakdown_stats: Days breakdown metrics
- :param stake_currency: Stakecurrency used
- :return: pretty printed table with tabulate as string
- """
- headers = [
- period.capitalize(),
- f'Tot Profit {stake_currency}',
- 'Wins',
- 'Draws',
- 'Losses',
- ]
- output = [[
- d['date'], round_coin_value(d['profit_abs'], stake_currency, False),
- d['wins'], d['draws'], d['loses'],
- ] for d in days_breakdown_stats]
- return tabulate(output, headers=headers, tablefmt="orgtbl", stralign="right")
-
-
-def text_table_strategy(strategy_results, stake_currency: str) -> str:
- """
- Generate summary table per strategy
- :param strategy_results: Dict of containing results for all strategies
- :param stake_currency: stake-currency - used to correctly name headers
- :return: pretty printed table with tabulate as string
- """
- floatfmt = _get_line_floatfmt(stake_currency)
- headers = _get_line_header('Strategy', stake_currency)
- # _get_line_header() is also used for per-pair summary. Per-pair drawdown is mostly useless
- # therefore we slip this column in only for strategy summary here.
- headers.append('Drawdown')
-
- # Align drawdown string on the center two space separator.
- if 'max_drawdown_account' in strategy_results[0]:
- drawdown = [f'{t["max_drawdown_account"] * 100:.2f}' for t in strategy_results]
- else:
- # Support for prior backtest results
- drawdown = [f'{t["max_drawdown_per"]:.2f}' for t in strategy_results]
-
- dd_pad_abs = max([len(t['max_drawdown_abs']) for t in strategy_results])
- dd_pad_per = max([len(dd) for dd in drawdown])
- drawdown = [f'{t["max_drawdown_abs"]:>{dd_pad_abs}} {stake_currency} {dd:>{dd_pad_per}}%'
- for t, dd in zip(strategy_results, drawdown)]
-
- output = [[
- t['key'], t['trades'], t['profit_mean_pct'], t['profit_sum_pct'], t['profit_total_abs'],
- t['profit_total_pct'], t['duration_avg'],
- generate_wins_draws_losses(t['wins'], t['draws'], t['losses']), drawdown]
- for t, drawdown in zip(strategy_results, drawdown)]
- # Ignore type as floatfmt does allow tuples but mypy does not know that
- return tabulate(output, headers=headers,
- floatfmt=floatfmt, tablefmt="orgtbl", stralign="right")
-
-
-def text_table_add_metrics(strat_results: Dict) -> str:
- if len(strat_results['trades']) > 0:
- best_trade = max(strat_results['trades'], key=lambda x: x['profit_ratio'])
- worst_trade = min(strat_results['trades'], key=lambda x: x['profit_ratio'])
-
- short_metrics = [
- ('', ''), # Empty line to improve readability
- ('Long / Short',
- f"{strat_results.get('trade_count_long', 'total_trades')} / "
- f"{strat_results.get('trade_count_short', 0)}"),
- ('Total profit Long %', f"{strat_results['profit_total_long']:.2%}"),
- ('Total profit Short %', f"{strat_results['profit_total_short']:.2%}"),
- ('Absolute profit Long', round_coin_value(strat_results['profit_total_long_abs'],
- strat_results['stake_currency'])),
- ('Absolute profit Short', round_coin_value(strat_results['profit_total_short_abs'],
- strat_results['stake_currency'])),
- ] if strat_results.get('trade_count_short', 0) > 0 else []
-
- drawdown_metrics = []
- if 'max_relative_drawdown' in strat_results:
- # Compatibility to show old hyperopt results
- drawdown_metrics.append(
- ('Max % of account underwater', f"{strat_results['max_relative_drawdown']:.2%}")
- )
- drawdown_metrics.extend([
- ('Absolute Drawdown (Account)', f"{strat_results['max_drawdown_account']:.2%}")
- if 'max_drawdown_account' in strat_results else (
- 'Drawdown', f"{strat_results['max_drawdown']:.2%}"),
- ('Absolute Drawdown', round_coin_value(strat_results['max_drawdown_abs'],
- strat_results['stake_currency'])),
- ('Drawdown high', round_coin_value(strat_results['max_drawdown_high'],
- strat_results['stake_currency'])),
- ('Drawdown low', round_coin_value(strat_results['max_drawdown_low'],
- strat_results['stake_currency'])),
- ('Drawdown Start', strat_results['drawdown_start']),
- ('Drawdown End', strat_results['drawdown_end']),
- ])
-
- entry_adjustment_metrics = [
- ('Canceled Trade Entries', strat_results.get('canceled_trade_entries', 'N/A')),
- ('Canceled Entry Orders', strat_results.get('canceled_entry_orders', 'N/A')),
- ('Replaced Entry Orders', strat_results.get('replaced_entry_orders', 'N/A')),
- ] if strat_results.get('canceled_entry_orders', 0) > 0 else []
-
- # Newly added fields should be ignored if they are missing in strat_results. hyperopt-show
- # command stores these results and newer version of freqtrade must be able to handle old
- # results with missing new fields.
- metrics = [
- ('Backtesting from', strat_results['backtest_start']),
- ('Backtesting to', strat_results['backtest_end']),
- ('Max open trades', strat_results['max_open_trades']),
- ('', ''), # Empty line to improve readability
- ('Total/Daily Avg Trades',
- f"{strat_results['total_trades']} / {strat_results['trades_per_day']}"),
-
- ('Starting balance', round_coin_value(strat_results['starting_balance'],
- strat_results['stake_currency'])),
- ('Final balance', round_coin_value(strat_results['final_balance'],
- strat_results['stake_currency'])),
- ('Absolute profit ', round_coin_value(strat_results['profit_total_abs'],
- strat_results['stake_currency'])),
- ('Total profit %', f"{strat_results['profit_total']:.2%}"),
- ('CAGR %', f"{strat_results['cagr']:.2%}" if 'cagr' in strat_results else 'N/A'),
- ('Sortino', f"{strat_results['sortino']:.2f}" if 'sortino' in strat_results else 'N/A'),
- ('Sharpe', f"{strat_results['sharpe']:.2f}" if 'sharpe' in strat_results else 'N/A'),
- ('Calmar', f"{strat_results['calmar']:.2f}" if 'calmar' in strat_results else 'N/A'),
- ('Profit factor', f'{strat_results["profit_factor"]:.2f}' if 'profit_factor'
- in strat_results else 'N/A'),
- ('Expectancy', f"{strat_results['expectancy']:.2f}" if 'expectancy'
- in strat_results else 'N/A'),
- ('Trades per day', strat_results['trades_per_day']),
- ('Avg. daily profit %',
- f"{(strat_results['profit_total'] / strat_results['backtest_days']):.2%}"),
- ('Avg. stake amount', round_coin_value(strat_results['avg_stake_amount'],
- strat_results['stake_currency'])),
- ('Total trade volume', round_coin_value(strat_results['total_volume'],
- strat_results['stake_currency'])),
- *short_metrics,
- ('', ''), # Empty line to improve readability
- ('Best Pair', f"{strat_results['best_pair']['key']} "
- f"{strat_results['best_pair']['profit_sum']:.2%}"),
- ('Worst Pair', f"{strat_results['worst_pair']['key']} "
- f"{strat_results['worst_pair']['profit_sum']:.2%}"),
- ('Best trade', f"{best_trade['pair']} {best_trade['profit_ratio']:.2%}"),
- ('Worst trade', f"{worst_trade['pair']} "
- f"{worst_trade['profit_ratio']:.2%}"),
-
- ('Best day', round_coin_value(strat_results['backtest_best_day_abs'],
- strat_results['stake_currency'])),
- ('Worst day', round_coin_value(strat_results['backtest_worst_day_abs'],
- strat_results['stake_currency'])),
- ('Days win/draw/lose', f"{strat_results['winning_days']} / "
- f"{strat_results['draw_days']} / {strat_results['losing_days']}"),
- ('Avg. Duration Winners', f"{strat_results['winner_holding_avg']}"),
- ('Avg. Duration Loser', f"{strat_results['loser_holding_avg']}"),
- ('Rejected Entry signals', strat_results.get('rejected_signals', 'N/A')),
- ('Entry/Exit Timeouts',
- f"{strat_results.get('timedout_entry_orders', 'N/A')} / "
- f"{strat_results.get('timedout_exit_orders', 'N/A')}"),
- *entry_adjustment_metrics,
- ('', ''), # Empty line to improve readability
-
- ('Min balance', round_coin_value(strat_results['csum_min'],
- strat_results['stake_currency'])),
- ('Max balance', round_coin_value(strat_results['csum_max'],
- strat_results['stake_currency'])),
-
- *drawdown_metrics,
- ('Market change', f"{strat_results['market_change']:.2%}"),
- ]
-
- return tabulate(metrics, headers=["Metric", "Value"], tablefmt="orgtbl")
- else:
- start_balance = round_coin_value(strat_results['starting_balance'],
- strat_results['stake_currency'])
- stake_amount = round_coin_value(
- strat_results['stake_amount'], strat_results['stake_currency']
- ) if strat_results['stake_amount'] != UNLIMITED_STAKE_AMOUNT else 'unlimited'
-
- message = ("No trades made. "
- f"Your starting balance was {start_balance}, "
- f"and your stake was {stake_amount}."
- )
- return message
-
-
-def show_backtest_result(strategy: str, results: Dict[str, Any], stake_currency: str,
- backtest_breakdown=[]):
- """
- Print results for one strategy
- """
- # Print results
- print(f"Result for strategy {strategy}")
- table = text_table_bt_results(results['results_per_pair'], stake_currency=stake_currency)
- if isinstance(table, str):
- print(' BACKTESTING REPORT '.center(len(table.splitlines()[0]), '='))
- print(table)
-
- table = text_table_bt_results(results['left_open_trades'], stake_currency=stake_currency)
- if isinstance(table, str) and len(table) > 0:
- print(' LEFT OPEN TRADES REPORT '.center(len(table.splitlines()[0]), '='))
- print(table)
-
- if (results.get('results_per_enter_tag') is not None
- or results.get('results_per_buy_tag') is not None):
- # results_per_buy_tag is deprecated and should be removed 2 versions after short golive.
- table = text_table_tags(
- "enter_tag",
- results.get('results_per_enter_tag', results.get('results_per_buy_tag')),
- stake_currency=stake_currency)
-
- if isinstance(table, str) and len(table) > 0:
- print(' ENTER TAG STATS '.center(len(table.splitlines()[0]), '='))
- print(table)
-
- exit_reasons = results.get('exit_reason_summary', results.get('sell_reason_summary'))
- table = text_table_exit_reason(exit_reason_stats=exit_reasons,
- stake_currency=stake_currency)
- if isinstance(table, str) and len(table) > 0:
- print(' EXIT REASON STATS '.center(len(table.splitlines()[0]), '='))
- print(table)
-
- for period in backtest_breakdown:
- if period in results.get('periodic_breakdown', {}):
- days_breakdown_stats = results['periodic_breakdown'][period]
- else:
- days_breakdown_stats = generate_periodic_breakdown_stats(
- trade_list=results['trades'], period=period)
- table = text_table_periodic_breakdown(days_breakdown_stats=days_breakdown_stats,
- stake_currency=stake_currency, period=period)
- if isinstance(table, str) and len(table) > 0:
- print(f' {period.upper()} BREAKDOWN '.center(len(table.splitlines()[0]), '='))
- print(table)
-
- table = text_table_add_metrics(results)
- if isinstance(table, str) and len(table) > 0:
- print(' SUMMARY METRICS '.center(len(table.splitlines()[0]), '='))
- print(table)
-
- if isinstance(table, str) and len(table) > 0:
- print('=' * len(table.splitlines()[0]))
-
- print()
-
-
-def show_backtest_results(config: Config, backtest_stats: Dict):
- stake_currency = config['stake_currency']
-
- for strategy, results in backtest_stats['strategy'].items():
- show_backtest_result(
- strategy, results, stake_currency,
- config.get('backtest_breakdown', []))
-
- if len(backtest_stats['strategy']) > 0:
- # Print Strategy summary table
-
- table = text_table_strategy(backtest_stats['strategy_comparison'], stake_currency)
- print(f"Backtested {results['backtest_start']} -> {results['backtest_end']} |"
- f" Max open trades : {results['max_open_trades']}")
- print(' STRATEGY SUMMARY '.center(len(table.splitlines()[0]), '='))
- print(table)
- print('=' * len(table.splitlines()[0]))
- print('\nFor more details, please look at the detail tables above')
-
-
-def show_sorted_pairlist(config: Config, backtest_stats: Dict):
- if config.get('backtest_show_pair_list', False):
- for strategy, results in backtest_stats['strategy'].items():
- print(f"Pairs for Strategy {strategy}: \n[")
- for result in results['results_per_pair']:
- if result["key"] != 'TOTAL':
- print(f'"{result["key"]}", // {result["profit_mean"]:.2%}')
- print("]")
diff --git a/freqtrade/persistence/key_value_store.py b/freqtrade/persistence/key_value_store.py
index 110a23d6c..6da7265d6 100644
--- a/freqtrade/persistence/key_value_store.py
+++ b/freqtrade/persistence/key_value_store.py
@@ -42,7 +42,7 @@ class _KeyValueStoreModel(ModelBase):
int_value: Mapped[Optional[int]]
-class KeyValueStore():
+class KeyValueStore:
"""
Generic bot-wide, persistent key-value store
Can be used to store generic values, e.g. very first bot startup time.
diff --git a/freqtrade/persistence/pairlock_middleware.py b/freqtrade/persistence/pairlock_middleware.py
index 29169a50d..dd6bacf3a 100644
--- a/freqtrade/persistence/pairlock_middleware.py
+++ b/freqtrade/persistence/pairlock_middleware.py
@@ -11,7 +11,7 @@ from freqtrade.persistence.models import PairLock
logger = logging.getLogger(__name__)
-class PairLocks():
+class PairLocks:
"""
Pairlocks middleware class
Abstracts the database layer away so it becomes optional - which will be necessary to support
diff --git a/freqtrade/persistence/trade_model.py b/freqtrade/persistence/trade_model.py
index 4d3564df6..35a44e3fc 100644
--- a/freqtrade/persistence/trade_model.py
+++ b/freqtrade/persistence/trade_model.py
@@ -283,7 +283,7 @@ class Order(ModelBase):
return Order.session.scalars(select(Order).filter(Order.order_id == order_id)).first()
-class LocalTrade():
+class LocalTrade:
"""
Trade database model.
Used in backtesting - must be aligned to Trade model!
diff --git a/freqtrade/plugins/pairlist/VolumePairList.py b/freqtrade/plugins/pairlist/VolumePairList.py
index 0d5e33847..9e4a4fca9 100644
--- a/freqtrade/plugins/pairlist/VolumePairList.py
+++ b/freqtrade/plugins/pairlist/VolumePairList.py
@@ -13,9 +13,8 @@ from freqtrade.constants import Config, ListPairsWithTimeframes
from freqtrade.exceptions import OperationalException
from freqtrade.exchange import timeframe_to_minutes, timeframe_to_prev_date
from freqtrade.exchange.types import Tickers
-from freqtrade.misc import format_ms_time
from freqtrade.plugins.pairlist.IPairList import IPairList, PairlistParameter
-from freqtrade.util import dt_now
+from freqtrade.util import dt_now, format_ms_time
logger = logging.getLogger(__name__)
diff --git a/freqtrade/plugins/protectionmanager.py b/freqtrade/plugins/protectionmanager.py
index 54432e677..6e55ade11 100644
--- a/freqtrade/plugins/protectionmanager.py
+++ b/freqtrade/plugins/protectionmanager.py
@@ -15,7 +15,7 @@ from freqtrade.resolvers import ProtectionResolver
logger = logging.getLogger(__name__)
-class ProtectionManager():
+class ProtectionManager:
def __init__(self, config: Config, protections: List) -> None:
self._config = config
diff --git a/freqtrade/rpc/api_server/api_ws.py b/freqtrade/rpc/api_server/api_ws.py
index b253d66c2..40a5a75fd 100644
--- a/freqtrade/rpc/api_server/api_ws.py
+++ b/freqtrade/rpc/api_server/api_ws.py
@@ -7,12 +7,14 @@ from fastapi.websockets import WebSocket
from pydantic import ValidationError
from freqtrade.enums import RPCMessageType, RPCRequestType
+from freqtrade.exceptions import FreqtradeException
from freqtrade.rpc.api_server.api_auth import validate_ws_token
from freqtrade.rpc.api_server.deps import get_message_stream, get_rpc
from freqtrade.rpc.api_server.ws.channel import WebSocketChannel, create_channel
from freqtrade.rpc.api_server.ws.message_stream import MessageStream
-from freqtrade.rpc.api_server.ws_schemas import (WSAnalyzedDFMessage, WSMessageSchema,
- WSRequestSchema, WSWhitelistMessage)
+from freqtrade.rpc.api_server.ws_schemas import (WSAnalyzedDFMessage, WSErrorMessage,
+ WSMessageSchema, WSRequestSchema,
+ WSWhitelistMessage)
from freqtrade.rpc.rpc import RPC
@@ -27,7 +29,13 @@ async def channel_reader(channel: WebSocketChannel, rpc: RPC):
Iterate over the messages from the channel and process the request
"""
async for message in channel:
- await _process_consumer_request(message, channel, rpc)
+ try:
+ await _process_consumer_request(message, channel, rpc)
+ except FreqtradeException:
+ logger.exception(f"Error processing request from {channel}")
+ response = WSErrorMessage(data='Error processing request')
+
+ await channel.send(response.dict(exclude_none=True))
async def channel_broadcaster(channel: WebSocketChannel, message_stream: MessageStream):
@@ -62,13 +70,13 @@ async def _process_consumer_request(
logger.error(f"Invalid request from {channel}: {e}")
return
- type, data = websocket_request.type, websocket_request.data
+ type_, data = websocket_request.type, websocket_request.data
response: WSMessageSchema
- logger.debug(f"Request of type {type} from {channel}")
+ logger.debug(f"Request of type {type_} from {channel}")
# If we have a request of type SUBSCRIBE, set the topics in this channel
- if type == RPCRequestType.SUBSCRIBE:
+ if type_ == RPCRequestType.SUBSCRIBE:
# If the request is empty, do nothing
if not data:
return
@@ -80,7 +88,7 @@ async def _process_consumer_request(
# We don't send a response for subscriptions
return
- elif type == RPCRequestType.WHITELIST:
+ elif type_ == RPCRequestType.WHITELIST:
# Get whitelist
whitelist = rpc._ws_request_whitelist()
@@ -88,7 +96,7 @@ async def _process_consumer_request(
response = WSWhitelistMessage(data=whitelist)
await channel.send(response.dict(exclude_none=True))
- elif type == RPCRequestType.ANALYZED_DF:
+ elif type_ == RPCRequestType.ANALYZED_DF:
# Limit the amount of candles per dataframe to 'limit' or 1500
limit = int(min(data.get('limit', 1500), 1500)) if data else None
pair = data.get('pair', None) if data else None
diff --git a/freqtrade/rpc/api_server/webserver_bgwork.py b/freqtrade/rpc/api_server/webserver_bgwork.py
index 3846fe138..13f45227e 100644
--- a/freqtrade/rpc/api_server/webserver_bgwork.py
+++ b/freqtrade/rpc/api_server/webserver_bgwork.py
@@ -14,7 +14,7 @@ class JobsContainer(TypedDict):
error: Optional[str]
-class ApiBG():
+class ApiBG:
# Backtesting type: Backtesting
bt: Dict[str, Any] = {
'bt': None,
diff --git a/freqtrade/rpc/api_server/ws_schemas.py b/freqtrade/rpc/api_server/ws_schemas.py
index 292672b60..af98bd532 100644
--- a/freqtrade/rpc/api_server/ws_schemas.py
+++ b/freqtrade/rpc/api_server/ws_schemas.py
@@ -66,4 +66,9 @@ class WSAnalyzedDFMessage(WSMessageSchema):
type: RPCMessageType = RPCMessageType.ANALYZED_DF
data: AnalyzedDFData
+
+class WSErrorMessage(WSMessageSchema):
+ type: RPCMessageType = RPCMessageType.EXCEPTION
+ data: str
+
# --------------------------------------------------------------------------
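The new `WSErrorMessage` pairs with the `channel_reader` change earlier in this diff: when a `FreqtradeException` is raised while processing a consumer request, the channel now answers with an EXCEPTION-typed payload instead of dropping the request silently. A minimal sketch of that payload; the serialisation call mirrors the one used in `channel_reader`:

    from freqtrade.rpc.api_server.ws_schemas import WSErrorMessage

    response = WSErrorMessage(data='Error processing request')
    payload = response.dict(exclude_none=True)
    # payload carries type=RPCMessageType.EXCEPTION plus the error string,
    # which is what channel_reader sends back over the websocket channel.
    print(payload)
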
diff --git a/freqtrade/strategy/interface.py b/freqtrade/strategy/interface.py
index d0655b504..1e9ebe1ae 100644
--- a/freqtrade/strategy/interface.py
+++ b/freqtrade/strategy/interface.py
@@ -168,7 +168,7 @@ class IStrategy(ABC, HyperStrategyMixin):
download_all_data_for_training(self.dp, self.config)
else:
            # Graceful failure if freqAI is disabled but "start" is called.
- class DummyClass():
+ class DummyClass:
def start(self, *args, **kwargs):
raise OperationalException(
'freqAI is not enabled. '
diff --git a/freqtrade/util/__init__.py b/freqtrade/util/__init__.py
index bed65a54b..92c79b899 100644
--- a/freqtrade/util/__init__.py
+++ b/freqtrade/util/__init__.py
@@ -1,5 +1,5 @@
from freqtrade.util.datetime_helpers import (dt_floor_day, dt_from_ts, dt_humanize, dt_now, dt_ts,
- dt_utc, shorten_date)
+ dt_utc, format_ms_time, shorten_date)
from freqtrade.util.ft_precise import FtPrecise
from freqtrade.util.periodic_cache import PeriodicCache
@@ -7,11 +7,12 @@ from freqtrade.util.periodic_cache import PeriodicCache
__all__ = [
'dt_floor_day',
'dt_from_ts',
+ 'dt_humanize',
'dt_now',
'dt_ts',
'dt_utc',
- 'dt_humanize',
- 'shorten_date',
+ 'format_ms_time',
'FtPrecise',
'PeriodicCache',
+ 'shorten_date',
]
diff --git a/freqtrade/util/datetime_helpers.py b/freqtrade/util/datetime_helpers.py
index 39d134e11..7f44cbdb0 100644
--- a/freqtrade/util/datetime_helpers.py
+++ b/freqtrade/util/datetime_helpers.py
@@ -61,3 +61,11 @@ def dt_humanize(dt: datetime, **kwargs) -> str:
:param kwargs: kwargs to pass to arrow's humanize()
"""
return arrow.get(dt).humanize(**kwargs)
+
+
+def format_ms_time(date: int) -> str:
+ """
+    Convert a timestamp in milliseconds to a readable date string.
+    :param date: timestamp in milliseconds since epoch
+ """
+ return datetime.fromtimestamp(date / 1000.0).strftime('%Y-%m-%dT%H:%M:%S')
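For reference, the relocated helper renders a millisecond epoch in the machine's local timezone (no tzinfo is passed to `fromtimestamp`), which is also what the moved test at the end of this diff asserts via `astimezone(None)`. A quick usage sketch, using the timestamp from that test:

    from freqtrade.util import format_ms_time

    # 2018-04-10 18:02:01 UTC; the rendered string depends on the local timezone
    print(format_ms_time(1523383321000))
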
diff --git a/requirements-dev.txt b/requirements-dev.txt
index b1633fd49..282d35101 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -7,10 +7,10 @@
-r docs/requirements-docs.txt
coveralls==3.3.1
-ruff==0.0.272
-mypy==1.3.0
+ruff==0.0.275
+mypy==1.4.1
pre-commit==3.3.3
-pytest==7.3.2
+pytest==7.4.0
pytest-asyncio==0.21.0
pytest-cov==4.1.0
pytest-mock==3.11.1
@@ -20,7 +20,7 @@ isort==5.12.0
time-machine==2.10.0
# Convert jupyter notebooks to markdown documents
-nbconvert==7.5.0
+nbconvert==7.6.0
# mypy types
types-cachetools==5.3.0.5
diff --git a/requirements-freqai-rl.txt b/requirements-freqai-rl.txt
index 2672f9c38..74c6d4ebe 100644
--- a/requirements-freqai-rl.txt
+++ b/requirements-freqai-rl.txt
@@ -5,7 +5,7 @@
torch==2.0.1
# Until these branches are released, we can use this
gymnasium==0.28.1
-stable_baselines3==2.0.0a13
+stable_baselines3==2.0.0
sb3_contrib>=2.0.0a9
# Progress bar for stable-baselines3 and sb3-contrib
tqdm==4.65.0
diff --git a/requirements-freqai.txt b/requirements-freqai.txt
index 2eacbaffb..4a25a86ff 100644
--- a/requirements-freqai.txt
+++ b/requirements-freqai.txt
@@ -9,4 +9,4 @@ catboost==1.2; 'arm' not in platform_machine
lightgbm==3.3.5
xgboost==1.7.6
tensorboard==2.13.0
-datasieve==0.1.5
+datasieve==0.1.7
diff --git a/requirements-hyperopt.txt b/requirements-hyperopt.txt
index 163fee75f..aafbad608 100644
--- a/requirements-hyperopt.txt
+++ b/requirements-hyperopt.txt
@@ -2,7 +2,8 @@
-r requirements.txt
# Required for hyperopt
-scipy==1.10.1
+scipy==1.11.1; python_version >= '3.9'
+scipy==1.10.1; python_version < '3.9'
scikit-learn==1.1.3
scikit-optimize==0.9.0
filelock==3.12.2
diff --git a/requirements.txt b/requirements.txt
index 6a2ae6318..615c00d2f 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,12 +1,12 @@
numpy==1.24.3
-pandas==2.0.2
+pandas==2.0.3
pandas-ta==0.3.14b
-ccxt==3.1.44
+ccxt==4.0.12
cryptography==41.0.1; platform_machine != 'armv7l'
cryptography==40.0.1; platform_machine == 'armv7l'
aiohttp==3.8.4
-SQLAlchemy==2.0.16
+SQLAlchemy==2.0.17
python-telegram-bot==20.3
# can't be hard-pinned due to telegram-bot pinning httpx with ~
httpx>=0.24.1
@@ -38,7 +38,7 @@ orjson==3.9.1
sdnotify==0.3.2
# API Server
-fastapi==0.97.0
+fastapi==0.99.1
pydantic==1.10.9
uvicorn==0.22.0
pyjwt==2.7.0
@@ -60,5 +60,5 @@ schedule==1.2.0
websockets==11.0.3
janus==1.0.0
-ast-comments==1.0.1
+ast-comments==1.1.0
packaging==23.1
diff --git a/scripts/rest_client.py b/scripts/rest_client.py
index f9c9858ed..2b4690287 100755
--- a/scripts/rest_client.py
+++ b/scripts/rest_client.py
@@ -29,7 +29,7 @@ logging.basicConfig(
logger = logging.getLogger("ft_rest_client")
-class FtRestClient():
+class FtRestClient:
def __init__(self, serverurl, username=None, password=None):
diff --git a/tests/exchange/test_ccxt_compat.py b/tests/exchange/test_ccxt_compat.py
index 404b51d10..c1967abcd 100644
--- a/tests/exchange/test_ccxt_compat.py
+++ b/tests/exchange/test_ccxt_compat.py
@@ -43,6 +43,7 @@ EXCHANGES = {
'hasQuoteVolumeFutures': True,
'leverage_tiers_public': False,
'leverage_in_spot_market': False,
+ 'trades_lookback_hours': 4,
'private_methods': [
'fapiPrivateGetPositionSideDual',
'fapiPrivateGetMultiAssetsMargin'
@@ -98,6 +99,7 @@ EXCHANGES = {
'timeframe': '1h',
'leverage_tiers_public': False,
'leverage_in_spot_market': True,
+ 'trades_lookback_hours': 12,
},
'kucoin': {
'pair': 'XRP/USDT',
@@ -342,7 +344,7 @@ def exchange_futures(request, exchange_conf, class_mocker):
@pytest.mark.longrun
-class TestCCXTExchange():
+class TestCCXTExchange:
def test_load_markets(self, exchange: EXCHANGE_FIXTURE_TYPE):
exch, exchangename = exchange
@@ -640,7 +642,21 @@ class TestCCXTExchange():
assert isinstance(funding_fee, float)
# assert funding_fee > 0
- # TODO: tests fetch_trades (?)
+ def test_ccxt__async_get_trade_history(self, exchange: EXCHANGE_FIXTURE_TYPE):
+ exch, exchangename = exchange
+ if not (lookback := EXCHANGES[exchangename].get('trades_lookback_hours')):
+ pytest.skip('test_fetch_trades not enabled for this exchange')
+ pair = EXCHANGES[exchangename]['pair']
+ since = int((datetime.now(timezone.utc) - timedelta(hours=lookback)).timestamp() * 1000)
+ res = exch.loop.run_until_complete(
+ exch._async_get_trade_history(pair, since, None, None)
+ )
+ assert len(res) == 2
+ res_pair, res_trades = res
+ assert res_pair == pair
+ assert isinstance(res_trades, list)
+ assert res_trades[0][0] >= since
+ assert len(res_trades) > 1200
def test_ccxt_get_fee(self, exchange: EXCHANGE_FIXTURE_TYPE):
exch, exchangename = exchange
diff --git a/tests/optimize/test_backtesting.py b/tests/optimize/test_backtesting.py
index bef942b43..a333cda9d 100644
--- a/tests/optimize/test_backtesting.py
+++ b/tests/optimize/test_backtesting.py
@@ -1437,9 +1437,11 @@ def test_backtest_start_multi_strat(default_conf, mocker, caplog, testdatadir):
strattable_mock = MagicMock()
strat_summary = MagicMock()
- mocker.patch.multiple('freqtrade.optimize.optimize_reports',
+ mocker.patch.multiple('freqtrade.optimize.optimize_reports.bt_output',
text_table_bt_results=text_table_mock,
text_table_strategy=strattable_mock,
+ )
+ mocker.patch.multiple('freqtrade.optimize.optimize_reports.optimize_reports',
generate_pair_metrics=MagicMock(),
generate_exit_reason_stats=sell_reason_mock,
generate_strategy_comparison=strat_summary,
diff --git a/tests/optimize/test_optimize_reports.py b/tests/optimize/test_optimize_reports.py
index 82e8a46fb..7b85e7978 100644
--- a/tests/optimize/test_optimize_reports.py
+++ b/tests/optimize/test_optimize_reports.py
@@ -14,15 +14,16 @@ from freqtrade.data.btanalysis import (get_latest_backtest_filename, load_backte
load_backtest_stats)
from freqtrade.edge import PairInfo
from freqtrade.enums import ExitType
-from freqtrade.optimize.optimize_reports import (_get_resample_from_period, generate_backtest_stats,
- generate_daily_stats, generate_edge_table,
- generate_exit_reason_stats, generate_pair_metrics,
+from freqtrade.optimize.optimize_reports import (generate_backtest_stats, generate_daily_stats,
+ generate_edge_table, generate_exit_reason_stats,
+ generate_pair_metrics,
generate_periodic_breakdown_stats,
generate_strategy_comparison,
generate_trading_stats, show_sorted_pairlist,
store_backtest_analysis_results,
store_backtest_stats, text_table_bt_results,
text_table_exit_reason, text_table_strategy)
+from freqtrade.optimize.optimize_reports.optimize_reports import _get_resample_from_period
from freqtrade.resolvers.strategy_resolver import StrategyResolver
from freqtrade.util import dt_ts
from freqtrade.util.datetime_helpers import dt_from_ts, dt_utc
@@ -209,7 +210,7 @@ def test_generate_backtest_stats(default_conf, testdatadir, tmpdir):
def test_store_backtest_stats(testdatadir, mocker):
- dump_mock = mocker.patch('freqtrade.optimize.optimize_reports.file_dump_json')
+ dump_mock = mocker.patch('freqtrade.optimize.optimize_reports.bt_storage.file_dump_json')
store_backtest_stats(testdatadir, {'metadata': {}}, '2022_01_01_15_05_13')
@@ -228,7 +229,8 @@ def test_store_backtest_stats(testdatadir, mocker):
def test_store_backtest_candles(testdatadir, mocker):
- dump_mock = mocker.patch('freqtrade.optimize.optimize_reports.file_dump_joblib')
+ dump_mock = mocker.patch(
+ 'freqtrade.optimize.optimize_reports.bt_storage.file_dump_joblib')
candle_dict = {'DefStrat': {'UNITTEST/BTC': pd.DataFrame()}}
diff --git a/tests/test_misc.py b/tests/test_misc.py
index 03a236d73..21c832c2c 100644
--- a/tests/test_misc.py
+++ b/tests/test_misc.py
@@ -1,6 +1,5 @@
# pragma pylint: disable=missing-docstring,C0103
-import datetime
from copy import deepcopy
from pathlib import Path
from unittest.mock import MagicMock
@@ -9,7 +8,7 @@ import pandas as pd
import pytest
from freqtrade.misc import (dataframe_to_json, decimals_per_coin, deep_merge_dicts, file_dump_json,
- file_load_json, format_ms_time, json_to_dataframe, pair_to_filename,
+ file_load_json, json_to_dataframe, pair_to_filename,
parse_db_uri_for_logging, plural, render_template,
render_template_with_fallback, round_coin_value, safe_value_fallback,
safe_value_fallback2)
@@ -91,19 +90,6 @@ def test_pair_to_filename(pair, expected_result):
assert pair_s == expected_result
-def test_format_ms_time() -> None:
- # Date 2018-04-10 18:02:01
- date_in_epoch_ms = 1523383321000
- date = format_ms_time(date_in_epoch_ms)
- assert type(date) is str
- res = datetime.datetime(2018, 4, 10, 18, 2, 1, tzinfo=datetime.timezone.utc)
- assert date == res.astimezone(None).strftime('%Y-%m-%dT%H:%M:%S')
- res = datetime.datetime(2017, 12, 13, 8, 2, 1, tzinfo=datetime.timezone.utc)
- # Date 2017-12-13 08:02:01
- date_in_epoch_ms = 1513152121000
- assert format_ms_time(date_in_epoch_ms) == res.astimezone(None).strftime('%Y-%m-%dT%H:%M:%S')
-
-
def test_safe_value_fallback():
dict1 = {'keya': None, 'keyb': 2, 'keyc': 5, 'keyd': None}
assert safe_value_fallback(dict1, 'keya', 'keyb') == 2
diff --git a/tests/utils/test_datetime_helpers.py b/tests/utils/test_datetime_helpers.py
index 5aec0da54..222410027 100644
--- a/tests/utils/test_datetime_helpers.py
+++ b/tests/utils/test_datetime_helpers.py
@@ -3,8 +3,8 @@ from datetime import datetime, timedelta, timezone
import pytest
import time_machine
-from freqtrade.util import dt_floor_day, dt_from_ts, dt_now, dt_ts, dt_utc, shorten_date
-from freqtrade.util.datetime_helpers import dt_humanize
+from freqtrade.util import (dt_floor_day, dt_from_ts, dt_humanize, dt_now, dt_ts, dt_utc,
+ format_ms_time, shorten_date)
def test_dt_now():
@@ -57,3 +57,16 @@ def test_dt_humanize() -> None:
assert dt_humanize(dt_now()) == 'just now'
assert dt_humanize(dt_now(), only_distance=True) == 'instantly'
assert dt_humanize(dt_now() - timedelta(hours=16), only_distance=True) == '16 hours'
+
+
+def test_format_ms_time() -> None:
+ # Date 2018-04-10 18:02:01
+ date_in_epoch_ms = 1523383321000
+ date = format_ms_time(date_in_epoch_ms)
+ assert type(date) is str
+ res = datetime(2018, 4, 10, 18, 2, 1, tzinfo=timezone.utc)
+ assert date == res.astimezone(None).strftime('%Y-%m-%dT%H:%M:%S')
+ res = datetime(2017, 12, 13, 8, 2, 1, tzinfo=timezone.utc)
+ # Date 2017-12-13 08:02:01
+ date_in_epoch_ms = 1513152121000
+ assert format_ms_time(date_in_epoch_ms) == res.astimezone(None).strftime('%Y-%m-%dT%H:%M:%S')