Merge pull request #8862 from freqtrade/new_release

New release 2023.6
This commit is contained in:
Matthias
2023-07-07 20:08:27 +02:00
committed by GitHub
125 changed files with 4713 additions and 2519 deletions

View File

@@ -1,11 +1,12 @@
FROM freqtradeorg/freqtrade:develop
FROM freqtradeorg/freqtrade:develop_freqairl
USER root
# Install dependencies
COPY requirements-dev.txt /freqtrade/
RUN apt-get update \
&& apt-get -y install git mercurial sudo vim build-essential \
&& apt-get -y install --no-install-recommends apt-utils dialog \
&& apt-get -y install --no-install-recommends git sudo vim build-essential \
&& apt-get clean \
&& mkdir -p /home/ftuser/.vscode-server /home/ftuser/.vscode-server-insiders /home/ftuser/commandhistory \
&& echo "export PROMPT_COMMAND='history -a'" >> /home/ftuser/.bashrc \

View File

@@ -19,23 +19,24 @@
"postCreateCommand": "freqtrade create-userdir --userdir user_data/",
"workspaceFolder": "/workspaces/freqtrade",
"settings": {
"terminal.integrated.shell.linux": "/bin/bash",
"editor.insertSpaces": true,
"files.trimTrailingWhitespace": true,
"[markdown]": {
"files.trimTrailingWhitespace": false,
"customizations": {
"settings": {
"terminal.integrated.shell.linux": "/bin/bash",
"editor.insertSpaces": true,
"files.trimTrailingWhitespace": true,
"[markdown]": {
"files.trimTrailingWhitespace": false,
},
"python.pythonPath": "/usr/local/bin/python",
},
"python.pythonPath": "/usr/local/bin/python",
},
// Add the IDs of extensions you want installed when the container is created.
"extensions": [
"ms-python.python",
"ms-python.vscode-pylance",
"davidanson.vscode-markdownlint",
"ms-azuretools.vscode-docker",
"vscode-icons-team.vscode-icons",
],
// Add the IDs of extensions you want installed when the container is created.
"extensions": [
"ms-python.python",
"ms-python.vscode-pylance",
"davidanson.vscode-markdownlint",
"ms-azuretools.vscode-docker",
"vscode-icons-team.vscode-icons",
],
}
}

View File

@@ -136,6 +136,7 @@ jobs:
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
check-latest: true
- name: Cache_dependencies
uses: actions/cache@v3
@@ -159,7 +160,8 @@ jobs:
- name: Installation - macOS
if: runner.os == 'macOS'
run: |
brew update
# brew update
# TODO: Should be the brew upgrade
# homebrew fails to update python due to unlinking failures
# https://github.com/actions/runner-images/issues/6817
rm /usr/local/bin/2to3 || true
@@ -459,7 +461,7 @@ jobs:
python setup.py sdist bdist_wheel
- name: Publish to PyPI (Test)
uses: pypa/gh-action-pypi-publish@v1.8.6
uses: pypa/gh-action-pypi-publish@v1.8.7
if: (github.event_name == 'release')
with:
user: __token__
@@ -467,7 +469,7 @@ jobs:
repository_url: https://test.pypi.org/legacy/
- name: Publish to PyPI
uses: pypa/gh-action-pypi-publish@v1.8.6
uses: pypa/gh-action-pypi-publish@v1.8.7
if: (github.event_name == 'release')
with:
user: __token__

View File

@@ -8,17 +8,17 @@ repos:
# stages: [push]
- repo: https://github.com/pre-commit/mirrors-mypy
rev: "v1.0.1"
rev: "v1.3.0"
hooks:
- id: mypy
exclude: build_helpers
additional_dependencies:
- types-cachetools==5.3.0.5
- types-filelock==3.2.7
- types-requests==2.30.0.0
- types-requests==2.31.0.1
- types-tabulate==0.9.0.2
- types-python-dateutil==2.8.19.13
- SQLAlchemy==2.0.15
- SQLAlchemy==2.0.17
# stages: [push]
- repo: https://github.com/pycqa/isort
@@ -30,7 +30,7 @@ repos:
- repo: https://github.com/charliermarsh/ruff-pre-commit
# Ruff version.
rev: 'v0.0.263'
rev: 'v0.0.270'
hooks:
- id: ruff

View File

@@ -1,4 +1,4 @@
FROM python:3.10.11-slim-bullseye as base
FROM python:3.11.4-slim-bullseye as base
# Setup env
ENV LANG C.UTF-8

View File

@@ -136,7 +136,7 @@ class MyAwesomeStrategy(IStrategy):
### Dynamic parameters
Parameters can also be defined dynamically, but must be available to the instance once the * [`bot_start()` callback](strategy-callbacks.md#bot-start) has been called.
Parameters can also be defined dynamically, but must be available to the instance once the [`bot_start()` callback](strategy-callbacks.md#bot-start) has been called.
``` python

View File

@@ -6,7 +6,7 @@ To download data (candles / OHLCV) needed for backtesting and hyperoptimization
If no additional parameter is specified, freqtrade will download data for `"1m"` and `"5m"` timeframes for the last 30 days.
Exchange and pairs will come from `config.json` (if specified using `-c/--config`).
Otherwise `--exchange` becomes mandatory.
Without a provided configuration, `--exchange` becomes mandatory.
You can use a relative timerange (`--days 20`) or an absolute starting point (`--timerange 20200101-`). For incremental downloads, the relative approach should be used.
@@ -83,40 +83,47 @@ Common arguments:
```
!!! Tip "Downloading all data for one quote currency"
Often, you'll want to download data for all pairs of a specific quote-currency. In such cases, you can use the following shorthand:
`freqtrade download-data --exchange binance --pairs .*/USDT <...>`. The provided "pairs" string will be expanded to contain all active pairs on the exchange.
To also download data for inactive (delisted) pairs, add `--include-inactive-pairs` to the command.
!!! Note "Startup period"
`download-data` is a strategy-independent command. The idea is to download a big chunk of data once, and then iteratively increase the amount of data stored.
For that reason, `download-data` does not care about the "startup-period" defined in a strategy. It's up to the user to download additional days if the backtest should start at a specific point in time (while respecting startup period).
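For a rough sense of how much extra history a startup period implies, a quick back-of-the-envelope calculation can help (the `startup_candle_count` below is a hypothetical strategy setting, purely for illustration):

```python
# Illustrative only: estimate extra days of history needed for a strategy's
# startup period (startup_candle_count is a hypothetical example value).
startup_candle_count = 400   # hypothetical strategy setting
timeframe_minutes = 5        # "5m" timeframe
extra_days = startup_candle_count * timeframe_minutes / (60 * 24)
print(f"Download ~{extra_days:.1f} additional days before your backtest start")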
### Pairs file
### Start download
In alternative to the whitelist from `config.json`, a `pairs.json` file can be used.
If you are using Binance for example:
- create a directory `user_data/data/binance` and copy or create the `pairs.json` file in that directory.
- update the `pairs.json` file to contain the currency pairs you are interested in.
A very simple command (assuming an available `config.json` file) can look as follows.
```bash
mkdir -p user_data/data/binance
touch user_data/data/binance/pairs.json
freqtrade download-data --exchange binance
```
The format of the `pairs.json` file is a simple json list.
Mixing different stake-currencies is allowed for this file, since it's only used for downloading.
This will download historical candle (OHLCV) data for all the currency pairs defined in the configuration.
``` json
[
"ETH/BTC",
"ETH/USDT",
"BTC/USDT",
"XRP/ETH"
]
Alternatively, specify the pairs directly
```bash
freqtrade download-data --exchange binance --pairs ETH/USDT XRP/USDT BTC/USDT
```
!!! Tip "Downloading all data for one quote currency"
Often, you'll want to download data for all pairs of a specific quote-currency. In such cases, you can use the following shorthand:
`freqtrade download-data --exchange binance --pairs .*/USDT <...>`. The provided "pairs" string will be expanded to contain all active pairs on the exchange.
To also download data for inactive (delisted) pairs, add `--include-inactive-pairs` to the command.
or as regex (in this case, to download all active USDT pairs)
```bash
freqtrade download-data --exchange binance --pairs .*/USDT
```
### Other Notes
* To use a different directory than the exchange specific default, use `--datadir user_data/data/some_directory`.
* To change the exchange used to download the historical data from, please use a different configuration file (you'll probably need to adjust rate limits etc.)
* To use `pairs.json` from some other directory, use `--pairs-file some_other_dir/pairs.json`.
* To download historical candle (OHLCV) data for only 10 days, use `--days 10` (defaults to 30 days).
* To download historical candle (OHLCV) data from a fixed starting point, use `--timerange 20200101-` - which will download all data from January 1st, 2020.
* Use `--timeframes` to specify which timeframes to download the historical candle (OHLCV) data for. Default is `--timeframes 1m 5m`, which will download 1-minute and 5-minute data.
* To use exchange, timeframe and list of pairs as defined in your configuration file, use the `-c/--config` option. With this, the script uses the whitelist defined in the config as the list of currency pairs to download data for and does not require the pairs.json file. You can combine `-c/--config` with most other options.
??? Note "Permission denied errors"
If your configuration directory `user_data` was made by docker, you may get the following error:
@@ -131,39 +138,7 @@ Mixing different stake-currencies is allowed for this file, since it's only used
sudo chown -R $UID:$GID user_data
```
### Start download
Then run:
```bash
freqtrade download-data --exchange binance
```
This will download historical candle (OHLCV) data for all the currency pairs you defined in `pairs.json`.
Alternatively, specify the pairs directly
```bash
freqtrade download-data --exchange binance --pairs ETH/USDT XRP/USDT BTC/USDT
```
or as regex (to download all active USDT pairs)
```bash
freqtrade download-data --exchange binance --pairs .*/USDT
```
### Other Notes
- To use a different directory than the exchange specific default, use `--datadir user_data/data/some_directory`.
- To change the exchange used to download the historical data from, please use a different configuration file (you'll probably need to adjust rate limits etc.)
- To use `pairs.json` from some other directory, use `--pairs-file some_other_dir/pairs.json`.
- To download historical candle (OHLCV) data for only 10 days, use `--days 10` (defaults to 30 days).
- To download historical candle (OHLCV) data from a fixed starting point, use `--timerange 20200101-` - which will download all data from January 1st, 2020.
- Use `--timeframes` to specify which timeframes to download the historical candle (OHLCV) data for. Default is `--timeframes 1m 5m`, which will download 1-minute and 5-minute data.
- To use exchange, timeframe and list of pairs as defined in your configuration file, use the `-c/--config` option. With this, the script uses the whitelist defined in the config as the list of currency pairs to download data for and does not require the pairs.json file. You can combine `-c/--config` with most other options.
#### Download additional data before the current timerange
### Download additional data before the current timerange
Assuming you downloaded all data from 2022 (`--timerange 20220101-`) - but you'd now like to also backtest with earlier data.
You can do so by using the `--prepend` flag, combined with `--timerange` - specifying an end-date.
@@ -238,7 +213,36 @@ Size has been taken from the BTC/USDT 1m spot combination for the timerange spec
To have a best performance/size mix, we recommend the use of either feather or parquet.
#### Sub-command convert data
### Pairs file
In alternative to the whitelist from `config.json`, a `pairs.json` file can be used.
If you are using Binance for example:
* create a directory `user_data/data/binance` and copy or create the `pairs.json` file in that directory.
* update the `pairs.json` file to contain the currency pairs you are interested in.
```bash
mkdir -p user_data/data/binance
touch user_data/data/binance/pairs.json
```
The format of the `pairs.json` file is a simple json list.
Mixing different stake-currencies is allowed for this file, since it's only used for downloading.
``` json
[
"ETH/BTC",
"ETH/USDT",
"BTC/USDT",
"XRP/ETH"
]
```
!!! Note
The `pairs.json` file is only used when no configuration is loaded (implicitly by naming, or via `--config` flag).
You can force the usage of this file via `--pairs-file pairs.json` - however, we recommend using the pairlist from within the configuration, either via the `exchange.pair_whitelist` or the `pairs` setting.
## Sub-command convert data
```
usage: freqtrade convert-data [-h] [-v] [--logfile FILE] [-V] [-c PATH]
@@ -290,7 +294,7 @@ Common arguments:
```
##### Example converting data
### Example converting data
The following command will convert all candle (OHLCV) data available in `~/.freqtrade/data/binance` from json to jsongz, saving diskspace in the process.
It'll also remove original json data files (`--erase` parameter).
@@ -299,7 +303,7 @@ It'll also remove original json data files (`--erase` parameter).
freqtrade convert-data --format-from json --format-to jsongz --datadir ~/.freqtrade/data/binance -t 5m 15m --erase
```
#### Sub-command convert trade data
## Sub-command convert trade data
```
usage: freqtrade convert-trade-data [-h] [-v] [--logfile FILE] [-V] [-c PATH]
@@ -342,7 +346,7 @@ Common arguments:
```
##### Example converting trades
### Example converting trades
The following command will convert all available trade-data in `~/.freqtrade/data/kraken` from jsongz to json.
It'll also remove original jsongz data files (`--erase` parameter).
@@ -351,7 +355,7 @@ It'll also remove original jsongz data files (`--erase` parameter).
freqtrade convert-trade-data --format-from jsongz --format-to json --datadir ~/.freqtrade/data/kraken --erase
```
### Sub-command trades to ohlcv
## Sub-command trades to ohlcv
When you need to use `--dl-trades` (kraken only) to download data, conversion of trades data to ohlcv data is the last step.
This command will allow you to repeat this last step for additional timeframes without re-downloading the data.
@@ -400,13 +404,13 @@ Common arguments:
```
#### Example trade-to-ohlcv conversion
### Example trade-to-ohlcv conversion
``` bash
freqtrade trades-to-ohlcv --exchange kraken -t 5m 1h 1d --pairs BTC/EUR ETH/EUR
```
### Sub-command list-data
## Sub-command list-data
You can get a list of downloaded data using the `list-data` sub-command.
@@ -451,7 +455,7 @@ Common arguments:
```
#### Example list-data
### Example list-data
```bash
> freqtrade list-data --userdir ~/.freqtrade/user_data/
@@ -465,7 +469,7 @@ ETH/BTC 5m, 15m, 30m, 1h, 2h, 4h, 6h, 12h, 1d
ETH/USDT 5m, 15m, 30m, 1h, 2h, 4h
```
### Trades (tick) data
## Trades (tick) data
By default, the `download-data` sub-command downloads candle (OHLCV) data. Some exchanges also provide historic trade-data via their API.
This data can be useful if you need many different timeframes, since it is only downloaded once, and then resampled locally to the desired timeframes.
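As a rough illustration of why trade data is so flexible (this is not freqtrade's internal code, just a pandas sketch), raw trades can be resampled locally into candles of any size:

```python
import pandas as pd

# Illustrative sketch: resample raw trades into 5-minute OHLCV candles.
trades = pd.DataFrame({
    "timestamp": pd.to_datetime(["2023-01-01 00:00:01", "2023-01-01 00:02:30",
                                 "2023-01-01 00:07:15"]),
    "price": [16500.0, 16510.5, 16498.2],
    "amount": [0.1, 0.05, 0.2],
}).set_index("timestamp")

ohlcv = trades["price"].resample("5min").ohlc()
ohlcv["volume"] = trades["amount"].resample("5min").sum()
print(ohlcv)
```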

View File

@@ -43,10 +43,10 @@ The FreqAI strategy requires including the following lines of code in the standa
def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
# the model will return all labels created by user in `set_freqai_labels()`
# the model will return all labels created by user in `set_freqai_targets()`
# (& appended targets), an indication of whether or not the prediction should be accepted,
# the target mean/std values for each of the labels created by user in
# `feature_engineering_*` for each training period.
# `set_freqai_targets()` for each training period.
dataframe = self.freqai.start(dataframe, metadata, self)
@@ -160,7 +160,7 @@ Below are the values you can expect to include/use inside a typical strategy dat
|------------|-------------|
| `df['&*']` | Any dataframe column prepended with `&` in `set_freqai_targets()` is treated as a training target (label) inside FreqAI (typically following the naming convention `&-s*`). For example, to predict the close price 40 candles into the future, you would set `df['&-s_close'] = df['close'].shift(-self.freqai_info["feature_parameters"]["label_period_candles"])` with `"label_period_candles": 40` in the config. FreqAI makes the predictions and gives them back under the same key (`df['&-s_close']`) to be used in `populate_entry/exit_trend()`. <br> **Datatype:** Depends on the output of the model.
| `df['&*_std/mean']` | Standard deviation and mean values of the defined labels during training (or live tracking with `fit_live_predictions_candles`). Commonly used to understand the rarity of a prediction (use the z-score as shown in `templates/FreqaiExampleStrategy.py` and explained [here](#creating-a-dynamic-target-threshold) to evaluate how often a particular prediction was observed during training or historically with `fit_live_predictions_candles`). <br> **Datatype:** Float.
| `df['do_predict']` | Indication of an outlier data point. The return value is integer between -2 and 2, which lets you know if the prediction is trustworthy or not. `do_predict==1` means that the prediction is trustworthy. If the Dissimilarity Index (DI, see details [here](freqai-feature-engineering.md#identifying-outliers-with-the-dissimilarity-index-di)) of the input data point is above the threshold defined in the config, FreqAI will subtract 1 from `do_predict`, resulting in `do_predict==0`. If `use_SVM_to_remove_outliers()` is active, the Support Vector Machine (SVM, see details [here](freqai-feature-engineering.md#identifying-outliers-using-a-support-vector-machine-svm)) may also detect outliers in training and prediction data. In this case, the SVM will also subtract 1 from `do_predict`. If the input data point was considered an outlier by the SVM but not by the DI, or vice versa, the result will be `do_predict==0`. If both the DI and the SVM considers the input data point to be an outlier, the result will be `do_predict==-1`. As with the SVM, if `use_DBSCAN_to_remove_outliers` is active, DBSCAN (see details [here](freqai-feature-engineering.md#identifying-outliers-with-dbscan)) may also detect outliers and subtract 1 from `do_predict`. Hence, if both the SVM and DBSCAN are active and identify a datapoint that was above the DI threshold as an outlier, the result will be `do_predict==-2`. A particular case is when `do_predict == 2`, which means that the model has expired due to exceeding `expired_hours`. <br> **Datatype:** Integer between -2 and 2.
| `df['do_predict']` | Indication of an outlier data point. The return value is integer between -2 and 2, which lets you know if the prediction is trustworthy or not. `do_predict==1` means that the prediction is trustworthy. If the Dissimilarity Index (DI, see details [here](freqai-feature-engineering.md#identifying-outliers-with-the-dissimilarity-index-di)) of the input data point is above the threshold defined in the config, FreqAI will subtract 1 from `do_predict`, resulting in `do_predict==0`. If `use_SVM_to_remove_outliers` is active, the Support Vector Machine (SVM, see details [here](freqai-feature-engineering.md#identifying-outliers-using-a-support-vector-machine-svm)) may also detect outliers in training and prediction data. In this case, the SVM will also subtract 1 from `do_predict`. If the input data point was considered an outlier by the SVM but not by the DI, or vice versa, the result will be `do_predict==0`. If both the DI and the SVM considers the input data point to be an outlier, the result will be `do_predict==-1`. As with the SVM, if `use_DBSCAN_to_remove_outliers` is active, DBSCAN (see details [here](freqai-feature-engineering.md#identifying-outliers-with-dbscan)) may also detect outliers and subtract 1 from `do_predict`. Hence, if both the SVM and DBSCAN are active and identify a datapoint that was above the DI threshold as an outlier, the result will be `do_predict==-2`. A particular case is when `do_predict == 2`, which means that the model has expired due to exceeding `expired_hours`. <br> **Datatype:** Integer between -2 and 2.
| `df['DI_values']` | Dissimilarity Index (DI) values are proxies for the level of confidence FreqAI has in the prediction. A lower DI means the prediction is close to the training data, i.e., higher prediction confidence. See details about the DI [here](freqai-feature-engineering.md#identifying-outliers-with-the-dissimilarity-index-di). <br> **Datatype:** Float.
| `df['%*']` | Any dataframe column prepended with `%` in `feature_engineering_*()` is treated as a training feature. For example, you can include the RSI in the training feature set (similar to the example in `templates/FreqaiExampleStrategy.py`) by setting `df['%-rsi']`. See more details on how this is done [here](freqai-feature-engineering.md). <br> **Note:** Since the number of features prepended with `%` can multiply very quickly (10s of thousands of features are easily engineered using the multiplicative functionality of, e.g., `include_shifted_candles` and `include_timeframes` as described in the [parameter table](freqai-parameter-table.md)), these features are removed from the dataframe that is returned from FreqAI to the strategy. To keep a particular type of feature for plotting purposes, you would prepend it with `%%`. <br> **Datatype:** Depends on the output of the model.

View File

@@ -180,6 +180,9 @@ You can ask for each of the defined features to be included also for informative
In total, the number of features the user of the presented example strategy has created is: length of `include_timeframes` * no. features in `feature_engineering_expand_*()` * length of `include_corr_pairlist` * no. `include_shifted_candles` * length of `indicator_periods_candles`
$= 3 * 3 * 3 * 2 * 2 = 108$.
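As a quick sanity check of that arithmetic (using the values from the example above):

```python
# Feature count from the example: timeframes * base features * corr pairs
# * shifted candles * indicator periods
n_timeframes, n_base_features, n_corr_pairs = 3, 3, 3
n_shifted_candles, n_indicator_periods = 2, 2
total = n_timeframes * n_base_features * n_corr_pairs * n_shifted_candles * n_indicator_periods
assert total == 108
```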
!!! note "Learn more about creative feature engineering"
Check out our [medium article](https://emergentmethods.medium.com/freqai-from-price-to-prediction-6fadac18b665) geared toward helping users learn how to creatively engineer features.
### Gain finer control over `feature_engineering_*` functions with `metadata`
@@ -209,41 +212,7 @@ Another example, where the user wants to use live metrics from the trade databas
You need to set the standard dictionary in the config so that FreqAI can return proper dataframe shapes. These values will likely be overridden by the prediction model, but in the case where the model has yet to set them, or needs a default initial value, the pre-set values are what will be returned.
## Feature normalization
FreqAI is strict when it comes to data normalization. The train features, $X^{train}$, are always normalized to [-1, 1] using a shifted min-max normalization:
$$X^{train}_{norm} = 2 * \frac{X^{train} - X^{train}.min()}{X^{train}.max() - X^{train}.min()} - 1$$
All other data (test data and unseen prediction data in dry/live/backtest) is always automatically normalized to the training feature space according to industry standards. FreqAI stores all the metadata required to ensure that test and prediction features will be properly normalized and that predictions are properly denormalized. For this reason, it is not recommended to eschew industry standards and modify FreqAI internals - however - advanced users can do so by inheriting `train()` in their custom `IFreqaiModel` and using their own normalization functions.
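A minimal numpy sketch of that shifted min-max normalization (illustrative only, not FreqAI's internal implementation):

```python
import numpy as np

# Shifted min-max normalization: map each training feature column to [-1, 1]
def normalize_train(X_train: np.ndarray) -> np.ndarray:
    x_min, x_max = X_train.min(axis=0), X_train.max(axis=0)
    return 2 * (X_train - x_min) / (x_max - x_min) - 1
```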
## Data dimensionality reduction with Principal Component Analysis
You can reduce the dimensionality of your features by activating the `principal_component_analysis` in the config:
```json
"freqai": {
"feature_parameters" : {
"principal_component_analysis": true
}
}
```
This will perform PCA on the features and reduce their dimensionality so that the explained variance of the data set is >= 0.999. Reducing data dimensionality makes training the model faster and hence allows for more up-to-date models.
## Inlier metric
The `inlier_metric` is a metric aimed at quantifying how similar the features of a data point are to the most recent historical data points.
You define the lookback window by setting `inlier_metric_window` and FreqAI computes the distance between the present time point and each of the previous `inlier_metric_window` lookback points. A Weibull function is fit to each of the lookback distributions and its cumulative distribution function (CDF) is used to produce a quantile for each lookback point. The `inlier_metric` is then computed for each time point as the average of the corresponding lookback quantiles. The figure below explains the concept for an `inlier_metric_window` of 5.
![inlier-metric](assets/freqai_inlier-metric.jpg)
FreqAI adds the `inlier_metric` to the training features and hence gives the model access to a novel type of temporal information.
This function does **not** remove outliers from the data set.
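A simplified sketch of the idea (illustrative only - FreqAI fits Weibull distributions per lookback point across time, which this toy version collapses into a single fit over the current distances):

```python
import numpy as np
from scipy.stats import weibull_min

# Toy version of the inlier metric: distances from the current point to each
# of the `window` previous points, converted to quantiles via a Weibull CDF.
def inlier_metric(features: np.ndarray, window: int = 5) -> float:
    point, history = features[-1], features[-window - 1:-1]
    dists = np.linalg.norm(history - point, axis=1)
    shape, loc, scale = weibull_min.fit(dists)
    quantiles = weibull_min.cdf(dists, shape, loc=loc, scale=scale)
    return float(quantiles.mean())  # average lookback quantile
```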
## Weighting features for temporal importance
### Weighting features for temporal importance
FreqAI allows you to set a `weight_factor` to weight recent data more strongly than past data via an exponential function:
@@ -253,13 +222,103 @@ where $W_i$ is the weight of data point $i$ in a total set of $n$ data points. B
![weight-factor](assets/freqai_weight-factor.jpg)
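A sketch of such an exponential weighting (assuming the form described in the FreqAI documentation, where the weight decays as $\exp(-i / (W_{factor} \cdot n))$ with $i$ counted back from the most recent data point):

```python
import numpy as np

# Sketch (assumed form): W_i = exp(-i / (weight_factor * n)),
# where i counts back from the newest data point (newest weight == 1.0).
def temporal_weights(n: int, weight_factor: float) -> np.ndarray:
    i = np.arange(n)[::-1]  # oldest point gets the largest i
    return np.exp(-i / (weight_factor * n))

weights = temporal_weights(1000, weight_factor=0.9)
```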
## Building the data pipeline
By default, FreqAI builds a dynamic pipeline based on user configuration settings. The default settings are robust and designed to work with a variety of methods. These two steps are a `MinMaxScaler(-1,1)` and a `VarianceThreshold`, which removes any column that has 0 variance. Users can activate other steps with more configuration parameters. For example, if users add `use_SVM_to_remove_outliers: true` to the `freqai` config, then FreqAI will automatically add the [`SVMOutlierExtractor`](#identifying-outliers-using-a-support-vector-machine-svm) to the pipeline. Likewise, users can add `principal_component_analysis: true` to the `freqai` config to activate PCA. The [DissimilarityIndex](#identifying-outliers-with-the-dissimilarity-index-di) is activated with `DI_threshold: 1`. Noise can also be added to the data with `noise_standard_deviation: 0.1`, and users can add [DBSCAN](#identifying-outliers-with-dbscan) outlier removal with `use_DBSCAN_to_remove_outliers: true`.
!!! note "More information available"
Please review the [parameter table](freqai-parameter-table.md) for more information on these parameters.
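As a sketch, the default pipeline described above corresponds to something like the following (assuming the `datasieve` transforms used elsewhere on this page; the exact internal construction may differ):

```python
from datasieve.pipeline import Pipeline
from datasieve.transforms import SKLearnWrapper, VarianceThreshold
from sklearn.preprocessing import MinMaxScaler

# Approximation of FreqAI's default feature pipeline
feature_pipeline = Pipeline([
    ("const", VarianceThreshold(threshold=0)),                        # drop zero-variance columns
    ("scaler", SKLearnWrapper(MinMaxScaler(feature_range=(-1, 1)))),  # scale to [-1, 1]
])
```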
### Customizing the pipeline
Users are encouraged to customize the data pipeline to their needs by building their own. This can be done by simply setting `dk.feature_pipeline` to their desired `Pipeline` object inside their `IFreqaiModel` `train()` function, or, if they prefer not to touch the `train()` function, by overriding the `define_data_pipeline`/`define_label_pipeline` functions in their `IFreqaiModel`:
!!! note "More information available"
FreqAI uses the [`DataSieve`](https://github.com/emergentmethods/datasieve) pipeline, which follows the SKlearn pipeline API but adds, among other features, coherent point removal across the X, y, and sample_weight vectors, feature removal, and feature-name tracking.
``` python
from typing import Any, Dict

from datasieve.transforms import SKLearnWrapper, DissimilarityIndex
from datasieve.pipeline import Pipeline
from sklearn.preprocessing import QuantileTransformer, StandardScaler

from freqtrade.freqai.base_models.BaseRegressionModel import BaseRegressionModel
from freqtrade.freqai.data_kitchen import FreqaiDataKitchen


class MyFreqaiModel(BaseRegressionModel):
    """
    Some cool custom model
    """

    def fit(self, data_dictionary: Dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
        """
        My custom fit function
        """
        model = cool_model.fit()  # placeholder for fitting your actual model
        return model

    def define_data_pipeline(self) -> Pipeline:
        """
        User defines their custom feature pipeline here (if they wish)
        """
        feature_pipeline = Pipeline([
            ('qt', SKLearnWrapper(QuantileTransformer(output_distribution='normal'))),
            ('di', DissimilarityIndex(di_threshold=1)),
        ])

        return feature_pipeline

    def define_label_pipeline(self) -> Pipeline:
        """
        User defines their custom label pipeline here (if they wish)
        """
        label_pipeline = Pipeline([
            ('qt', SKLearnWrapper(StandardScaler())),
        ])

        return label_pipeline
```
Here, you are defining the exact pipeline that will be used for your feature set during training and prediction. You can use *most* SKLearn transformation steps by wrapping them in the `SKLearnWrapper` class as shown above. In addition, you can use any of the transformations available in the [`DataSieve` library](https://github.com/emergentmethods/datasieve).
You can easily add your own transformation by creating a class that inherits from the datasieve `BaseTransform` and implementing your `fit()`, `transform()` and `inverse_transform()` methods:
```python
from datasieve.transforms.base_transform import BaseTransform
# import whatever else you need


class MyCoolTransform(BaseTransform):
    def __init__(self, **kwargs):
        self.param1 = kwargs.get('param1', 1)

    def fit(self, X, y=None, sample_weight=None, feature_list=None, **kwargs):
        # do something with X, y, sample_weight, or/and feature_list
        return X, y, sample_weight, feature_list

    def transform(self, X, y=None, sample_weight=None,
                  feature_list=None, outlier_check=False, **kwargs):
        # do something with X, y, sample_weight, or/and feature_list
        return X, y, sample_weight, feature_list

    def inverse_transform(self, X, y=None, sample_weight=None, feature_list=None, **kwargs):
        # do/don't do something with X, y, sample_weight, or/and feature_list
        return X, y, sample_weight, feature_list
```
!!! note "Hint"
You can define this custom class in the same file as your `IFreqaiModel`.
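For example (hypothetical usage, reusing the names from the snippets above), the custom step can then be dropped into a pipeline next to wrapped SKLearn steps:

```python
from datasieve.pipeline import Pipeline
from datasieve.transforms import SKLearnWrapper
from sklearn.preprocessing import QuantileTransformer

# Hypothetical: plug the custom step into a feature pipeline
feature_pipeline = Pipeline([
    ('qt', SKLearnWrapper(QuantileTransformer(output_distribution='normal'))),
    ('my_cool', MyCoolTransform(param1=2)),
])
```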
### Migrating a custom `IFreqaiModel` to the new Pipeline
If you have created your own custom `IFreqaiModel` with a custom `train()`/`predict()` function, *and* you still rely on `data_cleaning_train/predict()`, then you will need to migrate to the new pipeline. If your model does *not* rely on `data_cleaning_train/predict()`, then you do not need to worry about this migration.
More details about the migration can be found [here](strategy_migration.md#freqai---new-data-pipeline).
## Outlier detection
Equity and crypto markets suffer from a high level of non-patterned noise in the form of outlier data points. FreqAI implements a variety of methods to identify such outliers and hence mitigate risk.
### Identifying outliers with the Dissimilarity Index (DI)
The Dissimilarity Index (DI) aims to quantify the uncertainty associated with each prediction made by the model.
You can tell FreqAI to remove outlier data points from the training/test data sets using the DI by including the following statement in the config:
@@ -271,7 +330,7 @@ You can tell FreqAI to remove outlier data points from the training/test data se
}
```
The DI allows predictions which are outliers (not existent in the model feature space) to be thrown out due to low levels of certainty. To do so, FreqAI measures the distance between each training data point (feature vector), $X_{a}$, and all other training data points:
This will add the `DissimilarityIndex` step to your `feature_pipeline` and set the threshold to 1. The DI allows predictions which are outliers (not existent in the model feature space) to be thrown out due to low levels of certainty. To do so, FreqAI measures the distance between each training data point (feature vector), $X_{a}$, and all other training data points:
$$ d_{ab} = \sqrt{\sum_{j=1}^p(X_{a,j}-X_{b,j})^2} $$
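To make the mechanics concrete, here is a toy numpy illustration of the idea (not FreqAI's implementation - it simply compares the distance of a new point to the training data against the typical pairwise training distance):

```python
import numpy as np

# Toy DI: how far is a new point from the training data, relative to the
# average pairwise distance within the training data itself?
def dissimilarity_index(X_train: np.ndarray, x_new: np.ndarray) -> float:
    diffs = X_train[:, None, :] - X_train[None, :, :]
    d_ab = np.sqrt((diffs ** 2).sum(axis=-1))
    d_bar = d_ab[np.triu_indices_from(d_ab, k=1)].mean()          # mean training distance
    d_k = np.sqrt(((X_train - x_new) ** 2).sum(axis=-1)).min()    # nearest training point
    return d_k / d_bar  # above the threshold => treat the prediction as an outlier
```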
@@ -305,9 +364,9 @@ You can tell FreqAI to remove outlier data points from the training/test data se
}
```
The SVM will be trained on the training data and any data point that the SVM deems to be beyond the feature space will be removed.
This will add the `SVMOutlierExtractor` step to your `feature_pipeline`. The SVM will be trained on the training data, and any data point that the SVM deems to be beyond the feature space will be removed.
FreqAI uses `sklearn.linear_model.SGDOneClassSVM` (details are available on scikit-learn's webpage [here](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.SGDOneClassSVM.html) (external website)) and you can elect to provide additional parameters for the SVM, such as `shuffle`, and `nu`.
You can elect to provide additional parameters for the SVM, such as `shuffle` and `nu`, via the `feature_parameters.svm_params` dictionary in the config.
The parameter `shuffle` is by default set to `False` to ensure consistent results. If it is set to `True`, running the SVM multiple times on the same data set might result in different outcomes due to `max_iter` being too low for the algorithm to reach the demanded `tol`. Increasing `max_iter` solves this issue but causes the procedure to take a longer time.
@@ -325,7 +384,7 @@ You can configure FreqAI to use DBSCAN to cluster and remove outliers from the t
}
```
DBSCAN is an unsupervised machine learning algorithm that clusters data without needing to know how many clusters there should be.
This will add the `DataSieveDBSCAN` step to your `feature_pipeline`. This is an unsupervised machine learning algorithm that clusters data without needing to know how many clusters there should be.
Given a number of data points $N$, and a distance $\varepsilon$, DBSCAN clusters the data set by setting all data points that have $N-1$ other data points within a distance of $\varepsilon$ as *core points*. A data point that is within a distance of $\varepsilon$ from a *core point* but that does not have $N-1$ other data points within a distance of $\varepsilon$ from itself is considered an *edge point*. A cluster is then the collection of *core points* and *edge points*. Data points that have no other data points at a distance $<\varepsilon$ are considered outliers. The figure below shows a cluster with $N = 3$.
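A minimal scikit-learn sketch of the same idea (illustrative; FreqAI wires this step into the pipeline for you):

```python
import numpy as np
from sklearn.cluster import DBSCAN

# DBSCAN labels points it cannot assign to any cluster as -1 (outliers)
X = np.random.default_rng(42).normal(size=(200, 5))
labels = DBSCAN(eps=1.5, min_samples=3).fit_predict(X)
outlier_mask = labels == -1
print(f"{outlier_mask.sum()} of {len(X)} points flagged as outliers")
```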

View File

@@ -76,7 +76,7 @@ pip install -r requirements-freqai.txt
### Usage with docker
If you are using docker, a dedicated tag with FreqAI dependencies is available as `:freqai`. As such - you can replace the image line in your docker compose file with `image: freqtradeorg/freqtrade:develop_freqai`. This image contains the regular FreqAI dependencies. Similar to native installs, Catboost will not be available on ARM based devices.
If you are using docker, a dedicated tag with FreqAI dependencies is available as `:freqai`. As such - you can replace the image line in your docker compose file with `image: freqtradeorg/freqtrade:develop_freqai`. This image contains the regular FreqAI dependencies. Similar to native installs, Catboost will not be available on ARM based devices. If you would like to use PyTorch or reinforcement learning, you should use the torch or RL tags: `image: freqtradeorg/freqtrade:develop_freqaitorch` or `image: freqtradeorg/freqtrade:develop_freqairl`.
!!! note "docker-compose-freqai.yml"
We do provide an explicit docker-compose file for this in `docker/docker-compose-freqai.yml` - which can be used via `docker compose -f docker/docker-compose-freqai.yml run ...` - or can be copied to replace the original docker file. This docker-compose file also contains a (disabled) section to enable GPU resources within docker containers. This obviously assumes the system has GPU resources available.
@@ -107,6 +107,13 @@ This is for performance reasons - FreqAI relies on making quick predictions/retr
it needs to download all the training data at the beginning of a dry/live instance. FreqAI stores and appends
new candles automatically for future retrains. This means that if new pairs arrive later in the dry run due to a volume pairlist, it will not have the data ready. However, FreqAI does work with the `ShufflePairlist` or a `VolumePairlist` which keeps the total pairlist constant (but reorders the pairs according to volume).
## Additional learning materials
Here we compile some external materials that provide deeper looks into various components of FreqAI:
- [Real-time head-to-head: Adaptive modeling of financial market data using XGBoost and CatBoost](https://emergentmethods.medium.com/real-time-head-to-head-adaptive-modeling-of-financial-market-data-using-xgboost-and-catboost-995a115a7495)
- [FreqAI - from price to prediction](https://emergentmethods.medium.com/freqai-from-price-to-prediction-6fadac18b665)
## Credits
FreqAI is developed by a group of individuals who all contribute specific skillsets to the project.

docs/lookahead-analysis.md Normal file
View File

@@ -0,0 +1,100 @@
# Lookahead analysis
This page explains how to validate your strategy in terms of look ahead bias.
Checking look ahead bias is the bane of any strategy since it is sometimes very easy to introduce backtest bias -
but very hard to detect.
Backtesting initializes all timestamps at once and calculates all indicators at the beginning.
This means that indicators or entry/exit signals can inadvertently look into future candles and falsify your backtest.
Lookahead-analysis requires historic data to be available.
To learn how to get data for the pairs and exchange you're interested in,
head over to the [Data Downloading](data-download.md) section of the documentation.
This command is built upon backtesting since it internally chains backtests and pokes at the strategy to provoke it to show look ahead bias.
This is done by not looking at the strategy itself - but at the results it returned.
The results are things like changed indicator-values and moved entries/exits compared to the full backtest.
You can use the same commands as for [Backtesting](backtesting.md).
It also supports the lookahead-analysis of freqai strategies.
- `--cache` is forced to "none".
- `--max-open-trades` is forced to be at least equal to the number of pairs.
- `--dry-run-wallet` is forced to be basically infinite.
## Lookahead-analysis command reference
```
usage: freqtrade lookahead-analysis [-h] [-v] [--logfile FILE] [-V] [-c PATH]
[-d PATH] [--userdir PATH] [-s NAME]
[--strategy-path PATH]
[--recursive-strategy-search]
[--freqaimodel NAME]
[--freqaimodel-path PATH] [-i TIMEFRAME]
[--timerange TIMERANGE]
[--data-format-ohlcv {json,jsongz,hdf5,feather,parquet}]
[--max-open-trades INT]
[--stake-amount STAKE_AMOUNT]
[--fee FLOAT] [-p PAIRS [PAIRS ...]]
[--enable-protections]
[--dry-run-wallet DRY_RUN_WALLET]
[--timeframe-detail TIMEFRAME_DETAIL]
[--strategy-list STRATEGY_LIST [STRATEGY_LIST ...]]
[--export {none,trades,signals}]
[--export-filename PATH]
[--breakdown {day,week,month} [{day,week,month} ...]]
[--cache {none,day,week,month}]
[--freqai-backtest-live-models]
[--minimum-trade-amount INT]
[--targeted-trade-amount INT]
[--lookahead-analysis-exportfilename LOOKAHEAD_ANALYSIS_EXPORTFILENAME]
options:
--minimum-trade-amount INT
Minimum trade amount for lookahead-analysis
--targeted-trade-amount INT
Targeted trade amount for lookahead analysis
--lookahead-analysis-exportfilename LOOKAHEAD_ANALYSIS_EXPORTFILENAME
Use this csv-filename to store lookahead-analysis-
results
```
!!! Note ""
The above output was reduced to the options which `lookahead-analysis` adds on top of the regular backtesting commands.
### Summary
Checks a given strategy for look ahead bias via lookahead-analysis.
Look ahead bias means that the backtest uses data from future candles, which makes the strategy unviable beyond backtesting
and produces false hope for the person backtesting.
### Introduction
Many strategies - without the programmer knowing - have fallen prey to look ahead bias.
Any backtest will populate the full dataframe, including all timestamps, at the beginning.
If the programmer is not careful, or is oblivious to how things work internally
(which can sometimes be really hard to find out), then the strategy will simply look into the future, making it seem amazing
but not realistic.
This command tries to verify that a strategy is free of the aforementioned look ahead bias.
### How does the command work?
It will start with a backtest of all pairs to generate a baseline for indicators and entries/exits.
After the backtest has run, it will check whether the `minimum-trade-amount` is met
and, if not, cancel the lookahead-analysis for this strategy.
After setting the baseline, it will then do additional runs for every entry and exit signal separately.
When a verification-backtest is done, it will compare the indicators at the signal candle (either entry or exit) against the baseline and report any bias.
After all signals have been verified or falsified, a result table will be generated for the user to see.
### Caveats
- `lookahead-analysis` can only verify / falsify the trades it actually calculated.
If the strategy has many different signals / signal types, it's up to you to select appropriate parameters to ensure that all signals have triggered at least once. Signals that never triggered will not have been verified.
This could lead to a false negative (the strategy would then be reported as non-biased).
- `lookahead-analysis` has access to everything that backtesting has, too.
Please don't use risky configurations such as enabling position stacking.
If you decide to do so anyway, then make doubly sure that you never run out of `max_open_trades` slots, nor of money in your wallet.

View File

@@ -1,6 +1,6 @@
markdown==3.3.7
mkdocs==1.4.3
mkdocs-material==9.1.14
mkdocs-material==9.1.17
mdx_truly_sane_lists==1.3
pymdown-extensions==10.0.1
jinja2==3.1.2

View File

@@ -342,16 +342,12 @@ The above configuration would therefore mean:
The calculation does include fees.
To disable ROI completely, set it to an insanely high number:
To disable ROI completely, set it to an empty dictionary:
```python
minimal_roi = {
    "0": 100
}

minimal_roi = {}
```
While technically not completely disabled, this would exit once the trade reaches 10000% Profit.
To use times based on candle duration (timeframe), the following snippet can be handy.
This will allow you to change the timeframe for the strategy, and ROI times will still be set as candles (e.g. after 3 candles ...)
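A sketch of that snippet (following the approach in the freqtrade documentation; the ROI values here are placeholders):

```python
from freqtrade.strategy import IStrategy
from freqtrade.exchange import timeframe_to_minutes

# Sketch of candle-based ROI: keys are expressed in candles via the timeframe
class AwesomeStrategy(IStrategy):
    timeframe = "1d"
    timeframe_mins = timeframe_to_minutes(timeframe)
    minimal_roi = {
        "0": 0.05,                       # 5% for the first 3 candles
        str(timeframe_mins * 3): 0.02,   # 2% after 3 candles
        str(timeframe_mins * 6): 0.01,   # 1% after 6 candles
    }
```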

View File

@@ -728,3 +728,86 @@ Targets now get their own, dedicated method.
return dataframe
```
### FreqAI - New data Pipeline
If you have created your own custom `IFreqaiModel` with a custom `train()`/`predict()` function, *and* you still rely on `data_cleaning_train/predict()`, then you will need to migrate to the new pipeline. If your model does *not* rely on `data_cleaning_train/predict()`, then you do not need to worry about this migration. That means that this migration guide is relevant for a very small percentage of power-users. If you stumbled upon this guide by mistake, feel free to inquire in depth about your problem in the Freqtrade discord server.
The conversion involves first removing `data_cleaning_train/predict()` and then adding `define_data_pipeline()` and `define_label_pipeline()` functions to your `IFreqaiModel` class:
```python linenums="1" hl_lines="11-14 47-49 55-57"
class MyCoolFreqaiModel(BaseRegressionModel):
    """
    Some cool custom IFreqaiModel you made before Freqtrade version 2023.6
    """
    def train(
        self, unfiltered_df: DataFrame, pair: str, dk: FreqaiDataKitchen, **kwargs
    ) -> Any:

        # ... your custom stuff

        # Remove these lines
        # data_dictionary = dk.make_train_test_datasets(features_filtered, labels_filtered)
        # self.data_cleaning_train(dk)
        # data_dictionary = dk.normalize_data(data_dictionary)
        # (1)

        # Add these lines. Now we control the pipeline fit/transform ourselves
        dd = dk.make_train_test_datasets(features_filtered, labels_filtered)
        dk.feature_pipeline = self.define_data_pipeline(threads=dk.thread_count)
        dk.label_pipeline = self.define_label_pipeline(threads=dk.thread_count)

        (dd["train_features"],
         dd["train_labels"],
         dd["train_weights"]) = dk.feature_pipeline.fit_transform(dd["train_features"],
                                                                  dd["train_labels"],
                                                                  dd["train_weights"])

        (dd["test_features"],
         dd["test_labels"],
         dd["test_weights"]) = dk.feature_pipeline.transform(dd["test_features"],
                                                             dd["test_labels"],
                                                             dd["test_weights"])

        dd["train_labels"], _, _ = dk.label_pipeline.fit_transform(dd["train_labels"])
        dd["test_labels"], _, _ = dk.label_pipeline.transform(dd["test_labels"])

        # ... your custom code
        return model

    def predict(
        self, unfiltered_df: DataFrame, dk: FreqaiDataKitchen, **kwargs
    ) -> Tuple[DataFrame, npt.NDArray[np.int_]]:

        # ... your custom stuff

        # Remove these lines:
        # self.data_cleaning_predict(dk)
        # (2)

        # Add these lines:
        dk.data_dictionary["prediction_features"], outliers, _ = dk.feature_pipeline.transform(
            dk.data_dictionary["prediction_features"], outlier_check=True)

        # Remove this line
        # pred_df = dk.denormalize_labels_from_metadata(pred_df)
        # (3)

        # Replace with these lines
        pred_df, _, _ = dk.label_pipeline.inverse_transform(pred_df)
        if self.freqai_info.get("DI_threshold", 0) > 0:
            dk.DI_values = dk.feature_pipeline["di"].di_values
        else:
            dk.DI_values = np.zeros(outliers.shape[0])
        dk.do_predict = outliers

        # ... your custom code
        return (pred_df, dk.do_predict)
```
1. Data normalization and cleaning is now homogenized with the new pipeline definition. This is created in the new `define_data_pipeline()` and `define_label_pipeline()` functions. The `data_cleaning_train()` and `data_cleaning_predict()` functions are no longer used. You can override `define_data_pipeline()` to create your own custom pipeline if you wish.
2. Data normalization and cleaning is now homogenized with the new pipeline definition. This is created in the new `define_data_pipeline()` and `define_label_pipeline()` functions. The `data_cleaning_train()` and `data_cleaning_predict()` functions are no longer used. You can override `define_data_pipeline()` to create your own custom pipeline if you wish.
3. Data denormalization is done with the new pipeline. Replace this with the lines below.

View File

@@ -1,5 +1,5 @@
""" Freqtrade bot """
__version__ = '2023.5.1'
__version__ = '2023.6'
if 'dev' in __version__:
from pathlib import Path

View File

@@ -19,7 +19,8 @@ from freqtrade.commands.list_commands import (start_list_exchanges, start_list_f
start_list_markets, start_list_strategies,
start_list_timeframes, start_show_trades)
from freqtrade.commands.optimize_commands import (start_backtesting, start_backtesting_show,
start_edge, start_hyperopt)
start_edge, start_hyperopt,
start_lookahead_analysis)
from freqtrade.commands.pairlist_commands import start_test_pairlist
from freqtrade.commands.plot_commands import start_plot_dataframe, start_plot_profit
from freqtrade.commands.strategy_utils_commands import start_strategy_update

freqtrade/commands/arguments.py Normal file → Executable file
View File

@@ -117,7 +117,11 @@ NO_CONF_REQURIED = ["convert-data", "convert-trade-data", "download-data", "list
NO_CONF_ALLOWED = ["create-userdir", "list-exchanges", "new-strategy"]
ARGS_STRATEGY_UTILS = ["strategy_list", "strategy_path", "recursive_strategy_search"]
ARGS_STRATEGY_UPDATER = ["strategy_list", "strategy_path", "recursive_strategy_search"]
ARGS_LOOKAHEAD_ANALYSIS = [
a for a in ARGS_BACKTEST if a not in ("position_stacking", "use_max_market_positions", 'cache')
] + ["minimum_trade_amount", "targeted_trade_amount", "lookahead_analysis_exportfilename"]
class Arguments:
@@ -201,8 +205,9 @@ class Arguments:
start_install_ui, start_list_data, start_list_exchanges,
start_list_freqAI_models, start_list_markets,
start_list_strategies, start_list_timeframes,
start_new_config, start_new_strategy, start_plot_dataframe,
start_plot_profit, start_show_trades, start_strategy_update,
start_lookahead_analysis, start_new_config,
start_new_strategy, start_plot_dataframe, start_plot_profit,
start_show_trades, start_strategy_update,
start_test_pairlist, start_trading, start_webserver)
subparsers = self.parser.add_subparsers(dest='command',
@@ -451,4 +456,15 @@ class Arguments:
'files to the current version',
parents=[_common_parser])
strategy_updater_cmd.set_defaults(func=start_strategy_update)
self._build_args(optionlist=ARGS_STRATEGY_UTILS, parser=strategy_updater_cmd)
self._build_args(optionlist=ARGS_STRATEGY_UPDATER, parser=strategy_updater_cmd)
# Add lookahead_analysis subcommand
lookahead_analayis_cmd = subparsers.add_parser(
'lookahead-analysis',
help="Check for potential look ahead bias.",
parents=[_common_parser, _strategy_parser])
lookahead_analayis_cmd.set_defaults(func=start_lookahead_analysis)
self._build_args(optionlist=ARGS_LOOKAHEAD_ANALYSIS,
parser=lookahead_analayis_cmd)

freqtrade/commands/cli_options.py Normal file → Executable file
View File

@@ -690,4 +690,21 @@ AVAILABLE_CLI_OPTIONS = {
help='Run backtest with ready models.',
action='store_true'
),
"minimum_trade_amount": Arg(
'--minimum-trade-amount',
help='Minimum trade amount for lookahead-analysis',
type=check_int_positive,
metavar='INT',
),
"targeted_trade_amount": Arg(
'--targeted-trade-amount',
help='Targeted trade amount for lookahead analysis',
type=check_int_positive,
metavar='INT',
),
"lookahead_analysis_exportfilename": Arg(
'--lookahead-analysis-exportfilename',
help="Use this csv-filename to store lookahead-analysis-results",
type=str
),
}

View File

@@ -1,18 +1,16 @@
import logging
import sys
from collections import defaultdict
from datetime import datetime, timedelta
from typing import Any, Dict, List
from typing import Any, Dict
from freqtrade.configuration import TimeRange, setup_utils_configuration
from freqtrade.constants import DATETIME_PRINT_FORMAT, Config
from freqtrade.data.converter import convert_ohlcv_format, convert_trades_format
from freqtrade.data.history import (convert_trades_to_ohlcv, refresh_backtest_ohlcv_data,
refresh_backtest_trades_data)
from freqtrade.data.history import convert_trades_to_ohlcv, download_data_main
from freqtrade.enums import CandleType, RunMode, TradingMode
from freqtrade.exceptions import OperationalException
from freqtrade.exchange import market_is_active, timeframe_to_minutes
from freqtrade.plugins.pairlist.pairlist_helpers import dynamic_expand_pairlist, expand_pairlist
from freqtrade.exchange import timeframe_to_minutes
from freqtrade.plugins.pairlist.pairlist_helpers import expand_pairlist
from freqtrade.resolvers import ExchangeResolver
from freqtrade.util.binance_mig import migrate_binance_futures_data
@@ -20,7 +18,7 @@ from freqtrade.util.binance_mig import migrate_binance_futures_data
logger = logging.getLogger(__name__)
def _data_download_sanity(config: Config) -> None:
def _check_data_config_download_sanity(config: Config) -> None:
if 'days' in config and 'timerange' in config:
raise OperationalException("--days and --timerange are mutually exclusive. "
"You can only specify one or the other.")
@@ -37,78 +35,14 @@ def start_download_data(args: Dict[str, Any]) -> None:
"""
config = setup_utils_configuration(args, RunMode.UTIL_EXCHANGE)
_data_download_sanity(config)
timerange = TimeRange()
if 'days' in config:
time_since = (datetime.now() - timedelta(days=config['days'])).strftime("%Y%m%d")
timerange = TimeRange.parse_timerange(f'{time_since}-')
if 'timerange' in config:
timerange = timerange.parse_timerange(config['timerange'])
# Remove stake-currency to skip checks which are not relevant for datadownload
config['stake_currency'] = ''
pairs_not_available: List[str] = []
# Init exchange
exchange = ExchangeResolver.load_exchange(config, validate=False)
markets = [p for p, m in exchange.markets.items() if market_is_active(m)
or config.get('include_inactive')]
expanded_pairs = dynamic_expand_pairlist(config, markets)
# Manual validations of relevant settings
if not config['exchange'].get('skip_pair_validation', False):
exchange.validate_pairs(expanded_pairs)
logger.info(f"About to download pairs: {expanded_pairs}, "
f"intervals: {config['timeframes']} to {config['datadir']}")
for timeframe in config['timeframes']:
exchange.validate_timeframes(timeframe)
_check_data_config_download_sanity(config)
try:
if config.get('download_trades'):
if config.get('trading_mode') == 'futures':
raise OperationalException("Trade download not supported for futures.")
pairs_not_available = refresh_backtest_trades_data(
exchange, pairs=expanded_pairs, datadir=config['datadir'],
timerange=timerange, new_pairs_days=config['new_pairs_days'],
erase=bool(config.get('erase')), data_format=config['dataformat_trades'])
# Convert downloaded trade data to different timeframes
convert_trades_to_ohlcv(
pairs=expanded_pairs, timeframes=config['timeframes'],
datadir=config['datadir'], timerange=timerange, erase=bool(config.get('erase')),
data_format_ohlcv=config['dataformat_ohlcv'],
data_format_trades=config['dataformat_trades'],
)
else:
if not exchange.get_option('ohlcv_has_history', True):
raise OperationalException(
f"Historic klines not available for {exchange.name}. "
"Please use `--dl-trades` instead for this exchange "
"(will unfortunately take a long time)."
)
migrate_binance_futures_data(config)
pairs_not_available = refresh_backtest_ohlcv_data(
exchange, pairs=expanded_pairs, timeframes=config['timeframes'],
datadir=config['datadir'], timerange=timerange,
new_pairs_days=config['new_pairs_days'],
erase=bool(config.get('erase')), data_format=config['dataformat_ohlcv'],
trading_mode=config.get('trading_mode', 'spot'),
prepend=config.get('prepend_data', False)
)
download_data_main(config)
except KeyboardInterrupt:
sys.exit("SIGINT received, aborting ...")
finally:
if pairs_not_available:
logger.info(f"Pairs [{','.join(pairs_not_available)}] not available "
f"on exchange {exchange.name}.")
def start_convert_trades(args: Dict[str, Any]) -> None:

View File

@@ -1,7 +1,7 @@
import csv
import logging
import sys
from typing import Any, Dict, List
from typing import Any, Dict, List, Union
import rapidjson
from colorama import Fore, Style
@@ -11,9 +11,10 @@ from tabulate import tabulate
from freqtrade.configuration import setup_utils_configuration
from freqtrade.enums import RunMode
from freqtrade.exceptions import OperationalException
from freqtrade.exchange import market_is_active, validate_exchanges
from freqtrade.exchange import list_available_exchanges, market_is_active
from freqtrade.misc import parse_db_uri_for_logging, plural
from freqtrade.resolvers import ExchangeResolver, StrategyResolver
from freqtrade.types import ValidExchangesType
logger = logging.getLogger(__name__)
@@ -25,18 +26,42 @@ def start_list_exchanges(args: Dict[str, Any]) -> None:
:param args: Cli args from Arguments()
:return: None
"""
exchanges = validate_exchanges(args['list_exchanges_all'])
exchanges = list_available_exchanges(args['list_exchanges_all'])
if args['print_one_column']:
print('\n'.join([e[0] for e in exchanges]))
print('\n'.join([e['name'] for e in exchanges]))
else:
headers = {
'name': 'Exchange name',
'supported': 'Supported',
'trade_modes': 'Markets',
'comment': 'Reason',
}
headers.update({'valid': 'Valid'} if args['list_exchanges_all'] else {})
def build_entry(exchange: ValidExchangesType, valid: bool):
valid_entry = {'valid': exchange['valid']} if valid else {}
result: Dict[str, Union[str, bool]] = {
'name': exchange['name'],
**valid_entry,
'supported': 'Official' if exchange['supported'] else '',
'trade_modes': ', '.join(
(f"{a['margin_mode']} " if a['margin_mode'] else '') + a['trading_mode']
for a in exchange['trade_modes']
),
'comment': exchange['comment'],
}
return result
if args['list_exchanges_all']:
print("All exchanges supported by the ccxt library:")
exchanges = [build_entry(e, True) for e in exchanges]
else:
print("Exchanges available for Freqtrade:")
exchanges = [e for e in exchanges if e[1] is not False]
exchanges = [build_entry(e, False) for e in exchanges if e['valid'] is not False]
print(tabulate(exchanges, headers=['Exchange name', 'Valid', 'reason']))
print(tabulate(exchanges, headers=headers, ))
def _print_objs_tabular(objs: List, print_colorized: bool) -> None:

View File

@@ -132,3 +132,15 @@ def start_edge(args: Dict[str, Any]) -> None:
# Initialize Edge object
edge_cli = EdgeCli(config)
edge_cli.start()
def start_lookahead_analysis(args: Dict[str, Any]) -> None:
"""
Start the backtest bias tester script
:param args: Cli args from Arguments()
:return: None
"""
from freqtrade.optimize.lookahead_analysis_helpers import LookaheadAnalysisSubFunctions
config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)
LookaheadAnalysisSubFunctions.start(config)

View File

@@ -203,7 +203,7 @@ class Configuration:
# This will override the strategy configuration
self._args_to_config(config, argname='timeframe',
logstring='Parameter -i/--timeframe detected ... '
'Using timeframe: {} ...')
'Using timeframe: {} ...')
self._args_to_config(config, argname='position_stacking',
logstring='Parameter --enable-position-stacking detected ...')
@@ -300,6 +300,9 @@ class Configuration:
self._args_to_config(config, argname='hyperoptexportfilename',
logstring='Using hyperopt file: {}')
self._args_to_config(config, argname='lookahead_analysis_exportfilename',
logstring='Saving lookahead analysis results into {} ...')
self._args_to_config(config, argname='epochs',
logstring='Parameter --epochs detected ... '
'Will run Hyperopt with for {} epochs ...'
@@ -474,6 +477,19 @@ class Configuration:
self._args_to_config(config, argname='analysis_csv_path',
logstring='Path to store analysis CSVs: {}')
self._args_to_config(config, argname='analysis_csv_path',
logstring='Path to store analysis CSVs: {}')
# Lookahead analysis results
self._args_to_config(config, argname='targeted_trade_amount',
logstring='Targeted Trade amount: {}')
self._args_to_config(config, argname='minimum_trade_amount',
logstring='Minimum Trade amount: {}')
self._args_to_config(config, argname='lookahead_analysis_exportfilename',
logstring='Path to store lookahead-analysis-results: {}')
def _process_runmode(self, config: Config) -> None:
self._args_to_config(config, argname='dry_run',
@@ -552,6 +568,7 @@ class Configuration:
# Fall back to /dl_path/pairs.json
pairs_file = config['datadir'] / 'pairs.json'
if pairs_file.exists():
logger.info(f'Reading pairs file "{pairs_file}".')
config['pairs'] = load_file(pairs_file)
if 'pairs' in config and isinstance(config['pairs'], list):
config['pairs'].sort()

View File

@@ -6,6 +6,8 @@ import re
from datetime import datetime, timezone
from typing import Optional
from typing_extensions import Self
from freqtrade.constants import DATETIME_PRINT_FORMAT
from freqtrade.exceptions import OperationalException
@@ -107,15 +109,15 @@ class TimeRange:
self.startts = int(min_date.timestamp() + timeframe_secs * startup_candles)
self.starttype = 'date'
@staticmethod
def parse_timerange(text: Optional[str]) -> 'TimeRange':
@classmethod
def parse_timerange(cls, text: Optional[str]) -> Self:
"""
Parse the value of the argument --timerange to determine the desired range
:param text: value from --timerange
:return: Start and End range period
"""
if not text:
return TimeRange(None, None, 0, 0)
return cls(None, None, 0, 0)
syntax = [(r'^-(\d{8})$', (None, 'date')),
(r'^(\d{8})-$', ('date', None)),
(r'^(\d{8})-(\d{8})$', ('date', 'date')),
@@ -156,5 +158,5 @@ class TimeRange:
if start > stop > 0:
raise OperationalException(
f'Start date is after stop date for timerange "{text}"')
return TimeRange(stype[0], stype[1], start, stop)
return cls(stype[0], stype[1], start, stop)
raise OperationalException(f'Incorrect syntax for timerange "{text}"')
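The practical effect of the @classmethod/Self switch is that subclasses now get instances of their own type back; a minimal sketch (the subclass is hypothetical):
from freqtrade.configuration import TimeRange

class CustomTimeRange(TimeRange):          # hypothetical subclass
    pass

tr = CustomTimeRange.parse_timerange('20230101-20230201')
assert isinstance(tr, CustomTimeRange)     # returned a plain TimeRange before this change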

View File

@@ -8,6 +8,7 @@ from typing import Any, Dict, List, Literal, Tuple
from freqtrade.enums import CandleType, PriceType, RPCMessageType
DOCS_LINK = "https://www.freqtrade.io/en/stable"
DEFAULT_CONFIG = 'config.json'
DEFAULT_EXCHANGE = 'bittrex'
PROCESS_THROTTLE_SECS = 5 # sec
@@ -111,6 +112,8 @@ MINIMAL_CONFIG = {
}
}
__MESSAGE_TYPE_DICT: Dict[str, Dict[str, str]] = {x: {'type': 'object'} for x in RPCMessageType}
# Required json-schema for user specified config
CONF_SCHEMA = {
'type': 'object',
@@ -148,7 +151,6 @@ CONF_SCHEMA = {
'patternProperties': {
'^[0-9.]+$': {'type': 'number'}
},
'minProperties': 1
},
'amount_reserve_percent': {'type': 'number', 'minimum': 0.0, 'maximum': 0.5},
'stoploss': {'type': 'number', 'maximum': 0, 'exclusiveMaximum': True, 'minimum': -1},
@@ -164,6 +166,9 @@ CONF_SCHEMA = {
'trading_mode': {'type': 'string', 'enum': TRADING_MODES},
'margin_mode': {'type': 'string', 'enum': MARGIN_MODES},
'reduce_df_footprint': {'type': 'boolean', 'default': False},
'minimum_trade_amount': {'type': 'number', 'default': 10},
'targeted_trade_amount': {'type': 'number', 'default': 20},
'lookahead_analysis_exportfilename': {'type': 'string'},
'liquidation_buffer': {'type': 'number', 'minimum': 0.0, 'maximum': 0.99},
'backtest_breakdown': {
'type': 'array',
@@ -351,7 +356,8 @@ CONF_SCHEMA = {
'format': {'type': 'string', 'enum': WEBHOOK_FORMAT_OPTIONS, 'default': 'form'},
'retries': {'type': 'integer', 'minimum': 0},
'retry_delay': {'type': 'number', 'minimum': 0},
**dict([(x, {'type': 'object'}) for x in RPCMessageType]),
**__MESSAGE_TYPE_DICT,
# **{x: {'type': 'object'} for x in RPCMessageType},
# Below -> Deprecated
'webhookentry': {'type': 'object'},
'webhookentrycancel': {'type': 'object'},
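An illustrative config fragment exercising the three lookahead keys added to CONF_SCHEMA above; the numeric values are the schema defaults and the filename is hypothetical:
lookahead_settings = {
    "minimum_trade_amount": 10,
    "targeted_trade_amount": 20,
    "lookahead_analysis_exportfilename": "lookahead_results.csv",
}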

View File

@@ -6,7 +6,7 @@ Includes:
* download data from exchange and store to disk
"""
# flake8: noqa: F401
from .history_utils import (convert_trades_to_ohlcv, get_timerange, load_data, load_pair_history,
refresh_backtest_ohlcv_data, refresh_backtest_trades_data, refresh_data,
validate_backtest_data)
from .history_utils import (convert_trades_to_ohlcv, download_data_main, get_timerange, load_data,
load_pair_history, refresh_backtest_ohlcv_data,
refresh_backtest_trades_data, refresh_data, validate_backtest_data)
from .idatahandler import get_datahandler

View File

@@ -7,7 +7,7 @@ from typing import Dict, List, Optional, Tuple
from pandas import DataFrame, concat
from freqtrade.configuration import TimeRange
from freqtrade.constants import DEFAULT_DATAFRAME_COLUMNS
from freqtrade.constants import DATETIME_PRINT_FORMAT, DEFAULT_DATAFRAME_COLUMNS, Config
from freqtrade.data.converter import (clean_ohlcv_dataframe, ohlcv_to_dataframe,
trades_remove_duplicates, trades_to_ohlcv)
from freqtrade.data.history.idatahandler import IDataHandler, get_datahandler
@@ -15,6 +15,8 @@ from freqtrade.enums import CandleType
from freqtrade.exceptions import OperationalException
from freqtrade.exchange import Exchange
from freqtrade.misc import format_ms_time
from freqtrade.plugins.pairlist.pairlist_helpers import dynamic_expand_pairlist
from freqtrade.util.binance_mig import migrate_binance_futures_data
logger = logging.getLogger(__name__)
@@ -227,9 +229,11 @@ def _download_pair_history(pair: str, *,
)
logger.debug("Current Start: %s",
f"{data.iloc[0]['date']:DATETIME_PRINT_FORMAT}" if not data.empty else 'None')
f"{data.iloc[0]['date']:{DATETIME_PRINT_FORMAT}}"
if not data.empty else 'None')
logger.debug("Current End: %s",
f"{data.iloc[-1]['date']:DATETIME_PRINT_FORMAT}" if not data.empty else 'None')
f"{data.iloc[-1]['date']:{DATETIME_PRINT_FORMAT}}"
if not data.empty else 'None')
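A quick illustration of the f-string fix above: a bare name in the format-spec position is treated as a literal spec, while nested braces interpolate the variable:
from datetime import datetime

DATETIME_PRINT_FORMAT = '%Y-%m-%d %H:%M:%S'
d = datetime(2023, 6, 1)
# f"{d:DATETIME_PRINT_FORMAT}" renders the literal text, not the date.
assert f"{d:{DATETIME_PRINT_FORMAT}}" == '2023-06-01 00:00:00'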
# Default since_ms to 30 days if nothing is given
new_data = exchange.get_historic_ohlcv(pair=pair,
@@ -252,10 +256,12 @@ def _download_pair_history(pair: str, *,
data = clean_ohlcv_dataframe(concat([data, new_dataframe], axis=0), timeframe, pair,
fill_missing=False, drop_incomplete=False)
logger.debug("New Start: %s",
f"{data.iloc[0]['date']:DATETIME_PRINT_FORMAT}" if not data.empty else 'None')
logger.debug("New Start: %s",
f"{data.iloc[0]['date']:{DATETIME_PRINT_FORMAT}}"
if not data.empty else 'None')
logger.debug("New End: %s",
f"{data.iloc[-1]['date']:DATETIME_PRINT_FORMAT}" if not data.empty else 'None')
f"{data.iloc[-1]['date']:{DATETIME_PRINT_FORMAT}}"
if not data.empty else 'None')
data_handler.ohlcv_store(pair, timeframe, data=data, candle_type=candle_type)
return True
@@ -290,7 +296,7 @@ def refresh_backtest_ohlcv_data(exchange: Exchange, pairs: List[str], timeframes
continue
for timeframe in timeframes:
logger.info(f'Downloading pair {pair}, interval {timeframe}.')
logger.debug(f'Downloading pair {pair}, {candle_type}, interval {timeframe}.')
process = f'{idx}/{len(pairs)}'
_download_pair_history(pair=pair, process=process,
datadir=datadir, exchange=exchange,
@@ -479,3 +485,77 @@ def validate_backtest_data(data: DataFrame, pair: str, min_date: datetime,
logger.warning("%s has missing frames: expected %s, got %s, that's %s missing values",
pair, expected_frames, dflen, expected_frames - dflen)
return found_missing
def download_data_main(config: Config) -> None:
timerange = TimeRange()
if 'days' in config:
time_since = (datetime.now() - timedelta(days=config['days'])).strftime("%Y%m%d")
timerange = TimeRange.parse_timerange(f'{time_since}-')
if 'timerange' in config:
timerange = TimeRange.parse_timerange(config['timerange'])
# Remove stake-currency to skip checks which are not relevant for datadownload
config['stake_currency'] = ''
pairs_not_available: List[str] = []
# Init exchange
from freqtrade.resolvers.exchange_resolver import ExchangeResolver
exchange = ExchangeResolver.load_exchange(config, validate=False)
available_pairs = [
p for p in exchange.get_markets(
tradable_only=True, active_only=not config.get('include_inactive')
).keys()
]
expanded_pairs = dynamic_expand_pairlist(config, available_pairs)
# Manual validations of relevant settings
if not config['exchange'].get('skip_pair_validation', False):
exchange.validate_pairs(expanded_pairs)
logger.info(f"About to download pairs: {expanded_pairs}, "
f"intervals: {config['timeframes']} to {config['datadir']}")
for timeframe in config['timeframes']:
exchange.validate_timeframes(timeframe)
# Start downloading
try:
if config.get('download_trades'):
if config.get('trading_mode') == 'futures':
raise OperationalException("Trade download not supported for futures.")
pairs_not_available = refresh_backtest_trades_data(
exchange, pairs=expanded_pairs, datadir=config['datadir'],
timerange=timerange, new_pairs_days=config['new_pairs_days'],
erase=bool(config.get('erase')), data_format=config['dataformat_trades'])
# Convert downloaded trade data to different timeframes
convert_trades_to_ohlcv(
pairs=expanded_pairs, timeframes=config['timeframes'],
datadir=config['datadir'], timerange=timerange, erase=bool(config.get('erase')),
data_format_ohlcv=config['dataformat_ohlcv'],
data_format_trades=config['dataformat_trades'],
)
else:
if not exchange.get_option('ohlcv_has_history', True):
raise OperationalException(
f"Historic klines not available for {exchange.name}. "
"Please use `--dl-trades` instead for this exchange "
"(will unfortunately take a long time)."
)
migrate_binance_futures_data(config)
pairs_not_available = refresh_backtest_ohlcv_data(
exchange, pairs=expanded_pairs, timeframes=config['timeframes'],
datadir=config['datadir'], timerange=timerange,
new_pairs_days=config['new_pairs_days'],
erase=bool(config.get('erase')), data_format=config['dataformat_ohlcv'],
trading_mode=config.get('trading_mode', 'spot'),
prepend=config.get('prepend_data', False)
)
finally:
if pairs_not_available:
logger.info(f"Pairs [{','.join(pairs_not_available)}] not available "
f"on exchange {exchange.name}.")

View File

@@ -1,7 +1,7 @@
from enum import Enum
class MarginMode(Enum):
class MarginMode(str, Enum):
"""
Enum to distinguish between
cross margin/futures margin_mode and
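Mixing in str gives members value-equality with plain strings and free JSON serialization; a minimal sketch (the members shown are assumed from the enum's definition, which the diff context truncates):
import json
from enum import Enum

class MarginMode(str, Enum):               # assumed members
    CROSS = 'cross'
    ISOLATED = 'isolated'
    NONE = ''

assert MarginMode.CROSS == 'cross'
assert json.dumps({'mode': MarginMode.ISOLATED}) == '{"mode": "isolated"}'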

View File

@@ -13,11 +13,11 @@ from freqtrade.exchange.exchange_utils import (ROUND_DOWN, ROUND_UP, amount_to_c
amount_to_contracts, amount_to_precision,
available_exchanges, ccxt_exchanges,
contracts_to_amount, date_minus_candles,
is_exchange_known_ccxt, market_is_active,
price_to_precision, timeframe_to_minutes,
timeframe_to_msecs, timeframe_to_next_date,
timeframe_to_prev_date, timeframe_to_seconds,
validate_exchange, validate_exchanges)
is_exchange_known_ccxt, list_available_exchanges,
market_is_active, price_to_precision,
timeframe_to_minutes, timeframe_to_msecs,
timeframe_to_next_date, timeframe_to_prev_date,
timeframe_to_seconds, validate_exchange)
from freqtrade.exchange.gate import Gate
from freqtrade.exchange.hitbtc import Hitbtc
from freqtrade.exchange.huobi import Huobi

File diff suppressed because it is too large

View File

@@ -301,7 +301,7 @@ class Exchange:
return list((self._api.timeframes or {}).keys())
@property
def markets(self) -> Dict:
def markets(self) -> Dict[str, Any]:
"""exchange ccxt markets"""
if not self._markets:
logger.info("Markets were not loaded. Loading them now..")
@@ -1148,8 +1148,8 @@ class Exchange:
else:
limit_rate = stop_price * (2 - limit_price_pct)
bad_stop_price = ((stop_price <= limit_rate) if side ==
"sell" else (stop_price >= limit_rate))
bad_stop_price = ((stop_price < limit_rate) if side ==
"sell" else (stop_price > limit_rate))
# Ensure rate is less than stop price
if bad_stop_price:
# This can for example happen if the stop / liquidation price is set to 0
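A tiny worked example of the relaxed check: with a limit_price_pct of 1.0 (stop and limit identical), the order is no longer rejected:
stop_price, limit_price_pct, side = 100.0, 1.0, 'sell'
limit_rate = stop_price * limit_price_pct          # 100.0, per the sell branch above
bad_stop_price = ((stop_price < limit_rate) if side ==
                  "sell" else (stop_price > limit_rate))
assert bad_stop_price is False                     # the old <= check returned True here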
@@ -1662,39 +1662,18 @@ class Exchange:
price_side = self._get_price_side(side, is_short, conf_strategy)
price_side_word = price_side.capitalize()
if conf_strategy.get('use_order_book', False):
order_book_top = conf_strategy.get('order_book_top', 1)
if order_book is None:
order_book = self.fetch_l2_order_book(pair, order_book_top)
logger.debug('order_book %s', order_book)
# top 1 = index 0
try:
obside: OBLiteral = 'bids' if price_side == 'bid' else 'asks'
rate = order_book[obside][order_book_top - 1][0]
except (IndexError, KeyError) as e:
logger.warning(
f"{pair} - {name} Price at location {order_book_top} from orderbook "
f"could not be determined. Orderbook: {order_book}"
)
raise PricingError from e
logger.debug(f"{pair} - {name} price from orderbook {price_side_word}"
f"side - top {order_book_top} order book {side} rate {rate:.8f}")
rate = self._get_rate_from_ob(pair, side, order_book, name, price_side,
order_book_top)
else:
logger.debug(f"Using Last {price_side_word} / Last Price")
logger.debug(f"Using Last {price_side.capitalize()} / Last Price")
if ticker is None:
ticker = self.fetch_ticker(pair)
ticker_rate = ticker[price_side]
if ticker['last'] and ticker_rate:
if side == 'entry' and ticker_rate > ticker['last']:
balance = conf_strategy.get('price_last_balance', 0.0)
ticker_rate = ticker_rate + balance * (ticker['last'] - ticker_rate)
elif side == 'exit' and ticker_rate < ticker['last']:
balance = conf_strategy.get('price_last_balance', 0.0)
ticker_rate = ticker_rate - balance * (ticker_rate - ticker['last'])
rate = ticker_rate
rate = self._get_rate_from_ticker(side, ticker, conf_strategy, price_side)
if rate is None:
raise PricingError(f"{name}-Rate for {pair} was empty.")
@@ -1703,6 +1682,43 @@ class Exchange:
return rate
def _get_rate_from_ticker(self, side: EntryExit, ticker: Ticker, conf_strategy: Dict[str, Any],
price_side: BidAsk) -> Optional[float]:
"""
Get rate from ticker.
"""
ticker_rate = ticker[price_side]
if ticker['last'] and ticker_rate:
if side == 'entry' and ticker_rate > ticker['last']:
balance = conf_strategy.get('price_last_balance', 0.0)
ticker_rate = ticker_rate + balance * (ticker['last'] - ticker_rate)
elif side == 'exit' and ticker_rate < ticker['last']:
balance = conf_strategy.get('price_last_balance', 0.0)
ticker_rate = ticker_rate - balance * (ticker_rate - ticker['last'])
rate = ticker_rate
return rate
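A worked example of the price_last_balance interpolation above: with a balance of 0.5, the entry rate moves halfway from the ask toward the last price:
ticker = {'ask': 101.0, 'last': 100.0}             # illustrative ticker values
balance = 0.5                                      # conf_strategy['price_last_balance']
ticker_rate = ticker['ask']
if ticker_rate > ticker['last']:                   # entry-side adjustment
    ticker_rate = ticker_rate + balance * (ticker['last'] - ticker_rate)
assert ticker_rate == 100.5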
def _get_rate_from_ob(self, pair: str, side: EntryExit, order_book: OrderBook, name: str,
price_side: BidAsk, order_book_top: int) -> float:
"""
Get rate from orderbook
:raises: PricingError if rate could not be determined.
"""
logger.debug('order_book %s', order_book)
# top 1 = index 0
try:
obside: OBLiteral = 'bids' if price_side == 'bid' else 'asks'
rate = order_book[obside][order_book_top - 1][0]
except (IndexError, KeyError) as e:
logger.warning(
f"{pair} - {name} Price at location {order_book_top} from orderbook "
f"could not be determined. Orderbook: {order_book}"
)
raise PricingError from e
logger.debug(f"{pair} - {name} price from orderbook {price_side.capitalize()}"
f"side - top {order_book_top} order book {side} rate {rate:.8f}")
return rate
def get_rates(self, pair: str, refresh: bool, is_short: bool) -> Tuple[float, float]:
entry_rate = None
exit_rate = None

View File

@@ -9,7 +9,9 @@ import ccxt
from ccxt import (DECIMAL_PLACES, ROUND, ROUND_DOWN, ROUND_UP, SIGNIFICANT_DIGITS, TICK_SIZE,
TRUNCATE, decimal_to_precision)
from freqtrade.exchange.common import BAD_EXCHANGES, EXCHANGE_HAS_OPTIONAL, EXCHANGE_HAS_REQUIRED
from freqtrade.exchange.common import (BAD_EXCHANGES, EXCHANGE_HAS_OPTIONAL, EXCHANGE_HAS_REQUIRED,
SUPPORTED_EXCHANGES)
from freqtrade.types import ValidExchangesType
from freqtrade.util import FtPrecise
from freqtrade.util.datetime_helpers import dt_from_ts, dt_ts
@@ -55,14 +57,41 @@ def validate_exchange(exchange: str) -> Tuple[bool, str]:
return True, ''
def validate_exchanges(all_exchanges: bool) -> List[Tuple[str, bool, str]]:
def _build_exchange_list_entry(
exchange_name: str, exchangeClasses: Dict[str, Any]) -> ValidExchangesType:
valid, comment = validate_exchange(exchange_name)
result: ValidExchangesType = {
'name': exchange_name,
'valid': valid,
'supported': exchange_name.lower() in SUPPORTED_EXCHANGES,
'comment': comment,
'trade_modes': [{'trading_mode': 'spot', 'margin_mode': ''}],
}
if resolved := exchangeClasses.get(exchange_name.lower()):
supported_modes = [{'trading_mode': 'spot', 'margin_mode': ''}] + [
{'trading_mode': tm.value, 'margin_mode': mm.value}
for tm, mm in resolved['class']._supported_trading_mode_margin_pairs
]
result.update({
'trade_modes': supported_modes,
})
return result
def list_available_exchanges(all_exchanges: bool) -> List[ValidExchangesType]:
"""
:return: List of ValidExchangesType dicts (name, valid, supported, comment, trade_modes).
"""
exchanges = ccxt_exchanges() if all_exchanges else available_exchanges()
exchanges_valid = [
(e, *validate_exchange(e)) for e in exchanges
from freqtrade.resolvers.exchange_resolver import ExchangeResolver
subclassed = {e['name'].lower(): e for e in ExchangeResolver.search_all_objects({}, False)}
exchanges_valid: List[ValidExchangesType] = [
_build_exchange_list_entry(e, subclassed) for e in exchanges
]
return exchanges_valid

View File

@@ -33,7 +33,6 @@ class Gate(Exchange):
_ft_has_futures: Dict = {
"needs_trading_fees": True,
"marketOrderRequiresPrice": False,
"tickers_have_bid_ask": False,
"fee_cost_in_contracts": False, # Set explicitly to false for clarity
"order_props_in_contracts": ['amount', 'filled', 'remaining'],
"stop_price_type_field": "price_type",

View File

@@ -125,6 +125,20 @@ class Okx(Exchange):
params['posSide'] = self._get_posSide(side, reduceOnly)
return params
def __fetch_leverage_already_set(self, pair: str, leverage: float, side: BuySell) -> bool:
try:
res_lev = self._api.fetch_leverage(symbol=pair, params={
"mgnMode": self.margin_mode.value,
"posSide": self._get_posSide(side, False),
})
self._log_exchange_response('get_leverage', res_lev)
already_set = all(float(x['lever']) == leverage for x in res_lev['data'])
return already_set
except ccxt.BaseError:
# Treat any error as "leverage not set yet"
return False
@retrier
def _lev_prep(self, pair: str, leverage: float, side: BuySell, accept_fail: bool = False):
if self.trading_mode != TradingMode.SPOT and self.margin_mode is not None:
@@ -141,8 +155,11 @@ class Okx(Exchange):
except ccxt.DDoSProtection as e:
raise DDosProtection(e) from e
except (ccxt.NetworkError, ccxt.ExchangeError) as e:
raise TemporaryError(
f'Could not set leverage due to {e.__class__.__name__}. Message: {e}') from e
already_set = self.__fetch_leverage_already_set(pair, leverage, side)
if not already_set:
raise TemporaryError(
f'Could not set leverage due to {e.__class__.__name__}. Message: {e}'
) from e
except ccxt.BaseError as e:
raise OperationalException(e) from e
@@ -182,6 +199,7 @@ class Okx(Exchange):
order_reg['type'] = 'stoploss'
order_reg['status_stop'] = 'triggered'
return order_reg
order = self._order_contracts_to_amount(order)
order['type'] = 'stoploss'
return order

View File

@@ -2,7 +2,7 @@ import logging
import random
from abc import abstractmethod
from enum import Enum
from typing import Optional, Type, Union
from typing import List, Optional, Type, Union
import gymnasium as gym
import numpy as np
@@ -141,6 +141,9 @@ class BaseEnvironment(gym.Env):
Unique to the environment action count. Must be inherited.
"""
def action_masks(self) -> List[bool]:
return [self._is_valid(action.value) for action in self.actions]
def seed(self, seed: int = 1):
self.np_random, seed = seeding.np_random(seed)
return [seed]
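The new action_masks() hook feeds sb3-contrib's maskable policies; a toy stand-in for _is_valid() showing the contract:
from enum import Enum
from typing import List

class Actions(Enum):                               # toy action space
    Neutral = 0
    Long_enter = 1
    Long_exit = 2

def _is_valid(action: int) -> bool:
    return action != Actions.Long_exit.value       # e.g. no exit without a position

action_masks: List[bool] = [_is_valid(a.value) for a in Actions]
assert action_masks == [True, True, False]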

View File

@@ -13,7 +13,8 @@ import pandas as pd
import torch as th
import torch.multiprocessing
from pandas import DataFrame
from stable_baselines3.common.callbacks import EvalCallback
from sb3_contrib.common.maskable.callbacks import MaskableEvalCallback
from sb3_contrib.common.maskable.utils import is_masking_supported
from stable_baselines3.common.monitor import Monitor
from stable_baselines3.common.utils import set_random_seed
from stable_baselines3.common.vec_env import SubprocVecEnv, VecMonitor
@@ -48,7 +49,7 @@ class BaseReinforcementLearningModel(IFreqaiModel):
self.reward_params = self.freqai_info['rl_config']['model_reward_parameters']
self.train_env: Union[VecMonitor, SubprocVecEnv, gym.Env] = gym.Env()
self.eval_env: Union[VecMonitor, SubprocVecEnv, gym.Env] = gym.Env()
self.eval_callback: Optional[EvalCallback] = None
self.eval_callback: Optional[MaskableEvalCallback] = None
self.model_type = self.freqai_info['rl_config']['model_type']
self.rl_config = self.freqai_info['rl_config']
self.df_raw: DataFrame = DataFrame()
@@ -82,6 +83,9 @@ class BaseReinforcementLearningModel(IFreqaiModel):
if self.ft_params.get('use_DBSCAN_to_remove_outliers', False):
self.ft_params.update({'use_DBSCAN_to_remove_outliers': False})
logger.warning('User tried to use DBSCAN with RL. Deactivating DBSCAN.')
if self.ft_params.get('DI_threshold', False):
self.ft_params.update({'DI_threshold': False})
logger.warning('User tried to use DI_threshold with RL. Deactivating DI_threshold.')
if self.freqai_info['data_split_parameters'].get('shuffle', False):
self.freqai_info['data_split_parameters'].update({'shuffle': False})
logger.warning('User tried to shuffle training data. Setting shuffle to False')
@@ -107,27 +111,37 @@ class BaseReinforcementLearningModel(IFreqaiModel):
training_filter=True,
)
data_dictionary: Dict[str, Any] = dk.make_train_test_datasets(
dd: Dict[str, Any] = dk.make_train_test_datasets(
features_filtered, labels_filtered)
self.df_raw = copy.deepcopy(data_dictionary["train_features"])
self.df_raw = copy.deepcopy(dd["train_features"])
dk.fit_labels() # FIXME useless for now, but just satiating append methods
# normalize all data based on train_dataset only
prices_train, prices_test = self.build_ohlc_price_dataframes(dk.data_dictionary, pair, dk)
data_dictionary = dk.normalize_data(data_dictionary)
dk.feature_pipeline = self.define_data_pipeline(threads=dk.thread_count)
# data cleaning/analysis
self.data_cleaning_train(dk)
(dd["train_features"],
dd["train_labels"],
dd["train_weights"]) = dk.feature_pipeline.fit_transform(dd["train_features"],
dd["train_labels"],
dd["train_weights"])
if self.freqai_info.get('data_split_parameters', {}).get('test_size', 0.1) != 0:
(dd["test_features"],
dd["test_labels"],
dd["test_weights"]) = dk.feature_pipeline.transform(dd["test_features"],
dd["test_labels"],
dd["test_weights"])
logger.info(
f'Training model on {len(dk.data_dictionary["train_features"].columns)}'
f' features and {len(data_dictionary["train_features"])} data points'
f' features and {len(dd["train_features"])} data points'
)
self.set_train_and_eval_environments(data_dictionary, prices_train, prices_test, dk)
self.set_train_and_eval_environments(dd, prices_train, prices_test, dk)
model = self.fit(data_dictionary, dk)
model = self.fit(dd, dk)
logger.info(f"--------------------done training {pair}--------------------")
@@ -151,9 +165,11 @@ class BaseReinforcementLearningModel(IFreqaiModel):
self.train_env = self.MyRLEnv(df=train_df, prices=prices_train, **env_info)
self.eval_env = Monitor(self.MyRLEnv(df=test_df, prices=prices_test, **env_info))
self.eval_callback = EvalCallback(self.eval_env, deterministic=True,
render=False, eval_freq=len(train_df),
best_model_save_path=str(dk.data_path))
self.eval_callback = MaskableEvalCallback(self.eval_env, deterministic=True,
render=False, eval_freq=len(train_df),
best_model_save_path=str(dk.data_path),
use_masking=(self.model_type == 'MaskablePPO' and
is_masking_supported(self.eval_env)))
actions = self.train_env.get_actions()
self.tensorboard_callback = TensorboardCallback(verbose=1, actions=actions)
@@ -236,13 +252,10 @@ class BaseReinforcementLearningModel(IFreqaiModel):
unfiltered_df, dk.training_features_list, training_filter=False
)
filtered_dataframe = self.drop_ohlc_from_df(filtered_dataframe, dk)
dk.data_dictionary["prediction_features"] = self.drop_ohlc_from_df(filtered_dataframe, dk)
filtered_dataframe = dk.normalize_data_from_metadata(filtered_dataframe)
dk.data_dictionary["prediction_features"] = filtered_dataframe
# optional additional data cleaning/analysis
self.data_cleaning_predict(dk)
dk.data_dictionary["prediction_features"], _, _ = dk.feature_pipeline.transform(
dk.data_dictionary["prediction_features"], outlier_check=True)
pred_df = self.rl_model_predict(
dk.data_dictionary["prediction_features"], dk, self.model)

View File

@@ -17,8 +17,8 @@ logger = logging.getLogger(__name__)
class BaseClassifierModel(IFreqaiModel):
"""
Base class for classifier type models (e.g. Catboost, LightGBM, XGboost etc.).
User *must* inherit from this class and set fit() and predict(). See example scripts
such as prediction_models/CatboostPredictionModel.py for guidance.
User *must* inherit from this class and set fit(). See example scripts
such as prediction_models/CatboostClassifier.py for guidance.
"""
def train(
@@ -50,21 +50,30 @@ class BaseClassifierModel(IFreqaiModel):
logger.info(f"-------------------- Training on data from {start_date} to "
f"{end_date} --------------------")
# split data into train/test data.
data_dictionary = dk.make_train_test_datasets(features_filtered, labels_filtered)
dd = dk.make_train_test_datasets(features_filtered, labels_filtered)
if not self.freqai_info.get("fit_live_predictions_candles", 0) or not self.live:
dk.fit_labels()
# normalize all data based on train_dataset only
data_dictionary = dk.normalize_data(data_dictionary)
dk.feature_pipeline = self.define_data_pipeline(threads=dk.thread_count)
# optional additional data cleaning/analysis
self.data_cleaning_train(dk)
(dd["train_features"],
dd["train_labels"],
dd["train_weights"]) = dk.feature_pipeline.fit_transform(dd["train_features"],
dd["train_labels"],
dd["train_weights"])
if self.freqai_info.get('data_split_parameters', {}).get('test_size', 0.1) != 0:
(dd["test_features"],
dd["test_labels"],
dd["test_weights"]) = dk.feature_pipeline.transform(dd["test_features"],
dd["test_labels"],
dd["test_weights"])
logger.info(
f"Training model on {len(dk.data_dictionary['train_features'].columns)} features"
)
logger.info(f"Training model on {len(data_dictionary['train_features'])} data points")
logger.info(f"Training model on {len(dd['train_features'])} data points")
model = self.fit(data_dictionary, dk)
model = self.fit(dd, dk)
end_time = time()
@@ -89,10 +98,11 @@ class BaseClassifierModel(IFreqaiModel):
filtered_df, _ = dk.filter_features(
unfiltered_df, dk.training_features_list, training_filter=False
)
filtered_df = dk.normalize_data_from_metadata(filtered_df)
dk.data_dictionary["prediction_features"] = filtered_df
self.data_cleaning_predict(dk)
dk.data_dictionary["prediction_features"], outliers, _ = dk.feature_pipeline.transform(
dk.data_dictionary["prediction_features"], outlier_check=True)
predictions = self.model.predict(dk.data_dictionary["prediction_features"])
if self.CONV_WIDTH == 1:
@@ -107,4 +117,10 @@ class BaseClassifierModel(IFreqaiModel):
pred_df = pd.concat([pred_df, pred_df_prob], axis=1)
if dk.feature_pipeline["di"]:
dk.DI_values = dk.feature_pipeline["di"].di_values
else:
dk.DI_values = np.zeros(outliers.shape[0])
dk.do_predict = outliers
return (pred_df, dk.do_predict)

View File

@@ -1,5 +1,6 @@
import logging
from typing import Dict, List, Tuple
from time import time
from typing import Any, Dict, List, Tuple
import numpy as np
import numpy.typing as npt
@@ -35,6 +36,7 @@ class BasePyTorchClassifier(BasePyTorchModel):
return dataframe
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.class_name_to_index = None
@@ -68,9 +70,12 @@ class BasePyTorchClassifier(BasePyTorchModel):
filtered_df, _ = dk.filter_features(
unfiltered_df, dk.training_features_list, training_filter=False
)
filtered_df = dk.normalize_data_from_metadata(filtered_df)
dk.data_dictionary["prediction_features"] = filtered_df
self.data_cleaning_predict(dk)
dk.data_dictionary["prediction_features"], outliers, _ = dk.feature_pipeline.transform(
dk.data_dictionary["prediction_features"], outlier_check=True)
x = self.data_convertor.convert_x(
dk.data_dictionary["prediction_features"],
device=self.device
@@ -85,6 +90,13 @@ class BasePyTorchClassifier(BasePyTorchModel):
pred_df_prob = DataFrame(probs.detach().tolist(), columns=class_names)
pred_df = DataFrame(predicted_classes_str, columns=[dk.label_list[0]])
pred_df = pd.concat([pred_df, pred_df_prob], axis=1)
if dk.feature_pipeline["di"]:
dk.DI_values = dk.feature_pipeline["di"].di_values
else:
dk.DI_values = np.zeros(outliers.shape[0])
dk.do_predict = outliers
return (pred_df, dk.do_predict)
def encode_class_names(
@@ -149,3 +161,58 @@ class BasePyTorchClassifier(BasePyTorchModel):
)
return self.class_names
def train(
self, unfiltered_df: DataFrame, pair: str, dk: FreqaiDataKitchen, **kwargs
) -> Any:
"""
Filter the training data and train a model on it. Train makes heavy use of the datakitchen
for storing, saving, loading, and analyzing the data.
:param unfiltered_df: Full dataframe for the current training period
:return:
:model: Trained model which can be used for inference (self.predict)
"""
logger.info(f"-------------------- Starting training {pair} --------------------")
start_time = time()
features_filtered, labels_filtered = dk.filter_features(
unfiltered_df,
dk.training_features_list,
dk.label_list,
training_filter=True,
)
# split data into train/test data.
dd = dk.make_train_test_datasets(features_filtered, labels_filtered)
if not self.freqai_info.get("fit_live_predictions_candles", 0) or not self.live:
dk.fit_labels()
dk.feature_pipeline = self.define_data_pipeline(threads=dk.thread_count)
(dd["train_features"],
dd["train_labels"],
dd["train_weights"]) = dk.feature_pipeline.fit_transform(dd["train_features"],
dd["train_labels"],
dd["train_weights"])
if self.freqai_info.get('data_split_parameters', {}).get('test_size', 0.1) != 0:
(dd["test_features"],
dd["test_labels"],
dd["test_weights"]) = dk.feature_pipeline.transform(dd["test_features"],
dd["test_labels"],
dd["test_weights"])
logger.info(
f"Training model on {len(dk.data_dictionary['train_features'].columns)} features"
)
logger.info(f"Training model on {len(dd['train_features'])} data points")
model = self.fit(dd, dk)
end_time = time()
logger.info(f"-------------------- Done training {pair} "
f"({end_time - start_time:.2f} secs) --------------------")
return model

View File

@@ -1,12 +1,8 @@
import logging
from abc import ABC, abstractmethod
from time import time
from typing import Any
import torch
from pandas import DataFrame
from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
from freqtrade.freqai.freqai_interface import IFreqaiModel
from freqtrade.freqai.torch.PyTorchDataConvertor import PyTorchDataConvertor
@@ -29,51 +25,6 @@ class BasePyTorchModel(IFreqaiModel, ABC):
self.splits = ["train", "test"] if test_size != 0 else ["train"]
self.window_size = self.freqai_info.get("conv_width", 1)
def train(
self, unfiltered_df: DataFrame, pair: str, dk: FreqaiDataKitchen, **kwargs
) -> Any:
"""
Filter the training data and train a model on it. Train makes heavy use of the datakitchen
for storing, saving, loading, and analyzing the data.
:param unfiltered_df: Full dataframe for the current training period
:return:
:model: Trained model which can be used for inference (self.predict)
"""
logger.info(f"-------------------- Starting training {pair} --------------------")
start_time = time()
features_filtered, labels_filtered = dk.filter_features(
unfiltered_df,
dk.training_features_list,
dk.label_list,
training_filter=True,
)
# split data into train/test data.
data_dictionary = dk.make_train_test_datasets(features_filtered, labels_filtered)
if not self.freqai_info.get("fit_live_predictions", 0) or not self.live:
dk.fit_labels()
# normalize all data based on train_dataset only
data_dictionary = dk.normalize_data(data_dictionary)
# optional additional data cleaning/analysis
self.data_cleaning_train(dk)
logger.info(
f"Training model on {len(dk.data_dictionary['train_features'].columns)} features"
)
logger.info(f"Training model on {len(data_dictionary['train_features'])} data points")
model = self.fit(data_dictionary, dk)
end_time = time()
logger.info(f"-------------------- Done training {pair} "
f"({end_time - start_time:.2f} secs) --------------------")
return model
@property
@abstractmethod
def data_convertor(self) -> PyTorchDataConvertor:

View File

@@ -1,5 +1,6 @@
import logging
from typing import Tuple
from time import time
from typing import Any, Tuple
import numpy as np
import numpy.typing as npt
@@ -17,6 +18,7 @@ class BasePyTorchRegressor(BasePyTorchModel):
A PyTorch implementation of a regressor.
User must implement fit method
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
@@ -36,10 +38,11 @@ class BasePyTorchRegressor(BasePyTorchModel):
filtered_df, _ = dk.filter_features(
unfiltered_df, dk.training_features_list, training_filter=False
)
filtered_df = dk.normalize_data_from_metadata(filtered_df)
dk.data_dictionary["prediction_features"] = filtered_df
self.data_cleaning_predict(dk)
dk.data_dictionary["prediction_features"], outliers, _ = dk.feature_pipeline.transform(
dk.data_dictionary["prediction_features"], outlier_check=True)
x = self.data_convertor.convert_x(
dk.data_dictionary["prediction_features"],
device=self.device
@@ -47,5 +50,71 @@ class BasePyTorchRegressor(BasePyTorchModel):
self.model.model.eval()
y = self.model.model(x)
pred_df = DataFrame(y.detach().tolist(), columns=[dk.label_list[0]])
pred_df = dk.denormalize_labels_from_metadata(pred_df)
pred_df, _, _ = dk.label_pipeline.inverse_transform(pred_df)
if dk.feature_pipeline["di"]:
dk.DI_values = dk.feature_pipeline["di"].di_values
else:
dk.DI_values = np.zeros(outliers.shape[0])
dk.do_predict = outliers
return (pred_df, dk.do_predict)
def train(
self, unfiltered_df: DataFrame, pair: str, dk: FreqaiDataKitchen, **kwargs
) -> Any:
"""
Filter the training data and train a model on it. Train makes heavy use of the datakitchen
for storing, saving, loading, and analyzing the data.
:param unfiltered_df: Full dataframe for the current training period
:return:
:model: Trained model which can be used for inference (self.predict)
"""
logger.info(f"-------------------- Starting training {pair} --------------------")
start_time = time()
features_filtered, labels_filtered = dk.filter_features(
unfiltered_df,
dk.training_features_list,
dk.label_list,
training_filter=True,
)
# split data into train/test data.
dd = dk.make_train_test_datasets(features_filtered, labels_filtered)
if not self.freqai_info.get("fit_live_predictions_candles", 0) or not self.live:
dk.fit_labels()
dk.feature_pipeline = self.define_data_pipeline(threads=dk.thread_count)
dk.label_pipeline = self.define_label_pipeline(threads=dk.thread_count)
dd["train_labels"], _, _ = dk.label_pipeline.fit_transform(dd["train_labels"])
dd["test_labels"], _, _ = dk.label_pipeline.transform(dd["test_labels"])
(dd["train_features"],
dd["train_labels"],
dd["train_weights"]) = dk.feature_pipeline.fit_transform(dd["train_features"],
dd["train_labels"],
dd["train_weights"])
dd["train_labels"], _, _ = dk.label_pipeline.fit_transform(dd["train_labels"])
if self.freqai_info.get('data_split_parameters', {}).get('test_size', 0.1) != 0:
(dd["test_features"],
dd["test_labels"],
dd["test_weights"]) = dk.feature_pipeline.transform(dd["test_features"],
dd["test_labels"],
dd["test_weights"])
dd["test_labels"], _, _ = dk.label_pipeline.transform(dd["test_labels"])
logger.info(
f"Training model on {len(dk.data_dictionary['train_features'].columns)} features"
)
logger.info(f"Training model on {len(dd['train_features'])} data points")
model = self.fit(dd, dk)
end_time = time()
logger.info(f"-------------------- Done training {pair} "
f"({end_time - start_time:.2f} secs) --------------------")
return model

View File

@@ -16,8 +16,8 @@ logger = logging.getLogger(__name__)
class BaseRegressionModel(IFreqaiModel):
"""
Base class for regression type models (e.g. Catboost, LightGBM, XGboost etc.).
User *must* inherit from this class and set fit() and predict(). See example scripts
such as prediction_models/CatboostPredictionModel.py for guidance.
User *must* inherit from this class and set fit(). See example scripts
such as prediction_models/CatboostRegressor.py for guidance.
"""
def train(
@@ -49,21 +49,33 @@ class BaseRegressionModel(IFreqaiModel):
logger.info(f"-------------------- Training on data from {start_date} to "
f"{end_date} --------------------")
# split data into train/test data.
data_dictionary = dk.make_train_test_datasets(features_filtered, labels_filtered)
dd = dk.make_train_test_datasets(features_filtered, labels_filtered)
if not self.freqai_info.get("fit_live_predictions_candles", 0) or not self.live:
dk.fit_labels()
# normalize all data based on train_dataset only
data_dictionary = dk.normalize_data(data_dictionary)
dk.feature_pipeline = self.define_data_pipeline(threads=dk.thread_count)
dk.label_pipeline = self.define_label_pipeline(threads=dk.thread_count)
# optional additional data cleaning/analysis
self.data_cleaning_train(dk)
(dd["train_features"],
dd["train_labels"],
dd["train_weights"]) = dk.feature_pipeline.fit_transform(dd["train_features"],
dd["train_labels"],
dd["train_weights"])
dd["train_labels"], _, _ = dk.label_pipeline.fit_transform(dd["train_labels"])
if self.freqai_info.get('data_split_parameters', {}).get('test_size', 0.1) != 0:
(dd["test_features"],
dd["test_labels"],
dd["test_weights"]) = dk.feature_pipeline.transform(dd["test_features"],
dd["test_labels"],
dd["test_weights"])
dd["test_labels"], _, _ = dk.label_pipeline.transform(dd["test_labels"])
logger.info(
f"Training model on {len(dk.data_dictionary['train_features'].columns)} features"
)
logger.info(f"Training model on {len(data_dictionary['train_features'])} data points")
logger.info(f"Training model on {len(dd['train_features'])} data points")
model = self.fit(data_dictionary, dk)
model = self.fit(dd, dk)
end_time = time()
@@ -85,14 +97,12 @@ class BaseRegressionModel(IFreqaiModel):
"""
dk.find_features(unfiltered_df)
filtered_df, _ = dk.filter_features(
dk.data_dictionary["prediction_features"], _ = dk.filter_features(
unfiltered_df, dk.training_features_list, training_filter=False
)
filtered_df = dk.normalize_data_from_metadata(filtered_df)
dk.data_dictionary["prediction_features"] = filtered_df
# optional additional data cleaning/analysis
self.data_cleaning_predict(dk)
dk.data_dictionary["prediction_features"], outliers, _ = dk.feature_pipeline.transform(
dk.data_dictionary["prediction_features"], outlier_check=True)
predictions = self.model.predict(dk.data_dictionary["prediction_features"])
if self.CONV_WIDTH == 1:
@@ -100,6 +110,11 @@ class BaseRegressionModel(IFreqaiModel):
pred_df = DataFrame(predictions, columns=dk.label_list)
pred_df = dk.denormalize_labels_from_metadata(pred_df)
pred_df, _, _ = dk.label_pipeline.inverse_transform(pred_df)
if dk.feature_pipeline["di"]:
dk.DI_values = dk.feature_pipeline["di"].di_values
else:
dk.DI_values = np.zeros(outliers.shape[0])
dk.do_predict = outliers
return (pred_df, dk.do_predict)

View File

@@ -1,70 +0,0 @@
import logging
from time import time
from typing import Any
from pandas import DataFrame
from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
from freqtrade.freqai.freqai_interface import IFreqaiModel
logger = logging.getLogger(__name__)
class BaseTensorFlowModel(IFreqaiModel):
"""
Base class for TensorFlow type models.
User *must* inherit from this class and set fit() and predict().
"""
def train(
self, unfiltered_df: DataFrame, pair: str, dk: FreqaiDataKitchen, **kwargs
) -> Any:
"""
Filter the training data and train a model on it. Train makes heavy use of the datakitchen
for storing, saving, loading, and analyzing the data.
:param unfiltered_df: Full dataframe for the current training period
:param metadata: pair metadata from strategy.
:return:
:model: Trained model which can be used for inference (self.predict)
"""
logger.info(f"-------------------- Starting training {pair} --------------------")
start_time = time()
# filter the features requested by user in the configuration file and elegantly handle NaNs
features_filtered, labels_filtered = dk.filter_features(
unfiltered_df,
dk.training_features_list,
dk.label_list,
training_filter=True,
)
start_date = unfiltered_df["date"].iloc[0].strftime("%Y-%m-%d")
end_date = unfiltered_df["date"].iloc[-1].strftime("%Y-%m-%d")
logger.info(f"-------------------- Training on data from {start_date} to "
f"{end_date} --------------------")
# split data into train/test data.
data_dictionary = dk.make_train_test_datasets(features_filtered, labels_filtered)
if not self.freqai_info.get("fit_live_predictions_candles", 0) or not self.live:
dk.fit_labels()
# normalize all data based on train_dataset only
data_dictionary = dk.normalize_data(data_dictionary)
# optional additional data cleaning/analysis
self.data_cleaning_train(dk)
logger.info(
f"Training model on {len(dk.data_dictionary['train_features'].columns)} features"
)
logger.info(f"Training model on {len(data_dictionary['train_features'])} data points")
model = self.fit(data_dictionary, dk)
end_time = time()
logger.info(f"-------------------- Done training {pair} "
f"({end_time - start_time:.2f} secs) --------------------")
return model

View File

@@ -20,6 +20,7 @@ from pandas import DataFrame
from freqtrade.configuration import TimeRange
from freqtrade.constants import Config
from freqtrade.data.history import load_pair_history
from freqtrade.enums import CandleType
from freqtrade.exceptions import OperationalException
from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
from freqtrade.strategy.interface import IStrategy
@@ -27,6 +28,11 @@ from freqtrade.strategy.interface import IStrategy
logger = logging.getLogger(__name__)
FEATURE_PIPELINE = "feature_pipeline"
LABEL_PIPELINE = "label_pipeline"
TRAINDF = "trained_df"
METADATA = "metadata"
class pair_info(TypedDict):
model_filename: str
@@ -424,7 +430,7 @@ class FreqaiDataDrawer:
dk.data["training_features_list"] = list(dk.data_dictionary["train_features"].columns)
dk.data["label_list"] = dk.label_list
with (save_path / f"{dk.model_filename}_metadata.json").open("w") as fp:
with (save_path / f"{dk.model_filename}_{METADATA}.json").open("w") as fp:
rapidjson.dump(dk.data, fp, default=self.np_encoder, number_mode=rapidjson.NM_NATIVE)
return
@@ -449,39 +455,39 @@ class FreqaiDataDrawer:
elif self.model_type in ["stable_baselines3", "sb3_contrib", "pytorch"]:
model.save(save_path / f"{dk.model_filename}_model.zip")
if dk.svm_model is not None:
dump(dk.svm_model, save_path / f"{dk.model_filename}_svm_model.joblib")
dk.data["data_path"] = str(dk.data_path)
dk.data["model_filename"] = str(dk.model_filename)
dk.data["training_features_list"] = dk.training_features_list
dk.data["label_list"] = dk.label_list
# store the metadata
with (save_path / f"{dk.model_filename}_metadata.json").open("w") as fp:
with (save_path / f"{dk.model_filename}_{METADATA}.json").open("w") as fp:
rapidjson.dump(dk.data, fp, default=self.np_encoder, number_mode=rapidjson.NM_NATIVE)
# save the train data to file so we can check preds for area of applicability later
# save the pipelines to pickle files
with (save_path / f"{dk.model_filename}_{FEATURE_PIPELINE}.pkl").open("wb") as fp:
cloudpickle.dump(dk.feature_pipeline, fp)
with (save_path / f"{dk.model_filename}_{LABEL_PIPELINE}.pkl").open("wb") as fp:
cloudpickle.dump(dk.label_pipeline, fp)
# save the train data to file for post processing if desired
dk.data_dictionary["train_features"].to_pickle(
save_path / f"{dk.model_filename}_trained_df.pkl"
save_path / f"{dk.model_filename}_{TRAINDF}.pkl"
)
dk.data_dictionary["train_dates"].to_pickle(
save_path / f"{dk.model_filename}_trained_dates_df.pkl"
)
if self.freqai_info["feature_parameters"].get("principal_component_analysis"):
cloudpickle.dump(
dk.pca, (dk.data_path / f"{dk.model_filename}_pca_object.pkl").open("wb")
)
self.model_dictionary[coin] = model
self.pair_dict[coin]["model_filename"] = dk.model_filename
self.pair_dict[coin]["data_path"] = str(dk.data_path)
if coin not in self.meta_data_dictionary:
self.meta_data_dictionary[coin] = {}
self.meta_data_dictionary[coin]["train_df"] = dk.data_dictionary["train_features"]
self.meta_data_dictionary[coin]["meta_data"] = dk.data
self.meta_data_dictionary[coin][METADATA] = dk.data
self.meta_data_dictionary[coin][FEATURE_PIPELINE] = dk.feature_pipeline
self.meta_data_dictionary[coin][LABEL_PIPELINE] = dk.label_pipeline
self.save_drawer_to_disk()
return
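A minimal round-trip sketch of the pipeline persistence introduced above, following the same cloudpickle and file-naming convention; the path and model filename are illustrative:
from pathlib import Path

import cloudpickle
from datasieve.pipeline import Pipeline

save_path = Path('/tmp/freqai_demo')               # stand-in for dk.data_path
save_path.mkdir(parents=True, exist_ok=True)
model_filename = 'cb_btc_1h'                       # hypothetical dk.model_filename

with (save_path / f"{model_filename}_feature_pipeline.pkl").open("wb") as fp:
    cloudpickle.dump(Pipeline(), fp)
with (save_path / f"{model_filename}_feature_pipeline.pkl").open("rb") as fp:
    feature_pipeline = cloudpickle.load(fp)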
@@ -491,7 +497,7 @@ class FreqaiDataDrawer:
Load only metadata into datakitchen to increase performance during
presaved backtesting (prediction file loading).
"""
with (dk.data_path / f"{dk.model_filename}_metadata.json").open("r") as fp:
with (dk.data_path / f"{dk.model_filename}_{METADATA}.json").open("r") as fp:
dk.data = rapidjson.load(fp, number_mode=rapidjson.NM_NATIVE)
dk.training_features_list = dk.data["training_features_list"]
dk.label_list = dk.data["label_list"]
@@ -511,15 +517,17 @@ class FreqaiDataDrawer:
dk.data_path = Path(self.pair_dict[coin]["data_path"])
if coin in self.meta_data_dictionary:
dk.data = self.meta_data_dictionary[coin]["meta_data"]
dk.data_dictionary["train_features"] = self.meta_data_dictionary[coin]["train_df"]
dk.data = self.meta_data_dictionary[coin][METADATA]
dk.feature_pipeline = self.meta_data_dictionary[coin][FEATURE_PIPELINE]
dk.label_pipeline = self.meta_data_dictionary[coin][LABEL_PIPELINE]
else:
with (dk.data_path / f"{dk.model_filename}_metadata.json").open("r") as fp:
with (dk.data_path / f"{dk.model_filename}_{METADATA}.json").open("r") as fp:
dk.data = rapidjson.load(fp, number_mode=rapidjson.NM_NATIVE)
dk.data_dictionary["train_features"] = pd.read_pickle(
dk.data_path / f"{dk.model_filename}_trained_df.pkl"
)
with (dk.data_path / f"{dk.model_filename}_{FEATURE_PIPELINE}.pkl").open("rb") as fp:
dk.feature_pipeline = cloudpickle.load(fp)
with (dk.data_path / f"{dk.model_filename}_{LABEL_PIPELINE}.pkl").open("rb") as fp:
dk.label_pipeline = cloudpickle.load(fp)
dk.training_features_list = dk.data["training_features_list"]
dk.label_list = dk.data["label_list"]
@@ -529,9 +537,6 @@ class FreqaiDataDrawer:
model = self.model_dictionary[coin]
elif self.model_type == 'joblib':
model = load(dk.data_path / f"{dk.model_filename}_model.joblib")
elif self.model_type == 'keras':
from tensorflow import keras
model = keras.models.load_model(dk.data_path / f"{dk.model_filename}_model.h5")
elif 'stable_baselines' in self.model_type or 'sb3_contrib' == self.model_type:
mod = importlib.import_module(
self.model_type, self.freqai_info['rl_config']['model_type'])
@@ -543,9 +548,6 @@ class FreqaiDataDrawer:
model = zip["pytrainer"]
model = model.load_from_checkpoint(zip)
if Path(dk.data_path / f"{dk.model_filename}_svm_model.joblib").is_file():
dk.svm_model = load(dk.data_path / f"{dk.model_filename}_svm_model.joblib")
if not model:
raise OperationalException(
f"Unable to load model, ensure model exists at " f"{dk.data_path} "
@@ -555,11 +557,6 @@ class FreqaiDataDrawer:
if coin not in self.model_dictionary:
self.model_dictionary[coin] = model
if self.config["freqai"]["feature_parameters"]["principal_component_analysis"]:
dk.pca = cloudpickle.load(
(dk.data_path / f"{dk.model_filename}_pca_object.pkl").open("rb")
)
return model
def update_historic_data(self, strategy: IStrategy, dk: FreqaiDataKitchen) -> None:
@@ -639,7 +636,7 @@ class FreqaiDataDrawer:
pair=pair,
timerange=timerange,
data_format=self.config.get("dataformat_ohlcv", "json"),
candle_type=self.config.get("trading_mode", "spot"),
candle_type=self.config.get("candle_type_def", CandleType.SPOT),
)
def get_base_and_corr_dataframes(

View File

@@ -4,7 +4,6 @@ import logging
import random
import shutil
from datetime import datetime, timezone
from math import cos, sin
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple
@@ -12,16 +11,12 @@ import numpy as np
import numpy.typing as npt
import pandas as pd
import psutil
from datasieve.pipeline import Pipeline
from pandas import DataFrame
from scipy import stats
from sklearn import linear_model
from sklearn.cluster import DBSCAN
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.model_selection import train_test_split
from sklearn.neighbors import NearestNeighbors
from freqtrade.configuration import TimeRange
from freqtrade.constants import Config
from freqtrade.constants import DOCS_LINK, Config
from freqtrade.data.converter import reduce_dataframe_footprint
from freqtrade.exceptions import OperationalException
from freqtrade.exchange import timeframe_to_seconds
@@ -81,11 +76,12 @@ class FreqaiDataKitchen:
self.backtest_predictions_folder: str = "backtesting_predictions"
self.live = live
self.pair = pair
self.svm_model: linear_model.SGDOneClassSVM = None
self.keras: bool = self.freqai_config.get("keras", False)
self.set_all_pairs()
self.backtest_live_models = config.get("freqai_backtest_live_models", False)
self.feature_pipeline = Pipeline()
self.label_pipeline = Pipeline()
self.DI_values: npt.NDArray = np.array([])
if not self.live:
self.full_path = self.get_full_models_path(self.config)
@@ -227,13 +223,7 @@ class FreqaiDataKitchen:
drop_index = pd.isnull(filtered_df).any(axis=1) # get the rows that have NaNs,
drop_index = drop_index.replace(True, 1).replace(False, 0) # pep8 requirement.
if (training_filter):
const_cols = list((filtered_df.nunique() == 1).loc[lambda x: x].index)
if const_cols:
filtered_df = filtered_df.filter(filtered_df.columns.difference(const_cols))
self.data['constant_features_list'] = const_cols
logger.warning(f"Removed features {const_cols} with constant values.")
else:
self.data['constant_features_list'] = []
# we don't care about total row number (total no. datapoints) in training, we only care
# about removing any row with NaNs
# if labels have multiple columns (user wants to train multiple models), we detect here
@@ -264,8 +254,7 @@ class FreqaiDataKitchen:
self.data["filter_drop_index_training"] = drop_index
else:
if 'constant_features_list' in self.data and len(self.data['constant_features_list']):
filtered_df = self.check_pred_labels(filtered_df)
# we are backtesting so we need to preserve row number to send back to strategy,
# so now we use do_predict to avoid any prediction based on a NaN
drop_index = pd.isnull(filtered_df).any(axis=1)
@@ -307,107 +296,6 @@ class FreqaiDataKitchen:
return self.data_dictionary
def normalize_data(self, data_dictionary: Dict) -> Dict[Any, Any]:
"""
Normalize all data in the data_dictionary according to the training dataset
:param data_dictionary: dictionary containing the cleaned and
split training/test data/labels
:returns:
:data_dictionary: updated dictionary with standardized values.
"""
# standardize the data by training stats
train_max = data_dictionary["train_features"].max()
train_min = data_dictionary["train_features"].min()
data_dictionary["train_features"] = (
2 * (data_dictionary["train_features"] - train_min) / (train_max - train_min) - 1
)
data_dictionary["test_features"] = (
2 * (data_dictionary["test_features"] - train_min) / (train_max - train_min) - 1
)
for item in train_max.keys():
self.data[item + "_max"] = train_max[item]
self.data[item + "_min"] = train_min[item]
for item in data_dictionary["train_labels"].keys():
if data_dictionary["train_labels"][item].dtype == object:
continue
train_labels_max = data_dictionary["train_labels"][item].max()
train_labels_min = data_dictionary["train_labels"][item].min()
data_dictionary["train_labels"][item] = (
2
* (data_dictionary["train_labels"][item] - train_labels_min)
/ (train_labels_max - train_labels_min)
- 1
)
if self.freqai_config.get('data_split_parameters', {}).get('test_size', 0.1) != 0:
data_dictionary["test_labels"][item] = (
2
* (data_dictionary["test_labels"][item] - train_labels_min)
/ (train_labels_max - train_labels_min)
- 1
)
self.data[f"{item}_max"] = train_labels_max
self.data[f"{item}_min"] = train_labels_min
return data_dictionary
def normalize_single_dataframe(self, df: DataFrame) -> DataFrame:
train_max = df.max()
train_min = df.min()
df = (
2 * (df - train_min) / (train_max - train_min) - 1
)
for item in train_max.keys():
self.data[item + "_max"] = train_max[item]
self.data[item + "_min"] = train_min[item]
return df
def normalize_data_from_metadata(self, df: DataFrame) -> DataFrame:
"""
Normalize a set of data using the min and max values from
the associated training data.
:param df: Dataframe to be standardized
"""
train_max = [None] * len(df.keys())
train_min = [None] * len(df.keys())
for i, item in enumerate(df.keys()):
train_max[i] = self.data[f"{item}_max"]
train_min[i] = self.data[f"{item}_min"]
train_max_series = pd.Series(train_max, index=df.keys())
train_min_series = pd.Series(train_min, index=df.keys())
df = (
2 * (df - train_min_series) / (train_max_series - train_min_series) - 1
)
return df
def denormalize_labels_from_metadata(self, df: DataFrame) -> DataFrame:
"""
Denormalize a set of data using the min and max values from
the associated training data.
:param df: Dataframe of predictions to be denormalized
"""
for label in df.columns:
if df[label].dtype == object or label in self.unique_class_list:
continue
df[label] = (
(df[label] + 1)
* (self.data[f"{label}_max"] - self.data[f"{label}_min"])
/ 2
) + self.data[f"{label}_min"]
return df
def split_timerange(
self, tr: str, train_split: int = 28, bt_split: float = 7
) -> Tuple[list, list]:
@@ -452,9 +340,7 @@ class FreqaiDataKitchen:
tr_training_list_timerange.append(copy.deepcopy(timerange_train))
# associated backtest period
timerange_backtest.startts = timerange_train.stopts
timerange_backtest.stopts = timerange_backtest.startts + int(bt_period)
if timerange_backtest.stopts > config_timerange.stopts:
@@ -485,426 +371,6 @@ class FreqaiDataKitchen:
return df
def check_pred_labels(self, df_predictions: DataFrame) -> DataFrame:
"""
Check that prediction feature labels match training feature labels.
:param df_predictions: incoming predictions
"""
constant_labels = self.data['constant_features_list']
df_predictions = df_predictions.filter(
df_predictions.columns.difference(constant_labels)
)
logger.warning(
f"Removed {len(constant_labels)} features from prediction features, "
f"these were considered constant values during most recent training."
)
return df_predictions
def principal_component_analysis(self) -> None:
"""
Performs Principal Component Analysis on the data for dimensionality reduction
and outlier detection (see self.remove_outliers())
No parameters or returns, it acts on the data_dictionary held by the DataHandler.
"""
from sklearn.decomposition import PCA # avoid importing if we dont need it
pca = PCA(0.999)
pca = pca.fit(self.data_dictionary["train_features"])
n_keep_components = pca.n_components_
self.data["n_kept_components"] = n_keep_components
n_components = self.data_dictionary["train_features"].shape[1]
logger.info("reduced feature dimension by %s", n_components - n_keep_components)
logger.info("explained variance %f", np.sum(pca.explained_variance_ratio_))
train_components = pca.transform(self.data_dictionary["train_features"])
self.data_dictionary["train_features"] = pd.DataFrame(
data=train_components,
columns=["PC" + str(i) for i in range(0, n_keep_components)],
index=self.data_dictionary["train_features"].index,
)
# normalising transformed training features
self.data_dictionary["train_features"] = self.normalize_single_dataframe(
self.data_dictionary["train_features"])
# keeping a copy of the non-transformed features so we can check for errors during
# model load from disk
self.data["training_features_list_raw"] = copy.deepcopy(self.training_features_list)
self.training_features_list = self.data_dictionary["train_features"].columns
if self.freqai_config.get('data_split_parameters', {}).get('test_size', 0.1) != 0:
test_components = pca.transform(self.data_dictionary["test_features"])
self.data_dictionary["test_features"] = pd.DataFrame(
data=test_components,
columns=["PC" + str(i) for i in range(0, n_keep_components)],
index=self.data_dictionary["test_features"].index,
)
# normalise transformed test features to transformed training features
self.data_dictionary["test_features"] = self.normalize_data_from_metadata(
self.data_dictionary["test_features"])
self.data["n_kept_components"] = n_keep_components
self.pca = pca
logger.info(f"PCA reduced total features from {n_components} to {n_keep_components}")
if not self.data_path.is_dir():
self.data_path.mkdir(parents=True, exist_ok=True)
return None
def pca_transform(self, filtered_dataframe: DataFrame) -> None:
"""
Use an existing pca transform to transform data into components
:param filtered_dataframe: DataFrame = the cleaned dataframe
"""
pca_components = self.pca.transform(filtered_dataframe)
self.data_dictionary["prediction_features"] = pd.DataFrame(
data=pca_components,
columns=["PC" + str(i) for i in range(0, self.data["n_kept_components"])],
index=filtered_dataframe.index,
)
# normalise transformed predictions to transformed training features
self.data_dictionary["prediction_features"] = self.normalize_data_from_metadata(
self.data_dictionary["prediction_features"])
def compute_distances(self) -> float:
"""
Compute distances between each training point and every other training
point. This metric defines the neighborhood of trained data and is used
for prediction confidence in the Dissimilarity Index
"""
# logger.info("computing average mean distance for all training points")
pairwise = pairwise_distances(
self.data_dictionary["train_features"], n_jobs=self.thread_count)
# remove the diagonal distances, which are self-distances of ~0
np.fill_diagonal(pairwise, np.NaN)
pairwise = pairwise.reshape(-1, 1)
avg_mean_dist = pairwise[~np.isnan(pairwise)].mean()
return avg_mean_dist
def get_outlier_percentage(self, dropped_pts: npt.NDArray) -> float:
"""
Check if more than X% of points were dropped during outlier detection.
"""
outlier_protection_pct = self.freqai_config["feature_parameters"].get(
"outlier_protection_percentage", 30)
outlier_pct = (dropped_pts.sum() / len(dropped_pts)) * 100
if outlier_pct >= outlier_protection_pct:
return outlier_pct
else:
return 0.0
def use_SVM_to_remove_outliers(self, predict: bool) -> None:
"""
Build/inference a Support Vector Machine to detect outliers
in training data and prediction
:param predict: bool = If true, inference an existing SVM model, else construct one
"""
if self.keras:
logger.warning(
"SVM outlier removal not currently supported for Keras based models. "
"Skipping user requested function."
)
if predict:
self.do_predict = np.ones(len(self.data_dictionary["prediction_features"]))
return
if predict:
if not self.svm_model:
logger.warning("No svm model available for outlier removal")
return
y_pred = self.svm_model.predict(self.data_dictionary["prediction_features"])
do_predict = np.where(y_pred == -1, 0, y_pred)
if (len(do_predict) - do_predict.sum()) > 0:
logger.info(f"SVM tossed {len(do_predict) - do_predict.sum()} predictions.")
self.do_predict += do_predict
self.do_predict -= 1
else:
# SGDOneClassSVM is used (rather than OneClassSVM) for speed
svm_params = self.freqai_config["feature_parameters"].get(
"svm_params", {"shuffle": False, "nu": 0.1})
self.svm_model = linear_model.SGDOneClassSVM(**svm_params).fit(
self.data_dictionary["train_features"]
)
y_pred = self.svm_model.predict(self.data_dictionary["train_features"])
kept_points = np.where(y_pred == -1, 0, y_pred)
# keep_index = np.where(y_pred == 1)
outlier_pct = self.get_outlier_percentage(1 - kept_points)
if outlier_pct:
logger.warning(
f"SVM detected {outlier_pct:.2f}% of the points as outliers. "
f"Keeping original dataset."
)
self.svm_model = None
return
self.data_dictionary["train_features"] = self.data_dictionary["train_features"][
(y_pred == 1)
]
self.data_dictionary["train_labels"] = self.data_dictionary["train_labels"][
(y_pred == 1)
]
self.data_dictionary["train_weights"] = self.data_dictionary["train_weights"][
(y_pred == 1)
]
logger.info(
f"SVM tossed {len(y_pred) - kept_points.sum()}"
f" train points from {len(y_pred)} total points."
)
# same for test data
# TODO: This (and the part above) could be refactored into a separate function
# to reduce code duplication
if self.freqai_config['data_split_parameters'].get('test_size', 0.1) != 0:
y_pred = self.svm_model.predict(self.data_dictionary["test_features"])
kept_points = np.where(y_pred == -1, 0, y_pred)
self.data_dictionary["test_features"] = self.data_dictionary["test_features"][
(y_pred == 1)
]
self.data_dictionary["test_labels"] = self.data_dictionary["test_labels"][(
y_pred == 1)]
self.data_dictionary["test_weights"] = self.data_dictionary["test_weights"][
(y_pred == 1)
]
logger.info(
f"{self.pair}: SVM tossed {len(y_pred) - kept_points.sum()}"
f" test points from {len(y_pred)} total points."
)
return
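# Standalone sketch of SGDOneClassSVM outlier removal, mirroring the training
# branch above (synthetic data with planted outliers; parameters illustrative):
import numpy as np
from sklearn import linear_model

rng = np.random.default_rng(1)
train = np.vstack([rng.normal(0, 1, size=(95, 4)),
                   rng.normal(8, 1, size=(5, 4))])  # 5 planted outliers
svm = linear_model.SGDOneClassSVM(shuffle=False, nu=0.1).fit(train)
y_pred = svm.predict(train)                         # +1 inlier, -1 outlier
kept = train[y_pred == 1]
print(f"kept {len(kept)} of {len(train)} points")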
def use_DBSCAN_to_remove_outliers(self, predict: bool, eps=None) -> None:
"""
Use DBSCAN to cluster training data and remove "noisy" data (i.e. outliers).
User controls this via the config param `DBSCAN_outlier_pct` which indicates the
pct of training data that they want to be considered outliers.
:param predict: bool = If False (training), iterate to find the best hyper parameters
to match user requested outlier percent target.
If True (prediction), use the parameters determined from
the previous training to estimate if the current prediction point
is an outlier.
"""
if predict:
if not self.data['DBSCAN_eps']:
return
train_ft_df = self.data_dictionary['train_features']
pred_ft_df = self.data_dictionary['prediction_features']
num_preds = len(pred_ft_df)
df = pd.concat([train_ft_df, pred_ft_df], axis=0, ignore_index=True)
clustering = DBSCAN(eps=self.data['DBSCAN_eps'],
min_samples=self.data['DBSCAN_min_samples'],
n_jobs=self.thread_count
).fit(df)
do_predict = np.where(clustering.labels_[-num_preds:] == -1, 0, 1)
if (len(do_predict) - do_predict.sum()) > 0:
logger.info(f"DBSCAN tossed {len(do_predict) - do_predict.sum()} predictions")
self.do_predict += do_predict
self.do_predict -= 1
else:
def normalise_distances(distances):
normalised_distances = (distances - distances.min()) / \
(distances.max() - distances.min())
return normalised_distances
def rotate_point(origin, point, angle):
# rotate a point counterclockwise by a given angle (in radians)
# around a given origin
x = origin[0] + cos(angle) * (point[0] - origin[0]) - \
sin(angle) * (point[1] - origin[1])
y = origin[1] + sin(angle) * (point[0] - origin[0]) + \
cos(angle) * (point[1] - origin[1])
return (x, y)
MinPts = int(len(self.data_dictionary['train_features'].index) * 0.25)
# measure pairwise distances to nearest neighbours
neighbors = NearestNeighbors(
n_neighbors=MinPts, n_jobs=self.thread_count)
neighbors_fit = neighbors.fit(self.data_dictionary['train_features'])
distances, _ = neighbors_fit.kneighbors(self.data_dictionary['train_features'])
distances = np.sort(distances, axis=0).mean(axis=1)
normalised_distances = normalise_distances(distances)
x_range = np.linspace(0, 1, len(distances))
line = np.linspace(normalised_distances[0],
normalised_distances[-1], len(normalised_distances))
deflection = np.abs(normalised_distances - line)
max_deflection_loc = np.where(deflection == deflection.max())[0][0]
origin = x_range[max_deflection_loc], line[max_deflection_loc]
point = x_range[max_deflection_loc], normalised_distances[max_deflection_loc]
rot_angle = np.pi / 4
elbow_loc = rotate_point(origin, point, rot_angle)
epsilon = elbow_loc[1] * (distances[-1] - distances[0]) + distances[0]
clustering = DBSCAN(eps=epsilon, min_samples=MinPts,
n_jobs=int(self.thread_count)).fit(
self.data_dictionary['train_features']
)
logger.info(f'DBSCAN found eps of {epsilon:.2f}.')
self.data['DBSCAN_eps'] = epsilon
self.data['DBSCAN_min_samples'] = MinPts
dropped_points = np.where(clustering.labels_ == -1, 1, 0)
outlier_pct = self.get_outlier_percentage(dropped_points)
if outlier_pct:
logger.warning(
f"DBSCAN detected {outlier_pct:.2f}% of the points as outliers. "
f"Keeping original dataset."
)
self.data['DBSCAN_eps'] = 0
return
self.data_dictionary['train_features'] = self.data_dictionary['train_features'][
(clustering.labels_ != -1)
]
self.data_dictionary["train_labels"] = self.data_dictionary["train_labels"][
(clustering.labels_ != -1)
]
self.data_dictionary["train_weights"] = self.data_dictionary["train_weights"][
(clustering.labels_ != -1)
]
logger.info(
f"DBSCAN tossed {dropped_points.sum()}"
f" train points from {len(clustering.labels_)}"
)
return
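# Standalone sketch of the k-distance "elbow" heuristic used above to pick
# DBSCAN's eps. This simplified variant takes the point of maximum deflection
# from the chord directly, skipping the rotate_point refinement (synthetic
# data; the 25% MinPts fraction mirrors the code above):
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn.neighbors import NearestNeighbors

rng = np.random.default_rng(2)
train = rng.normal(size=(400, 3))
min_pts = int(len(train) * 0.25)
distances, _ = NearestNeighbors(n_neighbors=min_pts).fit(train).kneighbors(train)
k_dist = np.sort(distances, axis=0).mean(axis=1)  # sorted k-distance curve
norm = (k_dist - k_dist.min()) / (k_dist.max() - k_dist.min())
chord = np.linspace(norm[0], norm[-1], len(norm))
eps = k_dist[np.argmax(np.abs(norm - chord))]     # elbow of the curve
labels = DBSCAN(eps=eps, min_samples=min_pts).fit(train).labels_
print(f"eps={eps:.3f}, outliers={int((labels == -1).sum())}")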
def compute_inlier_metric(self, set_='train') -> None:
"""
Compute inlier metric from backwards distance distributions.
This metric defines how well features from a timepoint fit
into previous timepoints.
"""
def normalise(dataframe: DataFrame, key: str) -> DataFrame:
if set_ == 'train':
min_value = dataframe.min()
max_value = dataframe.max()
self.data[f'{key}_min'] = min_value
self.data[f'{key}_max'] = max_value
else:
min_value = self.data[f'{key}_min']
max_value = self.data[f'{key}_max']
return (dataframe - min_value) / (max_value - min_value)
no_prev_pts = self.freqai_config["feature_parameters"]["inlier_metric_window"]
if set_ == 'train':
compute_df = copy.deepcopy(self.data_dictionary['train_features'])
elif set_ == 'test':
compute_df = copy.deepcopy(self.data_dictionary['test_features'])
else:
compute_df = copy.deepcopy(self.data_dictionary['prediction_features'])
compute_df_reindexed = compute_df.reindex(
index=np.flip(compute_df.index)
)
pairwise = pd.DataFrame(
np.triu(
pairwise_distances(compute_df_reindexed, n_jobs=self.thread_count)
),
columns=compute_df_reindexed.index,
index=compute_df_reindexed.index
)
pairwise = pairwise.round(5)
column_labels = [f'd{i}' for i in range(1, no_prev_pts + 1)]
distances = pd.DataFrame(
columns=column_labels, index=compute_df.index
)
for index in compute_df.index[no_prev_pts:]:
current_row = pairwise.loc[[index]]
current_row_no_zeros = current_row.loc[
:, (current_row != 0).any(axis=0)
]
distances.loc[[index]] = current_row_no_zeros.iloc[
:, :no_prev_pts
]
distances = distances.replace([np.inf, -np.inf], np.nan)
drop_index = pd.isnull(distances).any(axis=1)
distances = distances[drop_index == 0]
inliers = pd.DataFrame(index=distances.index)
for key in distances.keys():
current_distances = distances[key].dropna()
current_distances = normalise(current_distances, key)
if set_ == 'train':
fit_params = stats.weibull_min.fit(current_distances)
self.data[f'{key}_fit_params'] = fit_params
else:
fit_params = self.data[f'{key}_fit_params']
quantiles = stats.weibull_min.cdf(current_distances, *fit_params)
df_inlier = pd.DataFrame(
{key: quantiles}, index=distances.index
)
inliers = pd.concat(
[inliers, df_inlier], axis=1
)
inlier_metric = pd.DataFrame(
data=inliers.sum(axis=1) / no_prev_pts,
columns=['%-inlier_metric'],
index=compute_df.index
)
inlier_metric = (2 * (inlier_metric - inlier_metric.min()) /
(inlier_metric.max() - inlier_metric.min()) - 1)
if set_ in ('train', 'test'):
inlier_metric = inlier_metric.iloc[no_prev_pts:]
compute_df = compute_df.iloc[no_prev_pts:]
self.remove_beginning_points_from_data_dict(set_, no_prev_pts)
self.data_dictionary[f'{set_}_features'] = pd.concat(
[compute_df, inlier_metric], axis=1)
else:
self.data_dictionary['prediction_features'] = pd.concat(
[compute_df, inlier_metric], axis=1)
self.data_dictionary['prediction_features'].fillna(0, inplace=True)
logger.info('Inlier metric computed and added to features.')
return None
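# Standalone sketch of the Weibull-quantile step behind the inlier metric:
# fit a Weibull to one distance column at train time, then map new distances
# to CDF quantiles (synthetic distances, standing in for a column such as d1):
import numpy as np
from scipy import stats

rng = np.random.default_rng(3)
train_distances = rng.weibull(1.5, size=500)
fit_params = stats.weibull_min.fit(train_distances)  # (shape, loc, scale)
new_distances = rng.weibull(1.5, size=10)
quantiles = stats.weibull_min.cdf(new_distances, *fit_params)
print(quantiles)  # values near 1 = unusually large distance for this column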
def remove_beginning_points_from_data_dict(self, set_='train', no_prev_pts: int = 10):
features = self.data_dictionary[f'{set_}_features']
weights = self.data_dictionary[f'{set_}_weights']
labels = self.data_dictionary[f'{set_}_labels']
self.data_dictionary[f'{set_}_weights'] = weights[no_prev_pts:]
self.data_dictionary[f'{set_}_features'] = features.iloc[no_prev_pts:]
self.data_dictionary[f'{set_}_labels'] = labels.iloc[no_prev_pts:]
def add_noise_to_training_features(self) -> None:
"""
Add noise to train features to reduce the risk of overfitting.
"""
mu = 0 # no shift
sigma = self.freqai_config["feature_parameters"]["noise_standard_deviation"]
compute_df = self.data_dictionary['train_features']
noise = np.random.normal(mu, sigma, [compute_df.shape[0], compute_df.shape[1]])
self.data_dictionary['train_features'] += noise
return
def find_features(self, dataframe: DataFrame) -> None:
"""
Find features in the strategy provided dataframe
@@ -925,37 +391,6 @@ class FreqaiDataKitchen:
labels = [c for c in column_names if "&" in c]
self.label_list = labels
def check_if_pred_in_training_spaces(self) -> None:
"""
Compares the distance from each prediction point to each training data
point. It uses this information to estimate a Dissimilarity Index (DI)
and avoid making predictions on any points that are too far away
from the training data set.
"""
distance = pairwise_distances(
self.data_dictionary["train_features"],
self.data_dictionary["prediction_features"],
n_jobs=self.thread_count,
)
self.DI_values = distance.min(axis=0) / self.data["avg_mean_dist"]
do_predict = np.where(
self.DI_values < self.freqai_config["feature_parameters"]["DI_threshold"],
1,
0,
)
if (len(do_predict) - do_predict.sum()) > 0:
logger.info(
f"{self.pair}: DI tossed {len(do_predict) - do_predict.sum()} predictions for "
"being too far from training data."
)
self.do_predict += do_predict
self.do_predict -= 1
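# Standalone sketch of the Dissimilarity Index (DI) check above: minimum
# distance of each prediction point to the training set, scaled by the
# average training distance (synthetic data; threshold illustrative):
import numpy as np
from sklearn.metrics import pairwise_distances

rng = np.random.default_rng(4)
train = rng.normal(size=(200, 5))
preds = rng.normal(size=(10, 5))
pw = pairwise_distances(train)
np.fill_diagonal(pw, np.nan)
avg_mean_dist = np.nanmean(pw)
di_values = pairwise_distances(train, preds).min(axis=0) / avg_mean_dist
do_predict = np.where(di_values < 1.0, 1, 0)  # 1.0 = illustrative DI_threshold
print(do_predict)  # 0 marks predictions too far from the training data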
def set_weights_higher_recent(self, num_weights: int) -> npt.ArrayLike:
"""
Set weights so that recent data is more heavily weighted during
@@ -1325,9 +760,9 @@ class FreqaiDataKitchen:
" which was deprecated on March 1, 2023. Please refer "
"to the strategy migration guide to use the new "
"feature_engineering_* methods: \n"
"https://www.freqtrade.io/en/stable/strategy_migration/#freqai-strategy \n"
f"{DOCS_LINK}/strategy_migration/#freqai-strategy \n"
"And the feature_engineering_* documentation: \n"
"https://www.freqtrade.io/en/latest/freqai-feature-engineering/"
f"{DOCS_LINK}/freqai-feature-engineering/"
)
tfs: List[str] = self.freqai_config["feature_parameters"].get("include_timeframes")
@@ -1515,3 +950,32 @@ class FreqaiDataKitchen:
timerange.startts += buffer * timeframe_to_seconds(self.config["timeframe"])
return timerange
# deprecated functions
def normalize_data(self, data_dictionary: Dict) -> Dict[Any, Any]:
"""
Deprecation warning, migration assistance
"""
logger.warning(f"Your custom IFreqaiModel relies on the deprecated"
" data pipeline. Please update your model to use the new data pipeline."
" This can be achieved by following the migration guide at "
f"{DOCS_LINK}/strategy_migration/#freqai-new-data-pipeline "
"We added a basic pipeline for you, but this will be removed "
"in a future version.")
return data_dictionary
def denormalize_labels_from_metadata(self, df: DataFrame) -> DataFrame:
"""
Deprecation warning, migration assistance
"""
logger.warning(f"Your custom IFreqaiModel relies on the deprecated"
" data pipeline. Please update your model to use the new data pipeline."
" This can be achieved by following the migration guide at "
f"{DOCS_LINK}/strategy_migration/#freqai-new-data-pipeline "
"We added a basic pipeline for you, but this will be removed "
"in a future version.")
pred_df, _, _ = self.label_pipeline.inverse_transform(df)
return pred_df

View File

@@ -7,14 +7,18 @@ from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Dict, List, Literal, Optional, Tuple
import datasieve.transforms as ds
import numpy as np
import pandas as pd
import psutil
from datasieve.pipeline import Pipeline
from datasieve.transforms import SKLearnWrapper
from numpy.typing import NDArray
from pandas import DataFrame
from sklearn.preprocessing import MinMaxScaler
from freqtrade.configuration import TimeRange
from freqtrade.constants import Config
from freqtrade.constants import DOCS_LINK, Config
from freqtrade.data.dataprovider import DataProvider
from freqtrade.enums import RunMode
from freqtrade.exceptions import OperationalException
@@ -503,68 +507,43 @@ class IFreqaiModel(ABC):
"feature_engineering_* functions"
)
def data_cleaning_train(self, dk: FreqaiDataKitchen) -> None:
"""
Base data cleaning method for train.
Functions here improve/modify the input data by identifying outliers,
computing additional metrics, adding noise, reducing dimensionality etc.
"""
def define_data_pipeline(self, threads=-1) -> Pipeline:
ft_params = self.freqai_info["feature_parameters"]
pipe_steps = [
('const', ds.VarianceThreshold(threshold=0)),
('scaler', SKLearnWrapper(MinMaxScaler(feature_range=(-1, 1))))
]
if ft_params.get('inlier_metric_window', 0):
dk.compute_inlier_metric(set_='train')
if self.freqai_info["data_split_parameters"]["test_size"] > 0:
dk.compute_inlier_metric(set_='test')
if ft_params.get(
"principal_component_analysis", False
):
dk.principal_component_analysis()
if ft_params.get("principal_component_analysis", False):
pipe_steps.append(('pca', ds.PCA(n_components=0.999)))
pipe_steps.append(('post-pca-scaler',
SKLearnWrapper(MinMaxScaler(feature_range=(-1, 1)))))
if ft_params.get("use_SVM_to_remove_outliers", False):
dk.use_SVM_to_remove_outliers(predict=False)
svm_params = ft_params.get(
"svm_params", {"shuffle": False, "nu": 0.01})
pipe_steps.append(('svm', ds.SVMOutlierExtractor(**svm_params)))
if ft_params.get("DI_threshold", 0):
dk.data["avg_mean_dist"] = dk.compute_distances()
di = ft_params.get("DI_threshold", 0)
if di:
pipe_steps.append(('di', ds.DissimilarityIndex(di_threshold=di, n_jobs=threads)))
if ft_params.get("use_DBSCAN_to_remove_outliers", False):
if dk.pair in self.dd.old_DBSCAN_eps:
eps = self.dd.old_DBSCAN_eps[dk.pair]
else:
eps = None
dk.use_DBSCAN_to_remove_outliers(predict=False, eps=eps)
self.dd.old_DBSCAN_eps[dk.pair] = dk.data['DBSCAN_eps']
pipe_steps.append(('dbscan', ds.DBSCAN(n_jobs=threads)))
if self.freqai_info["feature_parameters"].get('noise_standard_deviation', 0):
dk.add_noise_to_training_features()
sigma = self.freqai_info["feature_parameters"].get('noise_standard_deviation', 0)
if sigma:
pipe_steps.append(('noise', ds.Noise(sigma=sigma)))
def data_cleaning_predict(self, dk: FreqaiDataKitchen) -> None:
"""
Base data cleaning method for predict.
Functions here are complementary to the functions of data_cleaning_train.
"""
ft_params = self.freqai_info["feature_parameters"]
return Pipeline(pipe_steps)
# ensure user is feeding the correct indicators to the model
self.check_if_feature_list_matches_strategy(dk)
def define_label_pipeline(self, threads=-1) -> Pipeline:
if ft_params.get('inlier_metric_window', 0):
dk.compute_inlier_metric(set_='predict')
label_pipeline = Pipeline([
('scaler', SKLearnWrapper(MinMaxScaler(feature_range=(-1, 1))))
])
if ft_params.get(
"principal_component_analysis", False
):
dk.pca_transform(dk.data_dictionary['prediction_features'])
if ft_params.get("use_SVM_to_remove_outliers", False):
dk.use_SVM_to_remove_outliers(predict=True)
if ft_params.get("DI_threshold", 0):
dk.check_if_pred_in_training_spaces()
if ft_params.get("use_DBSCAN_to_remove_outliers", False):
dk.use_DBSCAN_to_remove_outliers(predict=True)
return label_pipeline
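# Usage sketch of the two pipelines above, mirroring the calls in the
# deprecated shims further down (model is an IFreqaiModel, dk a
# FreqaiDataKitchen; the wrapper function name is illustrative):
def run_pipelines(model, dk):
    dd = dk.data_dictionary
    dk.feature_pipeline = model.define_data_pipeline(threads=dk.thread_count)
    (dd["train_features"], dd["train_labels"], dd["train_weights"]) = \
        dk.feature_pipeline.fit_transform(dd["train_features"],
                                          dd["train_labels"],
                                          dd["train_weights"])
    dk.label_pipeline = model.define_label_pipeline(threads=dk.thread_count)
    dd["train_labels"], _, _ = dk.label_pipeline.fit_transform(dd["train_labels"])
    # at predict time, outlier_check=True also returns the do_predict mask
    dd["predict_features"], outliers, _ = dk.feature_pipeline.transform(
        dd["predict_features"], outlier_check=True)
    dk.do_predict = outliers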
def model_exists(self, dk: FreqaiDataKitchen) -> bool:
"""
@@ -576,8 +555,6 @@ class IFreqaiModel(ABC):
"""
if self.dd.model_type == 'joblib':
file_type = ".joblib"
elif self.dd.model_type == 'keras':
file_type = ".h5"
elif self.dd.model_type in ["stable_baselines3", "sb3_contrib", "pytorch"]:
file_type = ".zip"
@@ -701,7 +678,7 @@ class IFreqaiModel(ABC):
# # for keras type models, the conv_window needs to be prepended so
# # viewing is correct in frequi
if self.freqai_info.get('keras', False) or self.ft_params.get('inlier_metric_window', 0):
if self.ft_params.get('inlier_metric_window', 0):
n_lost_points = self.freqai_info.get('conv_width', 2)
zeros_df = DataFrame(np.zeros((n_lost_points, len(hist_preds_df.columns))),
columns=hist_preds_df.columns)
@@ -991,3 +968,50 @@ class IFreqaiModel(ABC):
:do_predict: np.array of 1s and 0s to indicate places where freqai needed to remove
data (NaNs) or felt uncertain about data (i.e. SVM and/or DI index)
"""
# deprecated functions
def data_cleaning_train(self, dk: FreqaiDataKitchen, pair: str):
"""
throw deprecation warning if this function is called
"""
logger.warning(f"Your model {self.__class__.__name__} relies on the deprecated"
" data pipeline. Please update your model to use the new data pipeline."
" This can be achieved by following the migration guide at "
f"{DOCS_LINK}/strategy_migration/#freqai-new-data-pipeline")
dk.feature_pipeline = self.define_data_pipeline(threads=dk.thread_count)
dd = dk.data_dictionary
(dd["train_features"],
dd["train_labels"],
dd["train_weights"]) = dk.feature_pipeline.fit_transform(dd["train_features"],
dd["train_labels"],
dd["train_weights"])
(dd["test_features"],
dd["test_labels"],
dd["test_weights"]) = dk.feature_pipeline.transform(dd["test_features"],
dd["test_labels"],
dd["test_weights"])
dk.label_pipeline = self.define_label_pipeline(threads=dk.thread_count)
dd["train_labels"], _, _ = dk.label_pipeline.fit_transform(dd["train_labels"])
dd["test_labels"], _, _ = dk.label_pipeline.transform(dd["test_labels"])
return
def data_cleaning_predict(self, dk: FreqaiDataKitchen, pair: str):
"""
throw deprecation warning if this function is called
"""
logger.warning(f"Your model {self.__class__.__name__} relies on the deprecated"
" data pipeline. Please update your model to use the new data pipeline."
" This can be achieved by following the migration guide at "
f"{DOCS_LINK}/strategy_migration/#freqai-new-data-pipeline")
dd = dk.data_dictionary
dd["predict_features"], outliers, _ = dk.feature_pipeline.transform(
dd["predict_features"], outlier_check=True)
if self.freqai_info.get("DI_threshold", 0) > 0:
dk.DI_values = dk.feature_pipeline["di"].di_values
else:
dk.DI_values = np.zeros(outliers.shape[0])
dk.do_predict = outliers
return

View File

@@ -103,13 +103,13 @@ class PyTorchTransformerRegressor(BasePyTorchRegressor):
"""
dk.find_features(unfiltered_df)
filtered_df, _ = dk.filter_features(
dk.data_dictionary["prediction_features"], _ = dk.filter_features(
unfiltered_df, dk.training_features_list, training_filter=False
)
filtered_df = dk.normalize_data_from_metadata(filtered_df)
dk.data_dictionary["prediction_features"] = filtered_df
self.data_cleaning_predict(dk)
dk.data_dictionary["prediction_features"], outliers, _ = dk.feature_pipeline.transform(
dk.data_dictionary["prediction_features"], outlier_check=True)
x = self.data_convertor.convert_x(
dk.data_dictionary["prediction_features"],
device=self.device
@@ -131,7 +131,13 @@ class PyTorchTransformerRegressor(BasePyTorchRegressor):
yb = yb.cpu().squeeze()
pred_df = pd.DataFrame(yb.detach().numpy(), columns=dk.label_list)
pred_df = dk.denormalize_labels_from_metadata(pred_df)
pred_df, _, _ = dk.label_pipeline.inverse_transform(pred_df)
if self.freqai_info.get("DI_threshold", 0) > 0:
dk.DI_values = dk.feature_pipeline["di"].di_values
else:
dk.DI_values = np.zeros(outliers.shape[0])
dk.do_predict = outliers
if x.shape[1] > 1:
zeros_df = pd.DataFrame(np.zeros((x.shape[1] - len(pred_df), len(pred_df.columns))),

View File

@@ -2,7 +2,8 @@ import logging
from typing import Any, Dict
from pandas import DataFrame
from stable_baselines3.common.callbacks import EvalCallback
from sb3_contrib.common.maskable.callbacks import MaskableEvalCallback
from sb3_contrib.common.maskable.utils import is_masking_supported
from stable_baselines3.common.vec_env import SubprocVecEnv, VecMonitor
from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
@@ -55,9 +56,11 @@ class ReinforcementLearner_multiproc(ReinforcementLearner):
env_info=env_info) for i
in range(self.max_threads)]))
self.eval_callback = EvalCallback(self.eval_env, deterministic=True,
render=False, eval_freq=eval_freq,
best_model_save_path=str(dk.data_path))
self.eval_callback = MaskableEvalCallback(self.eval_env, deterministic=True,
render=False, eval_freq=eval_freq,
best_model_save_path=str(dk.data_path),
use_masking=(self.model_type == 'MaskablePPO' and
is_masking_supported(self.eval_env)))
# THE TENSORBOARD CALLBACK IS NOT RECOMMENDED WITH MULTIPLE ENVS:
# IT WILL RETURN FALSE INFORMATION AND IS NOT THREAD SAFE WITH SB3!

View File

@@ -5,6 +5,7 @@ from xgboost import XGBRFRegressor
from freqtrade.freqai.base_models.BaseRegressionModel import BaseRegressionModel
from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
from freqtrade.freqai.tensorboard import TBCallback
logger = logging.getLogger(__name__)
@@ -44,7 +45,10 @@ class XGBoostRFRegressor(BaseRegressionModel):
model = XGBRFRegressor(**self.model_training_parameters)
model.set_params(callbacks=[TBCallback(dk.data_path)], activate=self.activate_tensorboard)
model.fit(X=X, y=y, sample_weight=sample_weight, eval_set=eval_set,
sample_weight_eval_set=eval_weights, xgb_model=xgb_model)
# set the callbacks to empty so that we can serialize to disk later
model.set_params(callbacks=[])
return model

View File

@@ -1302,6 +1302,10 @@ class FreqtradeBot(LoggingMixin):
f"(orderid:{order['id']}) in order to add another one ...")
self.cancel_stoploss_on_exchange(trade)
if not trade.is_open:
logger.warning(
f"Trade {trade} is closed, not creating trailing stoploss order.")
return
# Create new stoploss order
if not self.create_stoploss_order(trade=trade, stop_price=stoploss_norm):

View File

@@ -5,6 +5,7 @@ from logging.handlers import RotatingFileHandler, SysLogHandler
from freqtrade.constants import Config
from freqtrade.exceptions import OperationalException
from freqtrade.loggers.buffering_handler import FTBufferingHandler
from freqtrade.loggers.set_log_levels import set_loggers
from freqtrade.loggers.std_err_stream_handler import FTStdErrStreamHandler
@@ -16,29 +17,6 @@ bufferHandler = FTBufferingHandler(1000)
bufferHandler.setFormatter(Formatter(LOGFORMAT))
def _set_loggers(verbosity: int = 0, api_verbosity: str = 'info') -> None:
"""
Set the logging level for third party libraries
:return: None
"""
logging.getLogger('requests').setLevel(
logging.INFO if verbosity <= 1 else logging.DEBUG
)
logging.getLogger("urllib3").setLevel(
logging.INFO if verbosity <= 1 else logging.DEBUG
)
logging.getLogger('ccxt.base.exchange').setLevel(
logging.INFO if verbosity <= 2 else logging.DEBUG
)
logging.getLogger('telegram').setLevel(logging.INFO)
logging.getLogger('httpx').setLevel(logging.INFO)
logging.getLogger('werkzeug').setLevel(
logging.ERROR if api_verbosity == 'error' else logging.INFO
)
def get_existing_handlers(handlertype):
"""
Returns Existing handler or None (if the handler has not yet been added to the root handlers).
@@ -115,6 +93,6 @@ def setup_logging(config: Config) -> None:
logging.root.addHandler(handler_rf)
logging.root.setLevel(logging.INFO if verbosity < 1 else logging.DEBUG)
_set_loggers(verbosity, config.get('api_server', {}).get('verbosity', 'info'))
set_loggers(verbosity, config.get('api_server', {}).get('verbosity', 'info'))
logger.info('Verbosity set to %s', verbosity)

View File

@@ -0,0 +1,55 @@
import logging
logger = logging.getLogger(__name__)
def set_loggers(verbosity: int = 0, api_verbosity: str = 'info') -> None:
"""
Set the logging level for third party libraries
:return: None
"""
logging.getLogger('requests').setLevel(
logging.INFO if verbosity <= 1 else logging.DEBUG
)
logging.getLogger("urllib3").setLevel(
logging.INFO if verbosity <= 1 else logging.DEBUG
)
logging.getLogger('ccxt.base.exchange').setLevel(
logging.INFO if verbosity <= 2 else logging.DEBUG
)
logging.getLogger('telegram').setLevel(logging.INFO)
logging.getLogger('httpx').setLevel(logging.WARNING)
logging.getLogger('werkzeug').setLevel(
logging.ERROR if api_verbosity == 'error' else logging.INFO
)
__BIAS_TESTER_LOGGERS = [
'freqtrade.resolvers',
'freqtrade.strategy.hyper',
'freqtrade.configuration.config_validation',
]
def reduce_verbosity_for_bias_tester() -> None:
"""
Reduce verbosity for bias tester.
It loads the same strategy several times, which would spam the log.
"""
logger.info("Reducing verbosity for bias tester.")
for logger_name in __BIAS_TESTER_LOGGERS:
logging.getLogger(logger_name).setLevel(logging.WARNING)
def restore_verbosity_for_bias_tester() -> None:
"""
Restore verbosity after bias tester.
"""
logger.info("Restoring log verbosity.")
log_level = logging.NOTSET
for logger_name in __BIAS_TESTER_LOGGERS:
logging.getLogger(logger_name).setLevel(log_level)
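# Usage sketch (assumed pattern, not taken from the diff): wrap the repeated
# strategy loads so they don't spam the log, restoring verbosity afterwards:
from freqtrade.loggers.set_log_levels import (reduce_verbosity_for_bias_tester,
                                              restore_verbosity_for_bias_tester)

reduce_verbosity_for_bias_tester()
try:
    ...  # run the repeated backtests here
finally:
    restore_verbosity_for_bias_tester()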

View File

@@ -3,7 +3,7 @@ from typing import Callable
from cachetools import TTLCache, cached
class LoggingMixin():
class LoggingMixin:
"""
Logging Mixin
Shows similar messages only once every `refresh_period`.

View File

@@ -24,6 +24,7 @@ from freqtrade.enums import (BacktestState, CandleType, ExitCheckTuple, ExitType
from freqtrade.exceptions import DependencyException, OperationalException
from freqtrade.exchange import (amount_to_contract_precision, price_to_precision,
timeframe_to_minutes, timeframe_to_seconds)
from freqtrade.exchange.exchange import Exchange
from freqtrade.mixins import LoggingMixin
from freqtrade.optimize.backtest_caching import get_strategy_run_id
from freqtrade.optimize.bt_progress import BTProgress
@@ -72,7 +73,7 @@ class Backtesting:
backtesting.start()
"""
def __init__(self, config: Config) -> None:
def __init__(self, config: Config, exchange: Optional[Exchange] = None) -> None:
LoggingMixin.show_output = False
self.config = config
@@ -89,7 +90,10 @@ class Backtesting:
self.rejected_df: Dict[str, Dict] = {}
self._exchange_name = self.config['exchange']['name']
self.exchange = ExchangeResolver.load_exchange(self.config, load_leverage_tiers=True)
if not exchange:
exchange = ExchangeResolver.load_exchange(self.config, load_leverage_tiers=True)
self.exchange = exchange
self.dataprovider = DataProvider(self.config, self.exchange)
if self.config.get('strategy_list'):
@@ -114,16 +118,7 @@ class Backtesting:
self.timeframe_min = timeframe_to_minutes(self.timeframe)
self.init_backtest_detail()
self.pairlists = PairListManager(self.exchange, self.config, self.dataprovider)
if 'VolumePairList' in self.pairlists.name_list:
raise OperationalException("VolumePairList not allowed for backtesting. "
"Please use StaticPairList instead.")
if 'PerformanceFilter' in self.pairlists.name_list:
raise OperationalException("PerformanceFilter not allowed for backtesting.")
if len(self.strategylist) > 1 and 'PrecisionFilter' in self.pairlists.name_list:
raise OperationalException(
"PrecisionFilter not allowed for backtesting multiple strategies."
)
self._validate_pairlists_for_backtesting()
self.dataprovider.add_pairlisthandler(self.pairlists)
self.pairlists.refresh_pairlist()
@@ -164,6 +159,18 @@ class Backtesting:
self.init_backtest()
def _validate_pairlists_for_backtesting(self):
if 'VolumePairList' in self.pairlists.name_list:
raise OperationalException("VolumePairList not allowed for backtesting. "
"Please use StaticPairList instead.")
if 'PerformanceFilter' in self.pairlists.name_list:
raise OperationalException("PerformanceFilter not allowed for backtesting.")
if len(self.strategylist) > 1 and 'PrecisionFilter' in self.pairlists.name_list:
raise OperationalException(
"PrecisionFilter not allowed for backtesting multiple strategies."
)
@staticmethod
def cleanup():
LoggingMixin.show_output = True

View File

@@ -35,7 +35,7 @@ def hyperopt_serializer(x):
return str(x)
class HyperoptStateContainer():
class HyperoptStateContainer:
""" Singleton class to track state of hyperopt"""
state: HyperoptState = HyperoptState.OPTIMIZE
@@ -44,7 +44,7 @@ class HyperoptStateContainer():
cls.state = value
class HyperoptTools():
class HyperoptTools:
@staticmethod
def get_strategy_filename(config: Config, strategy_name: str) -> Optional[Path]:

View File

@@ -0,0 +1,275 @@
import logging
import shutil
from copy import deepcopy
from datetime import datetime, timedelta, timezone
from pathlib import Path
from typing import Any, Dict, List, Optional
from pandas import DataFrame
from freqtrade.configuration import TimeRange
from freqtrade.data.history import get_timerange
from freqtrade.exchange import timeframe_to_minutes
from freqtrade.loggers.set_log_levels import (reduce_verbosity_for_bias_tester,
restore_verbosity_for_bias_tester)
from freqtrade.optimize.backtesting import Backtesting
logger = logging.getLogger(__name__)
class VarHolder:
timerange: TimeRange
data: DataFrame
indicators: Dict[str, DataFrame]
result: DataFrame
compared: DataFrame
from_dt: datetime
to_dt: datetime
compared_dt: datetime
timeframe: str
class Analysis:
def __init__(self) -> None:
self.total_signals = 0
self.false_entry_signals = 0
self.false_exit_signals = 0
self.false_indicators: List[str] = []
self.has_bias = False
class LookaheadAnalysis:
def __init__(self, config: Dict[str, Any], strategy_obj: Dict):
self.failed_bias_check = True
self.full_varHolder = VarHolder()
self.entry_varHolders: List[VarHolder] = []
self.exit_varHolders: List[VarHolder] = []
self.exchange: Optional[Any] = None
# pull variables into the scope of the lookahead_analysis instance
self.local_config = deepcopy(config)
self.local_config['strategy'] = strategy_obj['name']
self.current_analysis = Analysis()
self.minimum_trade_amount = config['minimum_trade_amount']
self.targeted_trade_amount = config['targeted_trade_amount']
self.strategy_obj = strategy_obj
@staticmethod
def dt_to_timestamp(dt: datetime):
timestamp = int(dt.replace(tzinfo=timezone.utc).timestamp())
return timestamp
@staticmethod
def get_result(backtesting: Backtesting, processed: DataFrame):
min_date, max_date = get_timerange(processed)
result = backtesting.backtest(
processed=deepcopy(processed),
start_date=min_date,
end_date=max_date
)
return result
@staticmethod
def report_signal(result: dict, column_name: str, checked_timestamp: datetime):
df = result['results']
if df[column_name].shape[0] == 0:
    return False
df_cut = df[(df[column_name] == checked_timestamp)]
return df_cut[column_name].shape[0] != 0
# analyzes two data frames with processed indicators and shows differences between them.
def analyze_indicators(self, full_vars: VarHolder, cut_vars: VarHolder, current_pair: str):
# extract dataframes
cut_df: DataFrame = cut_vars.indicators[current_pair]
full_df: DataFrame = full_vars.indicators[current_pair]
# reduce both dataframes to the row at the compared timestamp
full_df_cut = full_df[
(full_df.date == cut_vars.compared_dt)
].reset_index(drop=True)
cut_df_cut = cut_df[
(cut_df.date == cut_vars.compared_dt)
].reset_index(drop=True)
# check if dataframes are not empty
if full_df_cut.shape[0] != 0 and cut_df_cut.shape[0] != 0:
# compare dataframes
compare_df = full_df_cut.compare(cut_df_cut)
if compare_df.shape[0] > 0:
for col_name, values in compare_df.items():
col_idx = compare_df.columns.get_loc(col_name)
compare_df_row = compare_df.iloc[0]
# compare_df columns are tuples whose second element is 'self' or 'other'
if 'other' in col_name[1]:
continue
self_value = compare_df_row[col_idx]
other_value = compare_df_row[col_idx + 1]
# output differences
if self_value != other_value:
if col_name[0] not in self.current_analysis.false_indicators:
self.current_analysis.false_indicators.append(col_name[0])
logger.info(f"=> found look ahead bias in indicator "
f"{col_name[0]}. "
f"{str(self_value)} != {str(other_value)}")
def prepare_data(self, varholder: VarHolder, pairs_to_load: List[str]):
if 'freqai' in self.local_config and 'identifier' in self.local_config['freqai']:
# purge previous data if the freqai model is defined
# (to be sure nothing is carried over from older backtests)
path_to_current_identifier = (
Path(f"{self.local_config['user_data_dir']}/models/"
f"{self.local_config['freqai']['identifier']}").resolve())
# remove folder and its contents
if Path.exists(path_to_current_identifier):
shutil.rmtree(path_to_current_identifier)
prepare_data_config = deepcopy(self.local_config)
prepare_data_config['timerange'] = (str(self.dt_to_timestamp(varholder.from_dt)) + "-" +
str(self.dt_to_timestamp(varholder.to_dt)))
prepare_data_config['exchange']['pair_whitelist'] = pairs_to_load
backtesting = Backtesting(prepare_data_config, self.exchange)
self.exchange = backtesting.exchange
backtesting._set_strategy(backtesting.strategylist[0])
varholder.data, varholder.timerange = backtesting.load_bt_data()
backtesting.load_bt_data_detail()
varholder.timeframe = backtesting.timeframe
varholder.indicators = backtesting.strategy.advise_all_indicators(varholder.data)
varholder.result = self.get_result(backtesting, varholder.indicators)
def fill_full_varholder(self):
self.full_varHolder = VarHolder()
# define datetime in human-readable format
parsed_timerange = TimeRange.parse_timerange(self.local_config['timerange'])
if parsed_timerange.startdt is None:
self.full_varHolder.from_dt = datetime.fromtimestamp(0, tz=timezone.utc)
else:
self.full_varHolder.from_dt = parsed_timerange.startdt
if parsed_timerange.stopdt is None:
self.full_varHolder.to_dt = datetime.utcnow()
else:
self.full_varHolder.to_dt = parsed_timerange.stopdt
self.prepare_data(self.full_varHolder, self.local_config['pairs'])
def fill_entry_and_exit_varHolders(self, result_row):
# entry_varHolder
entry_varHolder = VarHolder()
self.entry_varHolders.append(entry_varHolder)
entry_varHolder.from_dt = self.full_varHolder.from_dt
entry_varHolder.compared_dt = result_row['open_date']
# to_dt needs +1 candle since it won't buy on the last candle
entry_varHolder.to_dt = (
result_row['open_date'] +
timedelta(minutes=timeframe_to_minutes(self.full_varHolder.timeframe)))
self.prepare_data(entry_varHolder, [result_row['pair']])
# exit_varHolder
exit_varHolder = VarHolder()
self.exit_varHolders.append(exit_varHolder)
# to_dt needs +1 candle since it will always exit/force-exit trades on the last candle
exit_varHolder.from_dt = self.full_varHolder.from_dt
exit_varHolder.to_dt = (
result_row['close_date'] +
timedelta(minutes=timeframe_to_minutes(self.full_varHolder.timeframe)))
exit_varHolder.compared_dt = result_row['close_date']
self.prepare_data(exit_varHolder, [result_row['pair']])
# analyze one full trade from full_varHolder and check it for bias
def analyze_row(self, idx, result_row):
# if force-sold, ignore this signal since the trade exits unconditionally there.
if result_row.close_date == self.dt_to_timestamp(self.full_varHolder.to_dt):
return
# keep track of how many signals are processed in total
self.current_analysis.total_signals += 1
# fill entry_varHolder and exit_varHolder
self.fill_entry_and_exit_varHolders(result_row)
# register if the entry signal is broken
if not self.report_signal(
self.entry_varHolders[idx].result,
"open_date",
self.entry_varHolders[idx].compared_dt):
self.current_analysis.false_entry_signals += 1
# register if the exit signal is broken
if not self.report_signal(
self.exit_varHolders[idx].result,
"close_date",
self.exit_varHolders[idx].compared_dt):
self.current_analysis.false_exit_signals += 1
# check if the indicators themselves contain biased data
self.analyze_indicators(self.full_varHolder, self.entry_varHolders[idx], result_row['pair'])
self.analyze_indicators(self.full_varHolder, self.exit_varHolders[idx], result_row['pair'])
def start(self) -> None:
# first make a single backtest
self.fill_full_varholder()
reduce_verbosity_for_bias_tester()
# check if the requirements of full_varHolder have been met
found_signals: int = self.full_varHolder.result['results'].shape[0] + 1
if found_signals >= self.targeted_trade_amount:
logger.info(f"Found {found_signals} trades, "
f"calculating {self.targeted_trade_amount} trades.")
elif self.targeted_trade_amount >= found_signals >= self.minimum_trade_amount:
logger.info(f"Only found {found_signals} trades. Calculating all available trades.")
else:
logger.info(f"found {found_signals} trades "
f"which is less than minimum_trade_amount {self.minimum_trade_amount}. "
f"Cancelling this backtest lookahead bias test.")
return
# now we loop through all signals
# starting from the same datetime to avoid mis-reports of bias
for idx, result_row in self.full_varHolder.result['results'].iterrows():
if self.current_analysis.total_signals == self.targeted_trade_amount:
break
self.analyze_row(idx, result_row)
# Restore verbosity, so it's not too quiet for the next strategy
restore_verbosity_for_bias_tester()
# check and report signals
if self.current_analysis.total_signals < self.local_config['minimum_trade_amount']:
logger.info(f" -> {self.local_config['strategy']} : too few trades. "
f"We only found {self.current_analysis.total_signals} trades. "
f"Hint: Extend the timerange "
f"to get at least {self.local_config['minimum_trade_amount']} "
f"or lower the value of minimum_trade_amount.")
self.failed_bias_check = True
elif (self.current_analysis.false_entry_signals > 0 or
self.current_analysis.false_exit_signals > 0 or
len(self.current_analysis.false_indicators) > 0):
logger.info(f" => {self.local_config['strategy']} : bias detected!")
self.current_analysis.has_bias = True
self.failed_bias_check = False
else:
logger.info(self.local_config['strategy'] + ": no bias detected")
self.failed_bias_check = False
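# Usage sketch (config keys shown are the ones read above; values are
# illustrative assumptions, plus the usual freqtrade config entries):
config = {
    'strategy': 'MyStrategy',
    'timerange': '20230101-20230401',
    'pairs': ['BTC/USDT'],
    'minimum_trade_amount': 10,
    'targeted_trade_amount': 20,
    # ... remaining freqtrade config keys ...
}
analysis = LookaheadAnalysis(config, {'name': 'MyStrategy'})
analysis.start()
if not analysis.failed_bias_check and analysis.current_analysis.has_bias:
    print('lookahead bias detected')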

View File

@@ -0,0 +1,202 @@
import logging
import time
from pathlib import Path
from typing import Any, Dict, List
import pandas as pd
from freqtrade.constants import Config
from freqtrade.exceptions import OperationalException
from freqtrade.optimize.lookahead_analysis import LookaheadAnalysis
from freqtrade.resolvers import StrategyResolver
logger = logging.getLogger(__name__)
class LookaheadAnalysisSubFunctions:
@staticmethod
def text_table_lookahead_analysis_instances(
config: Dict[str, Any],
lookahead_instances: List[LookaheadAnalysis]):
headers = ['filename', 'strategy', 'has_bias', 'total_signals',
'biased_entry_signals', 'biased_exit_signals', 'biased_indicators']
data = []
for inst in lookahead_instances:
if config['minimum_trade_amount'] > inst.current_analysis.total_signals:
data.append(
[
inst.strategy_obj['location'].parts[-1],
inst.strategy_obj['name'],
"too few trades caught "
f"({inst.current_analysis.total_signals}/{config['minimum_trade_amount']})."
f"Test failed."
]
)
elif inst.failed_bias_check:
data.append(
[
inst.strategy_obj['location'].parts[-1],
inst.strategy_obj['name'],
'error while checking'
]
)
else:
data.append(
[
inst.strategy_obj['location'].parts[-1],
inst.strategy_obj['name'],
inst.current_analysis.has_bias,
inst.current_analysis.total_signals,
inst.current_analysis.false_entry_signals,
inst.current_analysis.false_exit_signals,
", ".join(inst.current_analysis.false_indicators)
]
)
from tabulate import tabulate
table = tabulate(data, headers=headers, tablefmt="orgtbl")
print(table)
return table, headers, data
@staticmethod
def export_to_csv(config: Dict[str, Any], lookahead_analysis: List[LookaheadAnalysis]):
def add_or_update_row(df, row_data):
if (
(df['filename'] == row_data['filename']) &
(df['strategy'] == row_data['strategy'])
).any():
# Update existing row
pd_series = pd.DataFrame([row_data])
df.loc[
(df['filename'] == row_data['filename']) &
(df['strategy'] == row_data['strategy'])
] = pd_series
else:
# Add new row
df = pd.concat([df, pd.DataFrame([row_data], columns=df.columns)])
return df
if Path(config['lookahead_analysis_exportfilename']).exists():
# Read CSV file into a pandas dataframe
csv_df = pd.read_csv(config['lookahead_analysis_exportfilename'])
else:
# Create a new empty DataFrame with the desired column names and set the index
csv_df = pd.DataFrame(columns=[
'filename', 'strategy', 'has_bias', 'total_signals',
'biased_entry_signals', 'biased_exit_signals', 'biased_indicators'
],
index=None)
for inst in lookahead_analysis:
# only update if enough trades were found and the bias check did not fail
if (inst.current_analysis.total_signals > config['minimum_trade_amount']
and inst.failed_bias_check is not True):
new_row_data = {'filename': inst.strategy_obj['location'].parts[-1],
'strategy': inst.strategy_obj['name'],
'has_bias': inst.current_analysis.has_bias,
'total_signals':
int(inst.current_analysis.total_signals),
'biased_entry_signals':
int(inst.current_analysis.false_entry_signals),
'biased_exit_signals':
int(inst.current_analysis.false_exit_signals),
'biased_indicators':
",".join(inst.current_analysis.false_indicators)}
csv_df = add_or_update_row(csv_df, new_row_data)
# Fill NaN values with a default value (e.g., 0)
csv_df['total_signals'] = csv_df['total_signals'].fillna(0)
csv_df['biased_entry_signals'] = csv_df['biased_entry_signals'].fillna(0)
csv_df['biased_exit_signals'] = csv_df['biased_exit_signals'].fillna(0)
# Convert columns to integers
csv_df['total_signals'] = csv_df['total_signals'].astype(int)
csv_df['biased_entry_signals'] = csv_df['biased_entry_signals'].astype(int)
csv_df['biased_exit_signals'] = csv_df['biased_exit_signals'].astype(int)
logger.info(f"saving {config['lookahead_analysis_exportfilename']}")
csv_df.to_csv(config['lookahead_analysis_exportfilename'], index=False)
@staticmethod
def calculate_config_overrides(config: Config):
if config['targeted_trade_amount'] < config['minimum_trade_amount']:
# this combo doesn't make any sense.
raise OperationalException(
"Targeted trade amount can't be smaller than minimum trade amount."
)
if len(config['pairs']) > config['max_open_trades']:
    logger.info('max_open_trades is lower than the number of pairs. '
                'Setting max_open_trades to the number of pairs to avoid false positives.')
config['max_open_trades'] = len(config['pairs'])
min_dry_run_wallet = 1_000_000_000
if config['dry_run_wallet'] < min_dry_run_wallet:
    logger.info('dry_run_wallet was below 1 billion, raising it to 1 billion '
                'to avoid false positives.')
    config['dry_run_wallet'] = min_dry_run_wallet
# enforce cache to be 'none', shift it to 'none' if not already
# (since the default value is 'day')
if config.get('backtest_cache') is None:
config['backtest_cache'] = 'none'
elif config['backtest_cache'] != 'none':
logger.info(f"backtest_cache = "
f"{config['backtest_cache']} detected. "
f"Inside lookahead-analysis it is enforced to be 'none'. "
f"Changed it to 'none'")
config['backtest_cache'] = 'none'
return config
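# Worked example of the overrides above (run after the class is defined;
# values illustrative): three pairs, max_open_trades=1, small wallet.
cfg = {'targeted_trade_amount': 20, 'minimum_trade_amount': 10,
       'pairs': ['A/USDT', 'B/USDT', 'C/USDT'], 'max_open_trades': 1,
       'dry_run_wallet': 1000}
cfg = LookaheadAnalysisSubFunctions.calculate_config_overrides(cfg)
# -> cfg['max_open_trades'] == 3, cfg['dry_run_wallet'] == 1_000_000_000,
#    cfg['backtest_cache'] == 'none'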
@staticmethod
def initialize_single_lookahead_analysis(config: Config, strategy_obj: Dict[str, Any]):
logger.info(f"Bias test of {Path(strategy_obj['location']).name} started.")
start = time.perf_counter()
current_instance = LookaheadAnalysis(config, strategy_obj)
current_instance.start()
elapsed = time.perf_counter() - start
logger.info(f"Checking look ahead bias via backtests "
f"of {Path(strategy_obj['location']).name} "
f"took {elapsed:.0f} seconds.")
return current_instance
@staticmethod
def start(config: Config):
config = LookaheadAnalysisSubFunctions.calculate_config_overrides(config)
strategy_objs = StrategyResolver.search_all_objects(
config, enum_failed=False, recursive=config.get('recursive_strategy_search', False))
lookaheadAnalysis_instances = []
# unify --strategy and --strategy_list to one list
if not (strategy_list := config.get('strategy_list', [])):
if config.get('strategy') is None:
raise OperationalException(
"No Strategy specified. Please specify a strategy via --strategy or "
"--strategy_list"
)
strategy_list = [config['strategy']]
# check if strategies can be properly loaded; only analyze the ones that can be.
for strat in strategy_list:
for strategy_obj in strategy_objs:
if strategy_obj['name'] == strat and strategy_obj not in strategy_list:
lookaheadAnalysis_instances.append(
LookaheadAnalysisSubFunctions.initialize_single_lookahead_analysis(
config, strategy_obj))
break
# report the results
if lookaheadAnalysis_instances:
LookaheadAnalysisSubFunctions.text_table_lookahead_analysis_instances(
config, lookaheadAnalysis_instances)
if config.get('lookahead_analysis_exportfilename') is not None:
LookaheadAnalysisSubFunctions.export_to_csv(config, lookaheadAnalysis_instances)
else:
logger.error("There were no strategies specified neither through "
"--strategy nor through "
"--strategy_list "
"or timeframe was not specified.")

View File

@@ -0,0 +1,18 @@
# flake8: noqa: F401
from freqtrade.optimize.optimize_reports.bt_output import (generate_edge_table,
show_backtest_result,
show_backtest_results,
show_sorted_pairlist,
text_table_add_metrics,
text_table_bt_results,
text_table_exit_reason,
text_table_periodic_breakdown,
text_table_strategy, text_table_tags)
from freqtrade.optimize.optimize_reports.bt_storage import (store_backtest_analysis_results,
store_backtest_stats)
from freqtrade.optimize.optimize_reports.optimize_reports import (
generate_all_periodic_breakdown_stats, generate_backtest_stats, generate_daily_stats,
generate_exit_reason_stats, generate_pair_metrics, generate_periodic_breakdown_stats,
generate_rejected_signals, generate_strategy_comparison, generate_strategy_stats,
generate_tag_metrics, generate_trade_signal_candles, generate_trading_stats,
generate_wins_draws_losses)

View File

@@ -0,0 +1,405 @@
import logging
from typing import Any, Dict, List
from tabulate import tabulate
from freqtrade.constants import UNLIMITED_STAKE_AMOUNT, Config
from freqtrade.misc import decimals_per_coin, round_coin_value
from freqtrade.optimize.optimize_reports.optimize_reports import (generate_periodic_breakdown_stats,
generate_wins_draws_losses)
logger = logging.getLogger(__name__)
def _get_line_floatfmt(stake_currency: str) -> List[str]:
"""
Generate floatformat (goes in line with _generate_result_line())
"""
return ['s', 'd', '.2f', '.2f', f'.{decimals_per_coin(stake_currency)}f',
'.2f', 'd', 's', 's']
def _get_line_header(first_column: str, stake_currency: str,
direction: str = 'Entries') -> List[str]:
"""
Generate header lines (goes in line with _generate_result_line())
"""
return [first_column, direction, 'Avg Profit %', 'Cum Profit %',
f'Tot Profit {stake_currency}', 'Tot Profit %', 'Avg Duration',
'Win Draw Loss Win%']
def text_table_bt_results(pair_results: List[Dict[str, Any]], stake_currency: str) -> str:
"""
Generates and returns a text table for the given backtest data and the results dataframe
:param pair_results: List of Dictionaries - one entry per pair + final TOTAL row
:param stake_currency: stake-currency - used to correctly name headers
:return: pretty printed table with tabulate as string
"""
headers = _get_line_header('Pair', stake_currency)
floatfmt = _get_line_floatfmt(stake_currency)
output = [[
t['key'], t['trades'], t['profit_mean_pct'], t['profit_sum_pct'], t['profit_total_abs'],
t['profit_total_pct'], t['duration_avg'],
generate_wins_draws_losses(t['wins'], t['draws'], t['losses'])
] for t in pair_results]
# Ignore type as floatfmt does allow tuples but mypy does not know that
return tabulate(output, headers=headers,
floatfmt=floatfmt, tablefmt="orgtbl", stralign="right")
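# Minimal example of the tabulate call above with a single synthetic row
# (column values illustrative; helpers are the module-level ones above):
row = ['BTC/USDT', 12, 0.85, 10.20, 102.5, 10.25, '1:25:00',
       '8     1     3  66.7']
print(tabulate([row],
               headers=_get_line_header('Pair', 'USDT'),
               floatfmt=_get_line_floatfmt('USDT'),
               tablefmt='orgtbl', stralign='right'))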
def text_table_exit_reason(exit_reason_stats: List[Dict[str, Any]], stake_currency: str) -> str:
"""
Generate small table outlining Backtest results
:param exit_reason_stats: Exit reason metrics
:param stake_currency: Stakecurrency used
:return: pretty printed table with tabulate as string
"""
headers = [
'Exit Reason',
'Exits',
'Win Draws Loss Win%',
'Avg Profit %',
'Cum Profit %',
f'Tot Profit {stake_currency}',
'Tot Profit %',
]
output = [[
t.get('exit_reason', t.get('sell_reason')), t['trades'],
generate_wins_draws_losses(t['wins'], t['draws'], t['losses']),
t['profit_mean_pct'], t['profit_sum_pct'],
round_coin_value(t['profit_total_abs'], stake_currency, False),
t['profit_total_pct'],
] for t in exit_reason_stats]
return tabulate(output, headers=headers, tablefmt="orgtbl", stralign="right")
def text_table_tags(tag_type: str, tag_results: List[Dict[str, Any]], stake_currency: str) -> str:
"""
Generates and returns a text table for the given backtest data and the results dataframe
:param tag_results: List of Dictionaries - one entry per tag + final TOTAL row
:param stake_currency: stake-currency - used to correctly name headers
:return: pretty printed table with tabulate as string
"""
if tag_type == "enter_tag":
headers = _get_line_header("TAG", stake_currency)
else:
headers = _get_line_header("TAG", stake_currency, 'Exits')
floatfmt = _get_line_floatfmt(stake_currency)
output = [
[
t['key'] if t['key'] is not None and len(
t['key']) > 0 else "OTHER",
t['trades'],
t['profit_mean_pct'],
t['profit_sum_pct'],
t['profit_total_abs'],
t['profit_total_pct'],
t['duration_avg'],
generate_wins_draws_losses(
t['wins'],
t['draws'],
t['losses'])] for t in tag_results]
# Ignore type as floatfmt does allow tuples but mypy does not know that
return tabulate(output, headers=headers,
floatfmt=floatfmt, tablefmt="orgtbl", stralign="right")
def text_table_periodic_breakdown(days_breakdown_stats: List[Dict[str, Any]],
stake_currency: str, period: str) -> str:
"""
Generate small table with Backtest results by days
:param days_breakdown_stats: Days breakdown metrics
:param stake_currency: Stakecurrency used
:return: pretty printed table with tabulate as string
"""
headers = [
period.capitalize(),
f'Tot Profit {stake_currency}',
'Wins',
'Draws',
'Losses',
]
output = [[
d['date'], round_coin_value(d['profit_abs'], stake_currency, False),
d['wins'], d['draws'], d['loses'],
] for d in days_breakdown_stats]
return tabulate(output, headers=headers, tablefmt="orgtbl", stralign="right")
def text_table_strategy(strategy_results, stake_currency: str) -> str:
"""
Generate summary table per strategy
:param strategy_results: Dict of <Strategyname: DataFrame> containing results for all strategies
:param stake_currency: stake-currency - used to correctly name headers
:return: pretty printed table with tabulate as string
"""
floatfmt = _get_line_floatfmt(stake_currency)
headers = _get_line_header('Strategy', stake_currency)
# _get_line_header() is also used for per-pair summary. Per-pair drawdown is mostly useless
# therefore we slip this column in only for strategy summary here.
headers.append('Drawdown')
# Right-pad both drawdown parts so they align around the stake-currency separator.
if 'max_drawdown_account' in strategy_results[0]:
drawdown = [f'{t["max_drawdown_account"] * 100:.2f}' for t in strategy_results]
else:
# Support for prior backtest results
drawdown = [f'{t["max_drawdown_per"]:.2f}' for t in strategy_results]
dd_pad_abs = max([len(t['max_drawdown_abs']) for t in strategy_results])
dd_pad_per = max([len(dd) for dd in drawdown])
drawdown = [f'{t["max_drawdown_abs"]:>{dd_pad_abs}} {stake_currency} {dd:>{dd_pad_per}}%'
for t, dd in zip(strategy_results, drawdown)]
output = [[
t['key'], t['trades'], t['profit_mean_pct'], t['profit_sum_pct'], t['profit_total_abs'],
t['profit_total_pct'], t['duration_avg'],
generate_wins_draws_losses(t['wins'], t['draws'], t['losses']), drawdown]
for t, drawdown in zip(strategy_results, drawdown)]
# Ignore type as floatfmt does allow tuples but mypy does not know that
return tabulate(output, headers=headers,
floatfmt=floatfmt, tablefmt="orgtbl", stralign="right")
def text_table_add_metrics(strat_results: Dict) -> str:
if len(strat_results['trades']) > 0:
best_trade = max(strat_results['trades'], key=lambda x: x['profit_ratio'])
worst_trade = min(strat_results['trades'], key=lambda x: x['profit_ratio'])
short_metrics = [
('', ''), # Empty line to improve readability
('Long / Short',
f"{strat_results.get('trade_count_long', 'total_trades')} / "
f"{strat_results.get('trade_count_short', 0)}"),
('Total profit Long %', f"{strat_results['profit_total_long']:.2%}"),
('Total profit Short %', f"{strat_results['profit_total_short']:.2%}"),
('Absolute profit Long', round_coin_value(strat_results['profit_total_long_abs'],
strat_results['stake_currency'])),
('Absolute profit Short', round_coin_value(strat_results['profit_total_short_abs'],
strat_results['stake_currency'])),
] if strat_results.get('trade_count_short', 0) > 0 else []
drawdown_metrics = []
if 'max_relative_drawdown' in strat_results:
# Compatibility to show old hyperopt results
drawdown_metrics.append(
('Max % of account underwater', f"{strat_results['max_relative_drawdown']:.2%}")
)
drawdown_metrics.extend([
('Absolute Drawdown (Account)', f"{strat_results['max_drawdown_account']:.2%}")
if 'max_drawdown_account' in strat_results else (
'Drawdown', f"{strat_results['max_drawdown']:.2%}"),
('Absolute Drawdown', round_coin_value(strat_results['max_drawdown_abs'],
strat_results['stake_currency'])),
('Drawdown high', round_coin_value(strat_results['max_drawdown_high'],
strat_results['stake_currency'])),
('Drawdown low', round_coin_value(strat_results['max_drawdown_low'],
strat_results['stake_currency'])),
('Drawdown Start', strat_results['drawdown_start']),
('Drawdown End', strat_results['drawdown_end']),
])
entry_adjustment_metrics = [
('Canceled Trade Entries', strat_results.get('canceled_trade_entries', 'N/A')),
('Canceled Entry Orders', strat_results.get('canceled_entry_orders', 'N/A')),
('Replaced Entry Orders', strat_results.get('replaced_entry_orders', 'N/A')),
] if strat_results.get('canceled_entry_orders', 0) > 0 else []
# Newly added fields should be ignored if they are missing in strat_results. hyperopt-show
# command stores these results and newer version of freqtrade must be able to handle old
# results with missing new fields.
metrics = [
('Backtesting from', strat_results['backtest_start']),
('Backtesting to', strat_results['backtest_end']),
('Max open trades', strat_results['max_open_trades']),
('', ''), # Empty line to improve readability
('Total/Daily Avg Trades',
f"{strat_results['total_trades']} / {strat_results['trades_per_day']}"),
('Starting balance', round_coin_value(strat_results['starting_balance'],
strat_results['stake_currency'])),
('Final balance', round_coin_value(strat_results['final_balance'],
strat_results['stake_currency'])),
('Absolute profit ', round_coin_value(strat_results['profit_total_abs'],
strat_results['stake_currency'])),
('Total profit %', f"{strat_results['profit_total']:.2%}"),
('CAGR %', f"{strat_results['cagr']:.2%}" if 'cagr' in strat_results else 'N/A'),
('Sortino', f"{strat_results['sortino']:.2f}" if 'sortino' in strat_results else 'N/A'),
('Sharpe', f"{strat_results['sharpe']:.2f}" if 'sharpe' in strat_results else 'N/A'),
('Calmar', f"{strat_results['calmar']:.2f}" if 'calmar' in strat_results else 'N/A'),
('Profit factor', f'{strat_results["profit_factor"]:.2f}' if 'profit_factor'
in strat_results else 'N/A'),
('Expectancy', f"{strat_results['expectancy']:.2f}" if 'expectancy'
in strat_results else 'N/A'),
('Trades per day', strat_results['trades_per_day']),
('Avg. daily profit %',
f"{(strat_results['profit_total'] / strat_results['backtest_days']):.2%}"),
('Avg. stake amount', round_coin_value(strat_results['avg_stake_amount'],
strat_results['stake_currency'])),
('Total trade volume', round_coin_value(strat_results['total_volume'],
strat_results['stake_currency'])),
*short_metrics,
('', ''), # Empty line to improve readability
('Best Pair', f"{strat_results['best_pair']['key']} "
f"{strat_results['best_pair']['profit_sum']:.2%}"),
('Worst Pair', f"{strat_results['worst_pair']['key']} "
f"{strat_results['worst_pair']['profit_sum']:.2%}"),
('Best trade', f"{best_trade['pair']} {best_trade['profit_ratio']:.2%}"),
('Worst trade', f"{worst_trade['pair']} "
f"{worst_trade['profit_ratio']:.2%}"),
('Best day', round_coin_value(strat_results['backtest_best_day_abs'],
strat_results['stake_currency'])),
('Worst day', round_coin_value(strat_results['backtest_worst_day_abs'],
strat_results['stake_currency'])),
('Days win/draw/lose', f"{strat_results['winning_days']} / "
f"{strat_results['draw_days']} / {strat_results['losing_days']}"),
('Avg. Duration Winners', f"{strat_results['winner_holding_avg']}"),
('Avg. Duration Losers', f"{strat_results['loser_holding_avg']}"),
('Rejected Entry signals', strat_results.get('rejected_signals', 'N/A')),
('Entry/Exit Timeouts',
f"{strat_results.get('timedout_entry_orders', 'N/A')} / "
f"{strat_results.get('timedout_exit_orders', 'N/A')}"),
*entry_adjustment_metrics,
('', ''), # Empty line to improve readability
('Min balance', round_coin_value(strat_results['csum_min'],
strat_results['stake_currency'])),
('Max balance', round_coin_value(strat_results['csum_max'],
strat_results['stake_currency'])),
*drawdown_metrics,
('Market change', f"{strat_results['market_change']:.2%}"),
]
return tabulate(metrics, headers=["Metric", "Value"], tablefmt="orgtbl")
else:
start_balance = round_coin_value(strat_results['starting_balance'],
strat_results['stake_currency'])
stake_amount = round_coin_value(
strat_results['stake_amount'], strat_results['stake_currency']
) if strat_results['stake_amount'] != UNLIMITED_STAKE_AMOUNT else 'unlimited'
message = ("No trades made. "
f"Your starting balance was {start_balance}, "
f"and your stake was {stake_amount}."
)
return message
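# A minimal sketch of how tabulate (already imported in this module) renders
# such metric pairs in the "orgtbl" format used above; the rows are hypothetical.
_example_metrics = [('Backtesting from', '2023-01-01 00:00:00'),
                    ('Total profit %', '12.34%')]
print(tabulate(_example_metrics, headers=["Metric", "Value"], tablefmt="orgtbl"))
# | Metric           | Value               |
# |------------------+---------------------|
# | Backtesting from | 2023-01-01 00:00:00 |
# | Total profit %   | 12.34%              |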
def show_backtest_result(strategy: str, results: Dict[str, Any], stake_currency: str,
backtest_breakdown=[]):
"""
Print results for one strategy
"""
# Print results
print(f"Result for strategy {strategy}")
table = text_table_bt_results(results['results_per_pair'], stake_currency=stake_currency)
if isinstance(table, str):
print(' BACKTESTING REPORT '.center(len(table.splitlines()[0]), '='))
print(table)
table = text_table_bt_results(results['left_open_trades'], stake_currency=stake_currency)
if isinstance(table, str) and len(table) > 0:
print(' LEFT OPEN TRADES REPORT '.center(len(table.splitlines()[0]), '='))
print(table)
if (results.get('results_per_enter_tag') is not None
or results.get('results_per_buy_tag') is not None):
# results_per_buy_tag is deprecated and should be removed 2 versions after the short go-live.
table = text_table_tags(
"enter_tag",
results.get('results_per_enter_tag', results.get('results_per_buy_tag')),
stake_currency=stake_currency)
if isinstance(table, str) and len(table) > 0:
print(' ENTER TAG STATS '.center(len(table.splitlines()[0]), '='))
print(table)
exit_reasons = results.get('exit_reason_summary', results.get('sell_reason_summary'))
table = text_table_exit_reason(exit_reason_stats=exit_reasons,
stake_currency=stake_currency)
if isinstance(table, str) and len(table) > 0:
print(' EXIT REASON STATS '.center(len(table.splitlines()[0]), '='))
print(table)
for period in backtest_breakdown:
if period in results.get('periodic_breakdown', {}):
days_breakdown_stats = results['periodic_breakdown'][period]
else:
days_breakdown_stats = generate_periodic_breakdown_stats(
trade_list=results['trades'], period=period)
table = text_table_periodic_breakdown(days_breakdown_stats=days_breakdown_stats,
stake_currency=stake_currency, period=period)
if isinstance(table, str) and len(table) > 0:
print(f' {period.upper()} BREAKDOWN '.center(len(table.splitlines()[0]), '='))
print(table)
table = text_table_add_metrics(results)
if isinstance(table, str) and len(table) > 0:
print(' SUMMARY METRICS '.center(len(table.splitlines()[0]), '='))
print(table)
if isinstance(table, str) and len(table) > 0:
print('=' * len(table.splitlines()[0]))
print()
def show_backtest_results(config: Config, backtest_stats: Dict):
stake_currency = config['stake_currency']
for strategy, results in backtest_stats['strategy'].items():
show_backtest_result(
strategy, results, stake_currency,
config.get('backtest_breakdown', []))
if len(backtest_stats['strategy']) > 0:
# Print Strategy summary table
table = text_table_strategy(backtest_stats['strategy_comparison'], stake_currency)
print(f"Backtested {results['backtest_start']} -> {results['backtest_end']} |"
f" Max open trades : {results['max_open_trades']}")
print(' STRATEGY SUMMARY '.center(len(table.splitlines()[0]), '='))
print(table)
print('=' * len(table.splitlines()[0]))
print('\nFor more details, please look at the detail tables above')
def show_sorted_pairlist(config: Config, backtest_stats: Dict):
if config.get('backtest_show_pair_list', False):
for strategy, results in backtest_stats['strategy'].items():
print(f"Pairs for Strategy {strategy}: \n[")
for result in results['results_per_pair']:
if result["key"] != 'TOTAL':
print(f'"{result["key"]}", // {result["profit_mean"]:.2%}')
print("]")
def generate_edge_table(results: dict) -> str:
floatfmt = ('s', '.10g', '.2f', '.2f', '.2f', '.2f', 'd', 'd', 'd')
tabular_data = []
headers = ['Pair', 'Stoploss', 'Win Rate', 'Risk Reward Ratio',
'Required Risk Reward', 'Expectancy', 'Total Number of Trades',
'Average Duration (min)']
for result in results.items():
if result[1].nb_trades > 0:
tabular_data.append([
result[0],
result[1].stoploss,
result[1].winrate,
result[1].risk_reward_ratio,
result[1].required_risk_reward,
result[1].expectancy,
result[1].nb_trades,
round(result[1].avg_trade_duration)
])
# Ignore type as floatfmt does allow tuples but mypy does not know that
return tabulate(tabular_data, headers=headers,
floatfmt=floatfmt, tablefmt="orgtbl", stralign="right")
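# An illustration of the per-column floatfmt tuple above: each entry formats one
# column, so the stoploss keeps full precision ('.10g') while the ratio columns
# are rounded to two decimals. The edge row below is hypothetical.
_example_row = [['XRP/BTC', -0.0000025, 0.66, 1.5, 1.2, 0.35, 42, 15]]
print(tabulate(_example_row,
               headers=['Pair', 'Stoploss', 'Win Rate', 'Risk Reward Ratio',
                        'Required Risk Reward', 'Expectancy',
                        'Total Number of Trades', 'Average Duration (min)'],
               floatfmt=('s', '.10g', '.2f', '.2f', '.2f', '.2f', 'd', 'd', 'd'),
               tablefmt="orgtbl", stralign="right"))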

View File

@@ -0,0 +1,71 @@
import logging
from pathlib import Path
from typing import Dict
from pandas import DataFrame
from freqtrade.constants import LAST_BT_RESULT_FN
from freqtrade.misc import file_dump_joblib, file_dump_json
from freqtrade.optimize.backtest_caching import get_backtest_metadata_filename
logger = logging.getLogger(__name__)
def store_backtest_stats(
recordfilename: Path, stats: Dict[str, DataFrame], dtappendix: str) -> None:
"""
Stores backtest results
:param recordfilename: Path object, which can either be a filename or a directory.
Filenames will be appended with a timestamp right before the suffix
while for directories, <directory>/backtest-result-<datetime>.json will be used as filename
:param stats: Dict containing the backtesting statistics
:param dtappendix: Datetime to use for the filename
"""
if recordfilename.is_dir():
filename = (recordfilename / f'backtest-result-{dtappendix}.json')
else:
filename = Path.joinpath(
recordfilename.parent, f'{recordfilename.stem}-{dtappendix}'
).with_suffix(recordfilename.suffix)
# Store metadata separately.
file_dump_json(get_backtest_metadata_filename(filename), stats['metadata'])
del stats['metadata']
file_dump_json(filename, stats)
latest_filename = Path.joinpath(filename.parent, LAST_BT_RESULT_FN)
file_dump_json(latest_filename, {'latest_backtest': str(filename.name)})
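# A minimal sketch of the two naming branches documented above; target paths
# and timestamp are illustrative only.
_dt = '2023-06-01_12-00-00'
print(Path('user_data/backtest_results') / f'backtest-result-{_dt}.json')
_file_target = Path('user_data/backtest_results/mybt.json')
print(Path.joinpath(_file_target.parent, f'{_file_target.stem}-{_dt}'
                    ).with_suffix(_file_target.suffix))
# -> user_data/backtest_results/mybt-2023-06-01_12-00-00.json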
def _store_backtest_analysis_data(
recordfilename: Path, data: Dict[str, Dict],
dtappendix: str, name: str) -> Path:
"""
Stores backtest trade candles for analysis
:param recordfilename: Path object, which can either be a filename or a directory.
Filenames will be appended with a timestamp right before the suffix
while for directories, <directory>/backtest-result-<datetime>_<name>.pkl will be used
as filename
:param data: Dict containing the backtesting data for analysis
:param dtappendix: Datetime to use for the filename
:param name: Name to use for the file, e.g. signals, rejected
"""
if recordfilename.is_dir():
filename = (recordfilename / f'backtest-result-{dtappendix}_{name}.pkl')
else:
filename = Path.joinpath(
recordfilename.parent, f'{recordfilename.stem}-{dtappendix}_{name}.pkl'
)
file_dump_joblib(filename, data)
return filename
def store_backtest_analysis_results(
recordfilename: Path, candles: Dict[str, Dict], trades: Dict[str, Dict],
dtappendix: str) -> None:
_store_backtest_analysis_data(recordfilename, candles, dtappendix, "signals")
_store_backtest_analysis_data(recordfilename, trades, dtappendix, "rejected")
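# A hypothetical call site: both helpers above share the same directory and
# timestamp, so the analysis files land next to the regular result file.
# _signal_candles and _rejected_signals stand in for the real inputs.
_signal_candles: Dict[str, Dict] = {}
_rejected_signals: Dict[str, Dict] = {}
store_backtest_analysis_results(
    Path('user_data/backtest_results'), _signal_candles, _rejected_signals,
    '2023-06-01_12-00-00')
# Writes backtest-result-2023-06-01_12-00-00_signals.pkl
# and    backtest-result-2023-06-01_12-00-00_rejected.pkl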

View File

@@ -1,83 +1,20 @@
import logging
from copy import deepcopy
from datetime import datetime, timedelta, timezone
from pathlib import Path
from typing import Any, Dict, List, Union
from pandas import DataFrame, concat, to_datetime
from tabulate import tabulate
from freqtrade.constants import (BACKTEST_BREAKDOWNS, DATETIME_PRINT_FORMAT, LAST_BT_RESULT_FN,
UNLIMITED_STAKE_AMOUNT, Config, IntOrInf)
from freqtrade.constants import BACKTEST_BREAKDOWNS, DATETIME_PRINT_FORMAT, IntOrInf
from freqtrade.data.metrics import (calculate_cagr, calculate_calmar, calculate_csum,
calculate_expectancy, calculate_market_change,
calculate_max_drawdown, calculate_sharpe, calculate_sortino)
from freqtrade.misc import decimals_per_coin, file_dump_joblib, file_dump_json, round_coin_value
from freqtrade.optimize.backtest_caching import get_backtest_metadata_filename
from freqtrade.misc import decimals_per_coin, round_coin_value
logger = logging.getLogger(__name__)
def store_backtest_stats(
recordfilename: Path, stats: Dict[str, DataFrame], dtappendix: str) -> None:
"""
Stores backtest results
:param recordfilename: Path object, which can either be a filename or a directory.
Filenames will be appended with a timestamp right before the suffix
while for directories, <directory>/backtest-result-<datetime>.json will be used as filename
:param stats: Dict containing the backtesting statistics
:param dtappendix: Datetime to use for the filename
"""
if recordfilename.is_dir():
filename = (recordfilename / f'backtest-result-{dtappendix}.json')
else:
filename = Path.joinpath(
recordfilename.parent, f'{recordfilename.stem}-{dtappendix}'
).with_suffix(recordfilename.suffix)
# Store metadata separately.
file_dump_json(get_backtest_metadata_filename(filename), stats['metadata'])
del stats['metadata']
file_dump_json(filename, stats)
latest_filename = Path.joinpath(filename.parent, LAST_BT_RESULT_FN)
file_dump_json(latest_filename, {'latest_backtest': str(filename.name)})
def _store_backtest_analysis_data(
recordfilename: Path, data: Dict[str, Dict],
dtappendix: str, name: str) -> Path:
"""
Stores backtest trade candles for analysis
:param recordfilename: Path object, which can either be a filename or a directory.
Filenames will be appended with a timestamp right before the suffix
while for directories, <directory>/backtest-result-<datetime>_<name>.pkl will be used
as filename
:param data: Dict containing the backtesting data for analysis
:param dtappendix: Datetime to use for the filename
:param name: Name to use for the file, e.g. signals, rejected
"""
if recordfilename.is_dir():
filename = (recordfilename / f'backtest-result-{dtappendix}_{name}.pkl')
else:
filename = Path.joinpath(
recordfilename.parent, f'{recordfilename.stem}-{dtappendix}_{name}.pkl'
)
file_dump_joblib(filename, data)
return filename
def store_backtest_analysis_results(
recordfilename: Path, candles: Dict[str, Dict], trades: Dict[str, Dict],
dtappendix: str) -> None:
_store_backtest_analysis_data(recordfilename, candles, dtappendix, "signals")
_store_backtest_analysis_data(recordfilename, trades, dtappendix, "rejected")
def generate_trade_signal_candles(preprocessed_df: Dict[str, DataFrame],
bt_results: Dict[str, Any]) -> DataFrame:
signal_candles_only = {}
@@ -120,24 +57,6 @@ def generate_rejected_signals(preprocessed_df: Dict[str, DataFrame],
return rejected_candles_only
def _get_line_floatfmt(stake_currency: str) -> List[str]:
"""
Generate floatformat (goes in line with _generate_result_line())
"""
return ['s', 'd', '.2f', '.2f', f'.{decimals_per_coin(stake_currency)}f',
'.2f', 'd', 's', 's']
def _get_line_header(first_column: str, stake_currency: str,
direction: str = 'Entries') -> List[str]:
"""
Generate header lines (goes in line with _generate_result_line())
"""
return [first_column, direction, 'Avg Profit %', 'Cum Profit %',
f'Tot Profit {stake_currency}', 'Tot Profit %', 'Avg Duration',
'Win Draw Loss Win%']
def generate_wins_draws_losses(wins, draws, losses):
if wins > 0 and losses == 0:
wl_ratio = '100'
@@ -295,31 +214,6 @@ def generate_strategy_comparison(bt_stats: Dict) -> List[Dict]:
return tabular_data
def generate_edge_table(results: dict) -> str:
floatfmt = ('s', '.10g', '.2f', '.2f', '.2f', '.2f', 'd', 'd', 'd')
tabular_data = []
headers = ['Pair', 'Stoploss', 'Win Rate', 'Risk Reward Ratio',
'Required Risk Reward', 'Expectancy', 'Total Number of Trades',
'Average Duration (min)']
for result in results.items():
if result[1].nb_trades > 0:
tabular_data.append([
result[0],
result[1].stoploss,
result[1].winrate,
result[1].risk_reward_ratio,
result[1].required_risk_reward,
result[1].expectancy,
result[1].nb_trades,
round(result[1].avg_trade_duration)
])
# Ignore type as floatfmt does allow tuples but mypy does not know that
return tabulate(tabular_data, headers=headers,
floatfmt=floatfmt, tablefmt="orgtbl", stralign="right")
def _get_resample_from_period(period: str) -> str:
if period == 'day':
return '1d'
@@ -652,357 +546,3 @@ def generate_backtest_stats(btdata: Dict[str, DataFrame],
result['strategy_comparison'] = strategy_results
return result
###
# Start output section
###
def text_table_bt_results(pair_results: List[Dict[str, Any]], stake_currency: str) -> str:
"""
Generates and returns a text table for the given backtest data and the results dataframe
:param pair_results: List of Dictionaries - one entry per pair + final TOTAL row
:param stake_currency: stake-currency - used to correctly name headers
:return: pretty printed table with tabulate as string
"""
headers = _get_line_header('Pair', stake_currency)
floatfmt = _get_line_floatfmt(stake_currency)
output = [[
t['key'], t['trades'], t['profit_mean_pct'], t['profit_sum_pct'], t['profit_total_abs'],
t['profit_total_pct'], t['duration_avg'],
generate_wins_draws_losses(t['wins'], t['draws'], t['losses'])
] for t in pair_results]
# Ignore type as floatfmt does allow tuples but mypy does not know that
return tabulate(output, headers=headers,
floatfmt=floatfmt, tablefmt="orgtbl", stralign="right")
def text_table_exit_reason(exit_reason_stats: List[Dict[str, Any]], stake_currency: str) -> str:
"""
Generate small table outlining Backtest results
:param exit_reason_stats: Exit reason metrics
:param stake_currency: Stakecurrency used
:return: pretty printed table with tabulate as string
"""
headers = [
'Exit Reason',
'Exits',
'Win Draws Loss Win%',
'Avg Profit %',
'Cum Profit %',
f'Tot Profit {stake_currency}',
'Tot Profit %',
]
output = [[
t.get('exit_reason', t.get('sell_reason')), t['trades'],
generate_wins_draws_losses(t['wins'], t['draws'], t['losses']),
t['profit_mean_pct'], t['profit_sum_pct'],
round_coin_value(t['profit_total_abs'], stake_currency, False),
t['profit_total_pct'],
] for t in exit_reason_stats]
return tabulate(output, headers=headers, tablefmt="orgtbl", stralign="right")
def text_table_tags(tag_type: str, tag_results: List[Dict[str, Any]], stake_currency: str) -> str:
"""
Generates and returns a text table for the given backtest data and the results dataframe
:param tag_results: List of Dictionaries - one entry per tag + final TOTAL row
:param stake_currency: stake-currency - used to correctly name headers
:return: pretty printed table with tabulate as string
"""
if (tag_type == "enter_tag"):
headers = _get_line_header("TAG", stake_currency)
else:
headers = _get_line_header("TAG", stake_currency, 'Exits')
floatfmt = _get_line_floatfmt(stake_currency)
output = [
[
t['key'] if t['key'] is not None and len(
t['key']) > 0 else "OTHER",
t['trades'],
t['profit_mean_pct'],
t['profit_sum_pct'],
t['profit_total_abs'],
t['profit_total_pct'],
t['duration_avg'],
generate_wins_draws_losses(
t['wins'],
t['draws'],
t['losses'])] for t in tag_results]
# Ignore type as floatfmt does allow tuples but mypy does not know that
return tabulate(output, headers=headers,
floatfmt=floatfmt, tablefmt="orgtbl", stralign="right")
def text_table_periodic_breakdown(days_breakdown_stats: List[Dict[str, Any]],
stake_currency: str, period: str) -> str:
"""
Generate small table with Backtest results by days
:param days_breakdown_stats: Days breakdown metrics
:param stake_currency: Stakecurrency used
:return: pretty printed table with tabulate as string
"""
headers = [
period.capitalize(),
f'Tot Profit {stake_currency}',
'Wins',
'Draws',
'Losses',
]
output = [[
d['date'], round_coin_value(d['profit_abs'], stake_currency, False),
d['wins'], d['draws'], d['loses'],
] for d in days_breakdown_stats]
return tabulate(output, headers=headers, tablefmt="orgtbl", stralign="right")
def text_table_strategy(strategy_results, stake_currency: str) -> str:
"""
Generate summary table per strategy
:param strategy_results: List of per-strategy result summaries (one Dict per strategy)
:param stake_currency: stake-currency - used to correctly name headers
:return: pretty printed table with tabulate as string
"""
floatfmt = _get_line_floatfmt(stake_currency)
headers = _get_line_header('Strategy', stake_currency)
# _get_line_header() is also used for the per-pair summary. Per-pair drawdown is mostly
# useless, therefore we slip this column in only for the strategy summary here.
headers.append('Drawdown')
# Align the drawdown string on the center two-space separator.
if 'max_drawdown_account' in strategy_results[0]:
drawdown = [f'{t["max_drawdown_account"] * 100:.2f}' for t in strategy_results]
else:
# Support for prior backtest results
drawdown = [f'{t["max_drawdown_per"]:.2f}' for t in strategy_results]
dd_pad_abs = max([len(t['max_drawdown_abs']) for t in strategy_results])
dd_pad_per = max([len(dd) for dd in drawdown])
drawdown = [f'{t["max_drawdown_abs"]:>{dd_pad_abs}} {stake_currency} {dd:>{dd_pad_per}}%'
for t, dd in zip(strategy_results, drawdown)]
output = [[
t['key'], t['trades'], t['profit_mean_pct'], t['profit_sum_pct'], t['profit_total_abs'],
t['profit_total_pct'], t['duration_avg'],
generate_wins_draws_losses(t['wins'], t['draws'], t['losses']), drawdown]
for t, drawdown in zip(strategy_results, drawdown)]
# Ignore type as floatfmt does allow tuples but mypy does not know that
return tabulate(output, headers=headers,
floatfmt=floatfmt, tablefmt="orgtbl", stralign="right")
def text_table_add_metrics(strat_results: Dict) -> str:
if len(strat_results['trades']) > 0:
best_trade = max(strat_results['trades'], key=lambda x: x['profit_ratio'])
worst_trade = min(strat_results['trades'], key=lambda x: x['profit_ratio'])
short_metrics = [
('', ''), # Empty line to improve readability
('Long / Short',
 f"{strat_results.get('trade_count_long', strat_results['total_trades'])} / "
 f"{strat_results.get('trade_count_short', 0)}"),
('Total profit Long %', f"{strat_results['profit_total_long']:.2%}"),
('Total profit Short %', f"{strat_results['profit_total_short']:.2%}"),
('Absolute profit Long', round_coin_value(strat_results['profit_total_long_abs'],
strat_results['stake_currency'])),
('Absolute profit Short', round_coin_value(strat_results['profit_total_short_abs'],
strat_results['stake_currency'])),
] if strat_results.get('trade_count_short', 0) > 0 else []
drawdown_metrics = []
if 'max_relative_drawdown' in strat_results:
# Compatibility to show old hyperopt results
drawdown_metrics.append(
('Max % of account underwater', f"{strat_results['max_relative_drawdown']:.2%}")
)
drawdown_metrics.extend([
('Absolute Drawdown (Account)', f"{strat_results['max_drawdown_account']:.2%}")
if 'max_drawdown_account' in strat_results else (
'Drawdown', f"{strat_results['max_drawdown']:.2%}"),
('Absolute Drawdown', round_coin_value(strat_results['max_drawdown_abs'],
strat_results['stake_currency'])),
('Drawdown high', round_coin_value(strat_results['max_drawdown_high'],
strat_results['stake_currency'])),
('Drawdown low', round_coin_value(strat_results['max_drawdown_low'],
strat_results['stake_currency'])),
('Drawdown Start', strat_results['drawdown_start']),
('Drawdown End', strat_results['drawdown_end']),
])
entry_adjustment_metrics = [
('Canceled Trade Entries', strat_results.get('canceled_trade_entries', 'N/A')),
('Canceled Entry Orders', strat_results.get('canceled_entry_orders', 'N/A')),
('Replaced Entry Orders', strat_results.get('replaced_entry_orders', 'N/A')),
] if strat_results.get('canceled_entry_orders', 0) > 0 else []
# Newly added fields should be ignored if they are missing in strat_results. hyperopt-show
# command stores these results and newer versions of freqtrade must be able to handle old
# results with missing new fields.
metrics = [
('Backtesting from', strat_results['backtest_start']),
('Backtesting to', strat_results['backtest_end']),
('Max open trades', strat_results['max_open_trades']),
('', ''), # Empty line to improve readability
('Total/Daily Avg Trades',
f"{strat_results['total_trades']} / {strat_results['trades_per_day']}"),
('Starting balance', round_coin_value(strat_results['starting_balance'],
strat_results['stake_currency'])),
('Final balance', round_coin_value(strat_results['final_balance'],
strat_results['stake_currency'])),
('Absolute profit ', round_coin_value(strat_results['profit_total_abs'],
strat_results['stake_currency'])),
('Total profit %', f"{strat_results['profit_total']:.2%}"),
('CAGR %', f"{strat_results['cagr']:.2%}" if 'cagr' in strat_results else 'N/A'),
('Sortino', f"{strat_results['sortino']:.2f}" if 'sortino' in strat_results else 'N/A'),
('Sharpe', f"{strat_results['sharpe']:.2f}" if 'sharpe' in strat_results else 'N/A'),
('Calmar', f"{strat_results['calmar']:.2f}" if 'calmar' in strat_results else 'N/A'),
('Profit factor', f'{strat_results["profit_factor"]:.2f}' if 'profit_factor'
in strat_results else 'N/A'),
('Expectancy', f"{strat_results['expectancy']:.2f}" if 'expectancy'
in strat_results else 'N/A'),
('Trades per day', strat_results['trades_per_day']),
('Avg. daily profit %',
f"{(strat_results['profit_total'] / strat_results['backtest_days']):.2%}"),
('Avg. stake amount', round_coin_value(strat_results['avg_stake_amount'],
strat_results['stake_currency'])),
('Total trade volume', round_coin_value(strat_results['total_volume'],
strat_results['stake_currency'])),
*short_metrics,
('', ''), # Empty line to improve readability
('Best Pair', f"{strat_results['best_pair']['key']} "
f"{strat_results['best_pair']['profit_sum']:.2%}"),
('Worst Pair', f"{strat_results['worst_pair']['key']} "
f"{strat_results['worst_pair']['profit_sum']:.2%}"),
('Best trade', f"{best_trade['pair']} {best_trade['profit_ratio']:.2%}"),
('Worst trade', f"{worst_trade['pair']} "
f"{worst_trade['profit_ratio']:.2%}"),
('Best day', round_coin_value(strat_results['backtest_best_day_abs'],
strat_results['stake_currency'])),
('Worst day', round_coin_value(strat_results['backtest_worst_day_abs'],
strat_results['stake_currency'])),
('Days win/draw/lose', f"{strat_results['winning_days']} / "
f"{strat_results['draw_days']} / {strat_results['losing_days']}"),
('Avg. Duration Winners', f"{strat_results['winner_holding_avg']}"),
('Avg. Duration Losers', f"{strat_results['loser_holding_avg']}"),
('Rejected Entry signals', strat_results.get('rejected_signals', 'N/A')),
('Entry/Exit Timeouts',
f"{strat_results.get('timedout_entry_orders', 'N/A')} / "
f"{strat_results.get('timedout_exit_orders', 'N/A')}"),
*entry_adjustment_metrics,
('', ''), # Empty line to improve readability
('Min balance', round_coin_value(strat_results['csum_min'],
strat_results['stake_currency'])),
('Max balance', round_coin_value(strat_results['csum_max'],
strat_results['stake_currency'])),
*drawdown_metrics,
('Market change', f"{strat_results['market_change']:.2%}"),
]
return tabulate(metrics, headers=["Metric", "Value"], tablefmt="orgtbl")
else:
start_balance = round_coin_value(strat_results['starting_balance'],
strat_results['stake_currency'])
stake_amount = round_coin_value(
strat_results['stake_amount'], strat_results['stake_currency']
) if strat_results['stake_amount'] != UNLIMITED_STAKE_AMOUNT else 'unlimited'
message = ("No trades made. "
f"Your starting balance was {start_balance}, "
f"and your stake was {stake_amount}."
)
return message
def show_backtest_result(strategy: str, results: Dict[str, Any], stake_currency: str,
backtest_breakdown=[]):
"""
Print results for one strategy
"""
# Print results
print(f"Result for strategy {strategy}")
table = text_table_bt_results(results['results_per_pair'], stake_currency=stake_currency)
if isinstance(table, str):
print(' BACKTESTING REPORT '.center(len(table.splitlines()[0]), '='))
print(table)
table = text_table_bt_results(results['left_open_trades'], stake_currency=stake_currency)
if isinstance(table, str) and len(table) > 0:
print(' LEFT OPEN TRADES REPORT '.center(len(table.splitlines()[0]), '='))
print(table)
if (results.get('results_per_enter_tag') is not None
or results.get('results_per_buy_tag') is not None):
# results_per_buy_tag is deprecated and should be removed 2 versions after the short go-live.
table = text_table_tags(
"enter_tag",
results.get('results_per_enter_tag', results.get('results_per_buy_tag')),
stake_currency=stake_currency)
if isinstance(table, str) and len(table) > 0:
print(' ENTER TAG STATS '.center(len(table.splitlines()[0]), '='))
print(table)
exit_reasons = results.get('exit_reason_summary', results.get('sell_reason_summary'))
table = text_table_exit_reason(exit_reason_stats=exit_reasons,
stake_currency=stake_currency)
if isinstance(table, str) and len(table) > 0:
print(' EXIT REASON STATS '.center(len(table.splitlines()[0]), '='))
print(table)
for period in backtest_breakdown:
if period in results.get('periodic_breakdown', {}):
days_breakdown_stats = results['periodic_breakdown'][period]
else:
days_breakdown_stats = generate_periodic_breakdown_stats(
trade_list=results['trades'], period=period)
table = text_table_periodic_breakdown(days_breakdown_stats=days_breakdown_stats,
stake_currency=stake_currency, period=period)
if isinstance(table, str) and len(table) > 0:
print(f' {period.upper()} BREAKDOWN '.center(len(table.splitlines()[0]), '='))
print(table)
table = text_table_add_metrics(results)
if isinstance(table, str) and len(table) > 0:
print(' SUMMARY METRICS '.center(len(table.splitlines()[0]), '='))
print(table)
if isinstance(table, str) and len(table) > 0:
print('=' * len(table.splitlines()[0]))
print()
def show_backtest_results(config: Config, backtest_stats: Dict):
stake_currency = config['stake_currency']
for strategy, results in backtest_stats['strategy'].items():
show_backtest_result(
strategy, results, stake_currency,
config.get('backtest_breakdown', []))
if len(backtest_stats['strategy']) > 0:
# Print Strategy summary table
table = text_table_strategy(backtest_stats['strategy_comparison'], stake_currency)
print(f"Backtested {results['backtest_start']} -> {results['backtest_end']} |"
f" Max open trades : {results['max_open_trades']}")
print(' STRATEGY SUMMARY '.center(len(table.splitlines()[0]), '='))
print(table)
print('=' * len(table.splitlines()[0]))
print('\nFor more details, please look at the detail tables above')
def show_sorted_pairlist(config: Config, backtest_stats: Dict):
if config.get('backtest_show_pair_list', False):
for strategy, results in backtest_stats['strategy'].items():
print(f"Pairs for Strategy {strategy}: \n[")
for result in results['results_per_pair']:
if result["key"] != 'TOTAL':
print(f'"{result["key"]}", // {result["profit_mean"]:.2%}')
print("]")

View File

@@ -42,7 +42,7 @@ class _KeyValueStoreModel(ModelBase):
int_value: Mapped[Optional[int]]
class KeyValueStore():
class KeyValueStore:
"""
Generic bot-wide, persistent key-value store
Can be used to store generic values, e.g. very first bot startup time.

View File

@@ -11,7 +11,7 @@ from freqtrade.persistence.models import PairLock
logger = logging.getLogger(__name__)
class PairLocks():
class PairLocks:
"""
Pairlocks middleware class
Abstracts the database layer away so it becomes optional - which will be necessary to support

View File

@@ -10,6 +10,7 @@ from typing import Any, ClassVar, Dict, List, Optional, Sequence, cast
from sqlalchemy import (Enum, Float, ForeignKey, Integer, ScalarResult, Select, String,
UniqueConstraint, desc, func, select)
from sqlalchemy.orm import Mapped, lazyload, mapped_column, relationship, validates
from typing_extensions import Self
from freqtrade.constants import (CUSTOM_TAG_MAX_LENGTH, DATETIME_PRINT_FORMAT, MATH_CLOSE_PREC,
NON_OPEN_EXCHANGE_STATES, BuySell, LongShort)
@@ -97,7 +98,7 @@ class Order(ModelBase):
@property
def safe_filled(self) -> float:
return self.filled if self.filled is not None else self.amount or 0.0
return self.filled if self.filled is not None else 0.0
@property
def safe_cost(self) -> float:
@@ -246,15 +247,15 @@ class Order(ModelBase):
else:
logger.warning(f"Did not find order for {order}.")
@staticmethod
@classmethod
def parse_from_ccxt_object(
order: Dict[str, Any], pair: str, side: str,
amount: Optional[float] = None, price: Optional[float] = None) -> 'Order':
cls, order: Dict[str, Any], pair: str, side: str,
amount: Optional[float] = None, price: Optional[float] = None) -> Self:
"""
Parse an order from a ccxt object and return a new order Object.
Optional support for overriding amount and price is only used for test simplification.
"""
o = Order(
o = cls(
order_id=str(order['id']),
ft_order_side=side,
ft_pair=pair,
@@ -282,7 +283,7 @@ class Order(ModelBase):
return Order.session.scalars(select(Order).filter(Order.order_id == order_id)).first()
class LocalTrade():
class LocalTrade:
"""
Trade database model.
Used in backtesting - must be aligned to Trade model!
@@ -703,7 +704,7 @@ class LocalTrade():
self.stoploss_order_id = None
self.close_rate_requested = self.stop_loss
self.exit_reason = ExitType.STOPLOSS_ON_EXCHANGE.value
if self.is_open:
if self.is_open and order.safe_filled > 0:
logger.info(f'{order.order_type.upper()} is hit for {self}.')
else:
raise ValueError(f'Unknown order type: {order.order_type}')
@@ -1391,7 +1392,10 @@ class Trade(ModelBase, LocalTrade):
e.g. `(trade_filter=Trade.id == trade_id)`
:return: unsorted query object
"""
return Trade.session.scalars(Trade.get_trades_query(trade_filter, include_orders))
query = Trade.get_trades_query(trade_filter, include_orders)
# This should remain split. If use_db is False, session is not available and the above
# would raise an exception.
return Trade.session.scalars(query)
@staticmethod
def get_open_order_trades() -> List['Trade']:
@@ -1638,8 +1642,8 @@ class Trade(ModelBase, LocalTrade):
)).scalar_one()
return trading_volume
@staticmethod
def from_json(json_str: str) -> 'Trade':
@classmethod
def from_json(cls, json_str: str) -> Self:
"""
Create a Trade instance from a json string.
@@ -1649,7 +1653,7 @@ class Trade(ModelBase, LocalTrade):
"""
import rapidjson
data = rapidjson.loads(json_str)
trade = Trade(
trade = cls(
id=data["trade_id"],
pair=data["pair"],
base_currency=data["base_currency"],

View File

@@ -12,7 +12,7 @@ from freqtrade.constants import Config, ListPairsWithTimeframes
from freqtrade.exceptions import OperationalException
from freqtrade.exchange.types import Tickers
from freqtrade.misc import plural
from freqtrade.plugins.pairlist.IPairList import IPairList
from freqtrade.plugins.pairlist.IPairList import IPairList, PairlistParameter
from freqtrade.util import PeriodicCache, dt_floor_day, dt_now, dt_ts
@@ -68,6 +68,27 @@ class AgeFilter(IPairList):
f"{self._max_days_listed} {plural(self._max_days_listed, 'day')}"
) if self._max_days_listed else '')
@staticmethod
def description() -> str:
return "Filter pairs by age (days listed)."
@staticmethod
def available_parameters() -> Dict[str, PairlistParameter]:
return {
"min_days_listed": {
"type": "number",
"default": 10,
"description": "Minimum Days Listed",
"help": "Minimum number of days a pair must have been listed on the exchange.",
},
"max_days_listed": {
"type": "number",
"default": None,
"description": "Maximum Days Listed",
"help": "Maximum number of days a pair must have been listed on the exchange.",
},
}
def filter_pairlist(self, pairlist: List[str], tickers: Tickers) -> List[str]:
"""
:param pairlist: pairlist to filter or sort

View File

@@ -4,7 +4,7 @@ PairList Handler base class
import logging
from abc import ABC, abstractmethod, abstractproperty
from copy import deepcopy
from typing import Any, Dict, List, Optional
from typing import Any, Dict, List, Literal, Optional, TypedDict, Union
from freqtrade.constants import Config
from freqtrade.exceptions import OperationalException
@@ -16,8 +16,44 @@ from freqtrade.mixins import LoggingMixin
logger = logging.getLogger(__name__)
class __PairlistParameterBase(TypedDict):
description: str
help: str
class __NumberPairlistParameter(__PairlistParameterBase):
type: Literal["number"]
default: Union[int, float, None]
class __StringPairlistParameter(__PairlistParameterBase):
type: Literal["string"]
default: Union[str, None]
class __OptionPairlistParameter(__PairlistParameterBase):
type: Literal["option"]
default: Union[str, None]
options: List[str]
class __BoolPairlistParameter(__PairlistParameterBase):
type: Literal["boolean"]
default: Union[bool, None]
PairlistParameter = Union[
__NumberPairlistParameter,
__StringPairlistParameter,
__OptionPairlistParameter,
__BoolPairlistParameter
]
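# For illustration, a hypothetical value matching the "number" shape above;
# type checkers validate such literals against PairlistParameter.
_example_parameter: PairlistParameter = {
    "type": "number",
    "default": 10,
    "description": "Minimum Days Listed",
    "help": "Minimum number of days a pair must have been listed.",
}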
class IPairList(LoggingMixin, ABC):
is_pairlist_generator = False
def __init__(self, exchange: Exchange, pairlistmanager,
config: Config, pairlistconfig: Dict[str, Any],
pairlist_pos: int) -> None:
@@ -53,6 +89,37 @@ class IPairList(LoggingMixin, ABC):
If no Pairlist requires tickers, an empty Dict is passed
as tickers argument to filter_pairlist
"""
return False
@staticmethod
@abstractmethod
def description() -> str:
"""
Return description of this Pairlist Handler
-> Please overwrite in subclasses
"""
return ""
@staticmethod
def available_parameters() -> Dict[str, PairlistParameter]:
"""
Return parameters used by this Pairlist Handler, and their type.
Returns a dictionary with the parameter name as key, and a dictionary
describing the type, default value and help text as value.
-> Please overwrite in subclasses
"""
return {}
@staticmethod
def refresh_period_parameter() -> Dict[str, PairlistParameter]:
return {
"refresh_period": {
"type": "number",
"default": 1800,
"description": "Refresh period",
"help": "Refresh period in seconds",
}
}
@abstractmethod
def short_desc(self) -> str:

View File

@@ -7,7 +7,7 @@ from typing import Any, Dict, List
from freqtrade.constants import Config
from freqtrade.exceptions import OperationalException
from freqtrade.exchange.types import Tickers
from freqtrade.plugins.pairlist.IPairList import IPairList
from freqtrade.plugins.pairlist.IPairList import IPairList, PairlistParameter
logger = logging.getLogger(__name__)
@@ -43,6 +43,27 @@ class OffsetFilter(IPairList):
return f"{self.name} - Taking {self._number_pairs} Pairs, starting from {self._offset}."
return f"{self.name} - Offsetting pairs by {self._offset}."
@staticmethod
def description() -> str:
return "Offset pair list filter."
@staticmethod
def available_parameters() -> Dict[str, PairlistParameter]:
return {
"offset": {
"type": "number",
"default": 0,
"description": "Offset",
"help": "Offset of the pairlist.",
},
"number_assets": {
"type": "number",
"default": 0,
"description": "Number of assets",
"help": "Number of assets to use from the pairlist, starting from offset.",
},
}
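# A hypothetical "pairlists" configuration using the parameters above,
# expressed as a Python literal: keep ten pairs, starting at offset 5.
_example_pairlists = [
    {"method": "VolumePairList", "number_assets": 50},
    {"method": "OffsetFilter", "offset": 5, "number_assets": 10},
]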
def filter_pairlist(self, pairlist: List[str], tickers: Tickers) -> List[str]:
"""
Filters and sorts pairlist and returns the whitelist again.

View File

@@ -9,7 +9,7 @@ import pandas as pd
from freqtrade.constants import Config
from freqtrade.exchange.types import Tickers
from freqtrade.persistence import Trade
from freqtrade.plugins.pairlist.IPairList import IPairList
from freqtrade.plugins.pairlist.IPairList import IPairList, PairlistParameter
logger = logging.getLogger(__name__)
@@ -40,6 +40,27 @@ class PerformanceFilter(IPairList):
"""
return f"{self.name} - Sorting pairs by performance."
@staticmethod
def description() -> str:
return "Filter pairs by performance."
@staticmethod
def available_parameters() -> Dict[str, PairlistParameter]:
return {
"minutes": {
"type": "number",
"default": 0,
"description": "Minutes",
"help": "Consider trades from the last X minutes. 0 means all trades.",
},
"min_profit": {
"type": "number",
"default": None,
"description": "Minimum profit",
"help": "Minimum profit in percent. Pairs with less profit are removed.",
},
}
def filter_pairlist(self, pairlist: List[str], tickers: Tickers) -> List[str]:
"""
Filters and sorts pairlist and returns the allowlist again.

View File

@@ -46,6 +46,10 @@ class PrecisionFilter(IPairList):
"""
return f"{self.name} - Filtering untradable pairs."
@staticmethod
def description() -> str:
return "Filters low-value coins which would not allow setting stoplosses."
def _validate_pair(self, pair: str, ticker: Optional[Ticker]) -> bool:
"""
Check if pair has enough room to add a stoploss to avoid "unsellable" buys of very

View File

@@ -7,7 +7,7 @@ from typing import Any, Dict, Optional
from freqtrade.constants import Config
from freqtrade.exceptions import OperationalException
from freqtrade.exchange.types import Ticker
from freqtrade.plugins.pairlist.IPairList import IPairList
from freqtrade.plugins.pairlist.IPairList import IPairList, PairlistParameter
logger = logging.getLogger(__name__)
@@ -65,6 +65,40 @@ class PriceFilter(IPairList):
return f"{self.name} - No price filters configured."
@staticmethod
def description() -> str:
return "Filter pairs by price."
@staticmethod
def available_parameters() -> Dict[str, PairlistParameter]:
return {
"low_price_ratio": {
"type": "number",
"default": 0,
"description": "Low price ratio",
"help": ("Remove pairs where a price move of 1 price unit (pip) "
"is above this ratio."),
},
"min_price": {
"type": "number",
"default": 0,
"description": "Minimum price",
"help": "Remove pairs with a price below this value.",
},
"max_price": {
"type": "number",
"default": 0,
"description": "Maximum price",
"help": "Remove pairs with a price above this value.",
},
"max_value": {
"type": "number",
"default": 0,
"description": "Maximum value",
"help": "Remove pairs with a value (price * amount) above this value.",
},
}
def _validate_pair(self, pair: str, ticker: Optional[Ticker]) -> bool:
"""
Check if one price-step (pip) is above a certain barrier.

View File

@@ -8,7 +8,7 @@ from typing import Any, Dict, List, Optional
from freqtrade.exceptions import OperationalException
from freqtrade.exchange.types import Tickers
from freqtrade.plugins.pairlist.IPairList import IPairList
from freqtrade.plugins.pairlist.IPairList import IPairList, PairlistParameter
logger = logging.getLogger(__name__)
@@ -28,6 +28,7 @@ class ProducerPairList(IPairList):
}
],
"""
is_pairlist_generator = True
def __init__(self, exchange, pairlistmanager,
config: Dict[str, Any], pairlistconfig: Dict[str, Any],
@@ -56,6 +57,28 @@ class ProducerPairList(IPairList):
"""
return f"{self.name} - {self._producer_name}"
@staticmethod
def description() -> str:
return "Get a pairlist from an upstream bot."
@staticmethod
def available_parameters() -> Dict[str, PairlistParameter]:
return {
"number_assets": {
"type": "number",
"default": 0,
"description": "Number of assets",
"help": "Number of assets to use from the pairlist",
},
"producer_name": {
"type": "string",
"default": "default",
"description": "Producer name",
"help": ("Name of the producer to use. Requires additional "
"external_message_consumer configuration.")
},
}
def _filter_pairlist(self, pairlist: Optional[List[str]]):
upstream_pairlist = self._pairlistmanager._dataprovider.get_producer_pairs(
self._producer_name)

View File

@@ -15,7 +15,7 @@ from freqtrade import __version__
from freqtrade.constants import Config
from freqtrade.exceptions import OperationalException
from freqtrade.exchange.types import Tickers
from freqtrade.plugins.pairlist.IPairList import IPairList
from freqtrade.plugins.pairlist.IPairList import IPairList, PairlistParameter
logger = logging.getLogger(__name__)
@@ -23,6 +23,8 @@ logger = logging.getLogger(__name__)
class RemotePairList(IPairList):
is_pairlist_generator = True
def __init__(self, exchange, pairlistmanager,
config: Config, pairlistconfig: Dict[str, Any],
pairlist_pos: int) -> None:
@@ -63,6 +65,46 @@ class RemotePairList(IPairList):
"""
return f"{self.name} - {self._pairlistconfig['number_assets']} pairs from RemotePairlist."
@staticmethod
def description() -> str:
return "Retrieve pairs from a remote API."
@staticmethod
def available_parameters() -> Dict[str, PairlistParameter]:
return {
"number_assets": {
"type": "number",
"default": 0,
"description": "Number of assets",
"help": "Number of assets to use from the pairlist.",
},
"pairlist_url": {
"type": "string",
"default": "",
"description": "URL to fetch pairlist from",
"help": "URL to fetch pairlist from",
},
**IPairList.refresh_period_parameter(),
"keep_pairlist_on_failure": {
"type": "boolean",
"default": True,
"description": "Keep last pairlist on failure",
"help": "Keep last pairlist on failure",
},
"read_timeout": {
"type": "number",
"default": 60,
"description": "Read timeout",
"help": "Request timeout for remote pairlist",
},
"bearer_token": {
"type": "string",
"default": "",
"description": "Bearer token",
"help": "Bearer token - used for auth against the upstream service.",
},
}
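# A hypothetical payload the remote endpoint might return; process_json below
# reads the "pairs" key from it.
_example_payload = {"pairs": ["BTC/USDT", "ETH/USDT", "XRP/USDT"]}
# _example_payload.get('pairs', []) -> ['BTC/USDT', 'ETH/USDT', 'XRP/USDT']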
def process_json(self, jsonparse) -> List[str]:
pairlist = jsonparse.get('pairs', [])

View File

@@ -9,7 +9,7 @@ from freqtrade.constants import Config
from freqtrade.enums import RunMode
from freqtrade.exchange import timeframe_to_seconds
from freqtrade.exchange.types import Tickers
from freqtrade.plugins.pairlist.IPairList import IPairList
from freqtrade.plugins.pairlist.IPairList import IPairList, PairlistParameter
from freqtrade.util.periodic_cache import PeriodicCache
@@ -55,6 +55,28 @@ class ShuffleFilter(IPairList):
return (f"{self.name} - Shuffling pairs every {self._shuffle_freq}" +
(f", seed = {self._seed}." if self._seed is not None else "."))
@staticmethod
def description() -> str:
return "Randomize pairlist order."
@staticmethod
def available_parameters() -> Dict[str, PairlistParameter]:
return {
"shuffle_frequency": {
"type": "option",
"default": "candle",
"options": ["candle", "iteration"],
"description": "Shuffle frequency",
"help": "Shuffle frequency. Can be either 'candle' or 'iteration'.",
},
"seed": {
"type": "number",
"default": None,
"description": "Random Seed",
"help": "Seed for random number generator. Not used in live mode.",
},
}
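# A hypothetical configuration entry exercising the "option" and "seed"
# parameters above: shuffle once per candle, reproducible in backtesting.
_example_config = {"method": "ShuffleFilter", "shuffle_frequency": "candle", "seed": 42}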
def filter_pairlist(self, pairlist: List[str], tickers: Tickers) -> List[str]:
"""
Filters and sorts pairlist and returns the whitelist again.

View File

@@ -7,7 +7,7 @@ from typing import Any, Dict, Optional
from freqtrade.constants import Config
from freqtrade.exceptions import OperationalException
from freqtrade.exchange.types import Ticker
from freqtrade.plugins.pairlist.IPairList import IPairList
from freqtrade.plugins.pairlist.IPairList import IPairList, PairlistParameter
logger = logging.getLogger(__name__)
@@ -45,6 +45,21 @@ class SpreadFilter(IPairList):
return (f"{self.name} - Filtering pairs with ask/bid diff above "
f"{self._max_spread_ratio:.2%}.")
@staticmethod
def description() -> str:
return "Filter by bid/ask difference."
@staticmethod
def available_parameters() -> Dict[str, PairlistParameter]:
return {
"max_spread_ratio": {
"type": "number",
"default": 0.005,
"description": "Max spread ratio",
"help": "Max spread ratio for a pair to be considered.",
},
}
def _validate_pair(self, pair: str, ticker: Optional[Ticker]) -> bool:
"""
Validate spread for the ticker

View File

@@ -9,7 +9,7 @@ from typing import Any, Dict, List
from freqtrade.constants import Config
from freqtrade.exchange.types import Tickers
from freqtrade.plugins.pairlist.IPairList import IPairList
from freqtrade.plugins.pairlist.IPairList import IPairList, PairlistParameter
logger = logging.getLogger(__name__)
@@ -17,6 +17,8 @@ logger = logging.getLogger(__name__)
class StaticPairList(IPairList):
is_pairlist_generator = True
def __init__(self, exchange, pairlistmanager,
config: Config, pairlistconfig: Dict[str, Any],
pairlist_pos: int) -> None:
@@ -40,6 +42,21 @@ class StaticPairList(IPairList):
"""
return f"{self.name}"
@staticmethod
def description() -> str:
return "Use pairlist as configured in config."
@staticmethod
def available_parameters() -> Dict[str, PairlistParameter]:
return {
"allow_inactive": {
"type": "boolean",
"default": False,
"description": "Allow inactive pairs",
"help": "Allow inactive pairs to be in the whitelist.",
},
}
def gen_pairlist(self, tickers: Tickers) -> List[str]:
"""
Generate the pairlist

View File

@@ -15,7 +15,7 @@ from freqtrade.constants import Config, ListPairsWithTimeframes
from freqtrade.exceptions import OperationalException
from freqtrade.exchange.types import Tickers
from freqtrade.misc import plural
from freqtrade.plugins.pairlist.IPairList import IPairList
from freqtrade.plugins.pairlist.IPairList import IPairList, PairlistParameter
from freqtrade.util import dt_floor_day, dt_now, dt_ts
@@ -64,6 +64,34 @@ class VolatilityFilter(IPairList):
f"{self._min_volatility}-{self._max_volatility} "
f" the last {self._days} {plural(self._days, 'day')}.")
@staticmethod
def description() -> str:
return "Filter pairs by their recent volatility."
@staticmethod
def available_parameters() -> Dict[str, PairlistParameter]:
return {
"lookback_days": {
"type": "number",
"default": 10,
"description": "Lookback Days",
"help": "Number of days to look back at.",
},
"min_volatility": {
"type": "number",
"default": 0,
"description": "Minimum Volatility",
"help": "Minimum volatility a pair must have to be considered.",
},
"max_volatility": {
"type": "number",
"default": None,
"description": "Maximum Volatility",
"help": "Maximum volatility a pair must have to be considered.",
},
**IPairList.refresh_period_parameter()
}
def filter_pairlist(self, pairlist: List[str], tickers: Tickers) -> List[str]:
"""
Validate trading range

View File

@@ -14,7 +14,7 @@ from freqtrade.exceptions import OperationalException
from freqtrade.exchange import timeframe_to_minutes, timeframe_to_prev_date
from freqtrade.exchange.types import Tickers
from freqtrade.misc import format_ms_time
from freqtrade.plugins.pairlist.IPairList import IPairList
from freqtrade.plugins.pairlist.IPairList import IPairList, PairlistParameter
from freqtrade.util import dt_now
@@ -26,6 +26,8 @@ SORT_VALUES = ['quoteVolume']
class VolumePairList(IPairList):
is_pairlist_generator = True
def __init__(self, exchange, pairlistmanager,
config: Config, pairlistconfig: Dict[str, Any],
pairlist_pos: int) -> None:
@@ -112,6 +114,53 @@ class VolumePairList(IPairList):
"""
return f"{self.name} - top {self._pairlistconfig['number_assets']} volume pairs."
@staticmethod
def description() -> str:
return "Provides dynamic pair list based on trade volumes."
@staticmethod
def available_parameters() -> Dict[str, PairlistParameter]:
return {
"number_assets": {
"type": "number",
"default": 30,
"description": "Number of assets",
"help": "Number of assets to use from the pairlist",
},
"sort_key": {
"type": "option",
"default": "quoteVolume",
"options": SORT_VALUES,
"description": "Sort key",
"help": "Sort key to use for sorting the pairlist.",
},
"min_value": {
"type": "number",
"default": 0,
"description": "Minimum value",
"help": "Minimum value to use for filtering the pairlist.",
},
**IPairList.refresh_period_parameter(),
"lookback_days": {
"type": "number",
"default": 0,
"description": "Lookback Days",
"help": "Number of days to look back at.",
},
"lookback_timeframe": {
"type": "string",
"default": "",
"description": "Lookback Timeframe",
"help": "Timeframe to use for lookback.",
},
"lookback_period": {
"type": "number",
"default": 0,
"description": "Lookback Period",
"help": "Number of periods to look back at.",
},
}
def gen_pairlist(self, tickers: Tickers) -> List[str]:
"""
Generate the pairlist

View File

@@ -13,7 +13,7 @@ from freqtrade.constants import Config, ListPairsWithTimeframes
from freqtrade.exceptions import OperationalException
from freqtrade.exchange.types import Tickers
from freqtrade.misc import plural
from freqtrade.plugins.pairlist.IPairList import IPairList
from freqtrade.plugins.pairlist.IPairList import IPairList, PairlistParameter
from freqtrade.util import dt_floor_day, dt_now, dt_ts
@@ -62,6 +62,34 @@ class RangeStabilityFilter(IPairList):
f"{self._min_rate_of_change}{max_rate_desc} over the "
f"last {plural(self._days, 'day')}.")
@staticmethod
def description() -> str:
return "Filters pairs by their rate of change."
@staticmethod
def available_parameters() -> Dict[str, PairlistParameter]:
return {
"lookback_days": {
"type": "number",
"default": 10,
"description": "Lookback Days",
"help": "Number of days to look back at.",
},
"min_rate_of_change": {
"type": "number",
"default": 0.01,
"description": "Minimum Rate of Change",
"help": "Minimum rate of change to filter pairs.",
},
"max_rate_of_change": {
"type": "number",
"default": None,
"description": "Maximum Rate of Change",
"help": "Maximum rate of change to filter pairs.",
},
**IPairList.refresh_period_parameter()
}
def filter_pairlist(self, pairlist: List[str], tickers: Tickers) -> List[str]:
"""
Validate trading range

View File

@@ -15,7 +15,7 @@ from freqtrade.resolvers import ProtectionResolver
logger = logging.getLogger(__name__)
class ProtectionManager():
class ProtectionManager:
def __init__(self, config: Config, protections: List) -> None:
self._config = config

View File

@@ -2,7 +2,8 @@
This module loads custom exchanges
"""
import logging
from typing import Optional
from inspect import isclass
from typing import Any, Dict, List, Optional
import freqtrade.exchange as exchanges
from freqtrade.constants import Config, ExchangeConfig
@@ -72,3 +73,26 @@ class ExchangeResolver(IResolver):
f"Impossible to load Exchange '{exchange_name}'. This class does not exist "
"or contains Python code errors."
)
@classmethod
def search_all_objects(cls, config: Config, enum_failed: bool,
recursive: bool = False) -> List[Dict[str, Any]]:
"""
Searches for valid objects
:param config: Config object
:param enum_failed: If True, will return None for modules which fail.
Otherwise, failing modules are skipped.
:param recursive: Recursively walk the directory tree searching for exchange classes
:return: List of dicts containing 'name', 'class' and 'location' entries
"""
result = []
for exchange_name in dir(exchanges):
exchange = getattr(exchanges, exchange_name)
if isclass(exchange) and issubclass(exchange, Exchange):
result.append({
'name': exchange_name,
'class': exchange,
'location': exchange.__module__,
'location_rel': exchange.__module__.replace('freqtrade.', ''),
})
return result
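# A minimal usage sketch: enumerate the exchange classes bundled with
# freqtrade. An empty config dict suffices for pure enumeration.
if __name__ == "__main__":
    for entry in ExchangeResolver.search_all_objects(config={}, enum_failed=False):
        print(entry['name'], '->', entry['location'])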

View File

@@ -34,7 +34,7 @@ class FreqaiModelResolver(IResolver):
Load the custom class from config parameter
:param config: configuration dictionary
"""
disallowed_models = ["BaseRegressionModel", "BaseTensorFlowModel"]
disallowed_models = ["BaseRegressionModel"]
freqaimodel_name = config.get("freqaimodel")
if not freqaimodel_name:

View File

@@ -41,7 +41,7 @@ class IResolver:
object_type: Type[Any]
object_type_str: str
user_subdir: Optional[str] = None
initial_search_path: Optional[Path]
initial_search_path: Optional[Path] = None
# Optional config setting containing a path (strategy_path, freqaimodel_path)
extra_path: Optional[str] = None

View File

@@ -0,0 +1,145 @@
import logging
from copy import deepcopy
from fastapi import APIRouter, BackgroundTasks, Depends
from fastapi.exceptions import HTTPException
from freqtrade.constants import Config
from freqtrade.enums import CandleType
from freqtrade.exceptions import OperationalException
from freqtrade.rpc.api_server.api_schemas import (BackgroundTaskStatus, BgJobStarted,
ExchangeModePayloadMixin, PairListsPayload,
PairListsResponse, WhitelistEvaluateResponse)
from freqtrade.rpc.api_server.deps import get_config, get_exchange
from freqtrade.rpc.api_server.webserver_bgwork import ApiBG
logger = logging.getLogger(__name__)
# Private API, protected by authentication and webserver_mode dependency
router = APIRouter()
@router.get('/background/{jobid}', response_model=BackgroundTaskStatus, tags=['webserver'])
def background_job(jobid: str):
if not (job := ApiBG.jobs.get(jobid)):
raise HTTPException(status_code=404, detail='Job not found.')
return {
'job_id': jobid,
'job_category': job['category'],
'status': job['status'],
'running': job['is_running'],
'progress': job.get('progress'),
# 'job_error': job['error'],
}
@router.get('/pairlists/available',
response_model=PairListsResponse, tags=['pairlists', 'webserver'])
def list_pairlists(config=Depends(get_config)):
from freqtrade.resolvers import PairListResolver
pairlists = PairListResolver.search_all_objects(
config, False)
pairlists = sorted(pairlists, key=lambda x: x['name'])
return {'pairlists': [{
"name": x['name'],
"is_pairlist_generator": x['class'].is_pairlist_generator,
"params": x['class'].available_parameters(),
"description": x['class'].description(),
} for x in pairlists
]}
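# Sketch of one response entry, mirroring the OffsetFilter definitions shown
# earlier in this diff (shortened to a single parameter):
_example_entry = {
    'name': 'OffsetFilter',
    'is_pairlist_generator': False,
    'params': {'offset': {'type': 'number', 'default': 0,
                          'description': 'Offset',
                          'help': 'Offset of the pairlist.'}},
    'description': 'Offset pair list filter.',
}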
def __run_pairlist(job_id: str, config_loc: Config):
try:
ApiBG.jobs[job_id]['is_running'] = True
from freqtrade.plugins.pairlistmanager import PairListManager
exchange = get_exchange(config_loc)
pairlists = PairListManager(exchange, config_loc)
pairlists.refresh_pairlist()
ApiBG.jobs[job_id]['result'] = {
'method': pairlists.name_list,
'length': len(pairlists.whitelist),
'whitelist': pairlists.whitelist
}
ApiBG.jobs[job_id]['status'] = 'success'
except (OperationalException, Exception) as e:
logger.exception(e)
ApiBG.jobs[job_id]['error'] = str(e)
ApiBG.jobs[job_id]['status'] = 'failed'
finally:
ApiBG.jobs[job_id]['is_running'] = False
ApiBG.pairlist_running = False
@router.post('/pairlists/evaluate', response_model=BgJobStarted, tags=['pairlists', 'webserver'])
def pairlists_evaluate(payload: PairListsPayload, background_tasks: BackgroundTasks,
config=Depends(get_config)):
if ApiBG.pairlist_running:
raise HTTPException(status_code=400, detail='Pairlist evaluation is already running.')
config_loc = deepcopy(config)
config_loc['stake_currency'] = payload.stake_currency
config_loc['pairlists'] = payload.pairlists
handleExchangePayload(payload, config_loc)
# TODO: overwrite blacklist? make it optional and fall back to the one in config?
# Outcome depends on the UI approach.
config_loc['exchange']['pair_blacklist'] = payload.blacklist
# Random job id
job_id = ApiBG.get_job_id()
ApiBG.jobs[job_id] = {
'category': 'pairlist',
'status': 'pending',
'progress': None,
'is_running': False,
'result': {},
'error': None,
}
background_tasks.add_task(__run_pairlist, job_id, config_loc)
ApiBG.pairlist_running = True
return {
'status': 'Pairlist evaluation started in background.',
'job_id': job_id,
}
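# A hypothetical client flow against a webserver-mode instance; the URL and
# credentials below are placeholders, not defaults.
if __name__ == "__main__":
    import time

    import requests

    API, AUTH = 'http://127.0.0.1:8080/api/v1', ('freqtrader', 'password')
    job = requests.post(f'{API}/pairlists/evaluate', auth=AUTH, json={
        'pairlists': [{'method': 'StaticPairList'}],
        'blacklist': [],
        'stake_currency': 'USDT',
    }).json()
    # Poll the generic background endpoint until the job stops running,
    # then fetch the evaluated whitelist.
    while requests.get(f"{API}/background/{job['job_id']}", auth=AUTH).json()['running']:
        time.sleep(1)
    print(requests.get(f"{API}/pairlists/evaluate/{job['job_id']}", auth=AUTH).json())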
def handleExchangePayload(payload: ExchangeModePayloadMixin, config_loc: Config):
"""
Handle exchange and trading mode payload.
Updates the configuration with the payload values.
"""
if payload.exchange:
config_loc['exchange']['name'] = payload.exchange
if payload.trading_mode:
config_loc['trading_mode'] = payload.trading_mode
config_loc['candle_type_def'] = CandleType.get_default(
config_loc.get('trading_mode', 'spot') or 'spot')
if payload.margin_mode:
config_loc['margin_mode'] = payload.margin_mode
@router.get('/pairlists/evaluate/{jobid}', response_model=WhitelistEvaluateResponse,
tags=['pairlists', 'webserver'])
def pairlists_evaluate_get(jobid: str):
if not (job := ApiBG.jobs.get(jobid)):
raise HTTPException(status_code=404, detail='Job not found.')
if job['is_running']:
raise HTTPException(status_code=400, detail='Job not finished yet.')
if error := job['error']:
return {
'status': 'failed',
'error': error,
}
return {
'status': 'success',
'result': job['result'],
}

View File

@@ -16,14 +16,14 @@ from freqtrade.exchange.common import remove_exchange_credentials
from freqtrade.misc import deep_merge_dicts
from freqtrade.rpc.api_server.api_schemas import (BacktestHistoryEntry, BacktestRequest,
BacktestResponse)
from freqtrade.rpc.api_server.deps import get_config, is_webserver_mode
from freqtrade.rpc.api_server.deps import get_config
from freqtrade.rpc.api_server.webserver_bgwork import ApiBG
from freqtrade.rpc.rpc import RPCException
logger = logging.getLogger(__name__)
# Private API, protected by authentication
# Private API, protected by authentication and webserver_mode dependency
router = APIRouter()
@@ -102,7 +102,7 @@ def __run_backtest_bg(btconfig: Config):
@router.post('/backtest', response_model=BacktestResponse, tags=['webserver', 'backtest'])
async def api_start_backtest(
bt_settings: BacktestRequest, background_tasks: BackgroundTasks,
config=Depends(get_config), ws_mode=Depends(is_webserver_mode)):
config=Depends(get_config)):
"""Start backtesting if not done so already"""
ApiBG.bt['bt_error'] = None
if ApiBG.bgtask_running:
@@ -143,7 +143,7 @@ async def api_start_backtest(
@router.get('/backtest', response_model=BacktestResponse, tags=['webserver', 'backtest'])
def api_get_backtest(ws_mode=Depends(is_webserver_mode)):
def api_get_backtest():
"""
Get backtesting result.
    Returns result after backtesting has been run.
@@ -188,7 +188,7 @@ def api_get_backtest(ws_mode=Depends(is_webserver_mode)):
@router.delete('/backtest', response_model=BacktestResponse, tags=['webserver', 'backtest'])
def api_delete_backtest(ws_mode=Depends(is_webserver_mode)):
def api_delete_backtest():
"""Reset backtesting"""
if ApiBG.bgtask_running:
return {
@@ -215,7 +215,7 @@ def api_delete_backtest(ws_mode=Depends(is_webserver_mode)):
@router.get('/backtest/abort', response_model=BacktestResponse, tags=['webserver', 'backtest'])
def api_backtest_abort(ws_mode=Depends(is_webserver_mode)):
def api_backtest_abort():
if not ApiBG.bgtask_running:
return {
"status": "not_running",
@@ -236,15 +236,14 @@ def api_backtest_abort(ws_mode=Depends(is_webserver_mode)):
@router.get('/backtest/history', response_model=List[BacktestHistoryEntry],
tags=['webserver', 'backtest'])
def api_backtest_history(config=Depends(get_config), ws_mode=Depends(is_webserver_mode)):
def api_backtest_history(config=Depends(get_config)):
# Get backtest result history, read from metadata files
return get_backtest_resultlist(config['user_data_dir'] / 'backtest_results')
@router.get('/backtest/history/result', response_model=BacktestResponse,
tags=['webserver', 'backtest'])
def api_backtest_history_result(filename: str, strategy: str, config=Depends(get_config),
ws_mode=Depends(is_webserver_mode)):
def api_backtest_history_result(filename: str, strategy: str, config=Depends(get_config)):
# Get backtest result history, read from metadata files
fn = config['user_data_dir'] / 'backtest_results' / filename
results: Dict[str, Any] = {

View File

@@ -4,7 +4,14 @@ from typing import Any, Dict, List, Optional, Union
from pydantic import BaseModel
from freqtrade.constants import DATETIME_PRINT_FORMAT, IntOrInf
from freqtrade.enums import OrderTypeValues, SignalDirection, TradingMode
from freqtrade.enums import MarginMode, OrderTypeValues, SignalDirection, TradingMode
from freqtrade.types import ValidExchangesType
class ExchangeModePayloadMixin(BaseModel):
trading_mode: Optional[TradingMode]
margin_mode: Optional[MarginMode]
exchange: Optional[str]
class Ping(BaseModel):
@@ -27,6 +34,23 @@ class StatusMsg(BaseModel):
status: str
class BgJobStarted(StatusMsg):
job_id: str
class BackgroundTaskStatus(BaseModel):
job_id: str
job_category: str
status: str
running: bool
progress: Optional[float]
class BackgroundTaskResult(BaseModel):
error: Optional[str]
status: str
class ResultMsg(BaseModel):
result: str
@@ -376,6 +400,10 @@ class WhitelistResponse(BaseModel):
method: List[str]
class WhitelistEvaluateResponse(BackgroundTaskResult):
result: Optional[WhitelistResponse]
class DeleteTrade(BaseModel):
cancel_order_count: int
result: str
@@ -396,6 +424,27 @@ class StrategyListResponse(BaseModel):
strategies: List[str]
class ExchangeListResponse(BaseModel):
exchanges: List[ValidExchangesType]
class PairListResponse(BaseModel):
name: str
description: str
is_pairlist_generator: bool
params: Dict[str, Any]
class PairListsResponse(BaseModel):
pairlists: List[PairListResponse]
class PairListsPayload(ExchangeModePayloadMixin, BaseModel):
pairlists: List[Dict[str, Any]]
blacklist: List[str]
stake_currency: str
class FreqAIModelListResponse(BaseModel):
freqaimodels: List[str]
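
Combined with the mixin above, a request body validating against PairListsPayload would look roughly like this (values are illustrative only):

example_payload = {
    # Optional fields from ExchangeModePayloadMixin:
    "exchange": "binance",
    "trading_mode": "spot",
    "margin_mode": None,
    # Required fields from PairListsPayload itself:
    "pairlists": [{"method": "VolumePairList", "number_assets": 20}],
    "blacklist": ["BNB/.*"],
    "stake_currency": "USDT",
}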

View File

@@ -12,7 +12,8 @@ from freqtrade.exceptions import OperationalException
from freqtrade.rpc import RPC
from freqtrade.rpc.api_server.api_schemas import (AvailablePairs, Balances, BlacklistPayload,
BlacklistResponse, Count, Daily,
DeleteLockRequest, DeleteTrade, ForceEnterPayload,
DeleteLockRequest, DeleteTrade,
ExchangeListResponse, ForceEnterPayload,
ForceEnterResponse, ForceExitPayload,
FreqAIModelListResponse, Health, Locks, Logs,
OpenTradeSchema, PairHistory, PerformanceEntry,
@@ -46,7 +47,9 @@ logger = logging.getLogger(__name__)
# 2.26: increase /balance output
# 2.27: Add /trades/<id>/reload endpoint
# 2.28: Switch reload endpoint to Post
API_VERSION = 2.28
# 2.29: Add /exchanges endpoint
# 2.30: new /pairlists endpoint
API_VERSION = 2.30
# Public API, requires no auth.
router_public = APIRouter()
@@ -312,6 +315,15 @@ def get_strategy(strategy: str, config=Depends(get_config)):
}
@router.get('/exchanges', response_model=ExchangeListResponse, tags=[])
def list_exchanges(config=Depends(get_config)):
from freqtrade.exchange import list_available_exchanges
exchanges = list_available_exchanges(config)
return {
'exchanges': exchanges,
}
@router.get('/freqaimodels', response_model=FreqAIModelListResponse, tags=['freqai'])
def list_freqaimodels(config=Depends(get_config)):
from freqtrade.resolvers.freqaimodel_resolver import FreqaiModelResolver
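
For reference, the new /exchanges endpoint can be exercised with a few lines of Python; address and credentials below are assumptions and depend on your api_server configuration:

import requests

resp = requests.get("http://127.0.0.1:8080/api/v1/exchanges",
                    auth=("freqtrader", "password"))
for exchange in resp.json()["exchanges"]:
    # Each entry follows ValidExchangesType: name, valid, supported, comment, trade_modes.
    print(exchange["name"], exchange["supported"])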

View File

@@ -7,12 +7,14 @@ from fastapi.websockets import WebSocket
from pydantic import ValidationError
from freqtrade.enums import RPCMessageType, RPCRequestType
from freqtrade.exceptions import FreqtradeException
from freqtrade.rpc.api_server.api_auth import validate_ws_token
from freqtrade.rpc.api_server.deps import get_message_stream, get_rpc
from freqtrade.rpc.api_server.ws.channel import WebSocketChannel, create_channel
from freqtrade.rpc.api_server.ws.message_stream import MessageStream
from freqtrade.rpc.api_server.ws_schemas import (WSAnalyzedDFMessage, WSMessageSchema,
WSRequestSchema, WSWhitelistMessage)
from freqtrade.rpc.api_server.ws_schemas import (WSAnalyzedDFMessage, WSErrorMessage,
WSMessageSchema, WSRequestSchema,
WSWhitelistMessage)
from freqtrade.rpc.rpc import RPC
@@ -27,7 +29,13 @@ async def channel_reader(channel: WebSocketChannel, rpc: RPC):
Iterate over the messages from the channel and process the request
"""
async for message in channel:
await _process_consumer_request(message, channel, rpc)
try:
await _process_consumer_request(message, channel, rpc)
except FreqtradeException:
logger.exception(f"Error processing request from {channel}")
response = WSErrorMessage(data='Error processing request')
await channel.send(response.dict(exclude_none=True))
async def channel_broadcaster(channel: WebSocketChannel, message_stream: MessageStream):
@@ -62,13 +70,13 @@ async def _process_consumer_request(
logger.error(f"Invalid request from {channel}: {e}")
return
type, data = websocket_request.type, websocket_request.data
type_, data = websocket_request.type, websocket_request.data
response: WSMessageSchema
logger.debug(f"Request of type {type} from {channel}")
logger.debug(f"Request of type {type_} from {channel}")
# If we have a request of type SUBSCRIBE, set the topics in this channel
if type == RPCRequestType.SUBSCRIBE:
if type_ == RPCRequestType.SUBSCRIBE:
# If the request is empty, do nothing
if not data:
return
@@ -80,7 +88,7 @@ async def _process_consumer_request(
# We don't send a response for subscriptions
return
elif type == RPCRequestType.WHITELIST:
elif type_ == RPCRequestType.WHITELIST:
# Get whitelist
whitelist = rpc._ws_request_whitelist()
@@ -88,7 +96,7 @@ async def _process_consumer_request(
response = WSWhitelistMessage(data=whitelist)
await channel.send(response.dict(exclude_none=True))
elif type == RPCRequestType.ANALYZED_DF:
elif type_ == RPCRequestType.ANALYZED_DF:
    # Limit the number of candles per dataframe to 'limit' or 1500
limit = int(min(data.get('limit', 1500), 1500)) if data else None
pair = data.get('pair', None) if data else None
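
The request types handled above travel as small JSON messages. Assuming the lowercase values freqtrade uses for RPCRequestType members (an assumption here, not shown in this diff), consumer messages would look like:

# Hypothetical message bodies sent by a websocket consumer.
subscribe_msg = {"type": "subscribe", "data": ["whitelist", "analyzed_df"]}
whitelist_msg = {"type": "whitelist", "data": None}
analyzed_df_msg = {"type": "analyzed_df", "data": {"limit": 500, "pair": "BTC/USDT"}}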

View File

@@ -1,8 +1,9 @@
from typing import Any, AsyncIterator, Dict, Optional
from uuid import uuid4
from fastapi import Depends
from fastapi import Depends, HTTPException
from freqtrade.constants import Config
from freqtrade.enums import RunMode
from freqtrade.persistence import Trade
from freqtrade.persistence.models import _request_id_ctx_var
@@ -43,12 +44,21 @@ def get_api_config() -> Dict[str, Any]:
return ApiServer._config['api_server']
def _generate_exchange_key(config: Config) -> str:
"""
Exchange key - used for caching the exchange object.
"""
return f"{config['exchange']['name']}_{config.get('trading_mode', 'spot')}"
def get_exchange(config=Depends(get_config)):
if not ApiBG.exchange:
exchange_key = _generate_exchange_key(config)
if not (exchange := ApiBG.exchanges.get(exchange_key)):
from freqtrade.resolvers import ExchangeResolver
ApiBG.exchange = ExchangeResolver.load_exchange(
exchange = ExchangeResolver.load_exchange(
config, load_leverage_tiers=False)
return ApiBG.exchange
ApiBG.exchanges[exchange_key] = exchange
return exchange
def get_message_stream():
@@ -57,5 +67,6 @@ def get_message_stream():
def is_webserver_mode(config=Depends(get_config)):
if config['runmode'] != RunMode.WEBSERVER:
raise RPCException('Bot is not in the correct state')
raise HTTPException(status_code=503,
detail='Bot is not in the correct state.')
return None
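
As a small self-contained sketch of the new caching behaviour (the helper is re-declared here purely for illustration; it mirrors the diff above):

def _generate_exchange_key(config) -> str:
    # Same shape as the helper introduced above.
    return f"{config['exchange']['name']}_{config.get('trading_mode', 'spot')}"

cfg_spot = {"exchange": {"name": "binance"}}
cfg_futures = {"exchange": {"name": "binance"}, "trading_mode": "futures"}

assert _generate_exchange_key(cfg_spot) == "binance_spot"        # trading_mode defaults to spot
assert _generate_exchange_key(cfg_futures) == "binance_futures"  # gets its own cache slot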

View File

@@ -114,10 +114,12 @@ class ApiServer(RPCHandler):
def configure_app(self, app: FastAPI, config):
from freqtrade.rpc.api_server.api_auth import http_basic_or_jwt_token, router_login
from freqtrade.rpc.api_server.api_background_tasks import router as api_bg_tasks
from freqtrade.rpc.api_server.api_backtest import router as api_backtest
from freqtrade.rpc.api_server.api_v1 import router as api_v1
from freqtrade.rpc.api_server.api_v1 import router_public as api_v1_public
from freqtrade.rpc.api_server.api_ws import router as ws_router
from freqtrade.rpc.api_server.deps import is_webserver_mode
from freqtrade.rpc.api_server.web_ui import router_ui
app.include_router(api_v1_public, prefix="/api/v1")
@@ -126,7 +128,12 @@ class ApiServer(RPCHandler):
dependencies=[Depends(http_basic_or_jwt_token)],
)
app.include_router(api_backtest, prefix="/api/v1",
dependencies=[Depends(http_basic_or_jwt_token)],
dependencies=[Depends(http_basic_or_jwt_token),
Depends(is_webserver_mode)],
)
app.include_router(api_bg_tasks, prefix="/api/v1",
dependencies=[Depends(http_basic_or_jwt_token),
Depends(is_webserver_mode)],
)
app.include_router(ws_router, prefix="/api/v1")
app.include_router(router_login, prefix="/api/v1", tags=["auth"])

View File

@@ -1,8 +1,20 @@
from typing import Any, Dict
from typing import Any, Dict, Literal, Optional, TypedDict
from uuid import uuid4
from freqtrade.exchange.exchange import Exchange
class ApiBG():
class JobsContainer(TypedDict):
category: Literal['pairlist']
is_running: bool
status: str
progress: Optional[float]
result: Any
error: Optional[str]
class ApiBG:
# Backtesting type: Backtesting
bt: Dict[str, Any] = {
'bt': None,
@@ -13,4 +25,15 @@ class ApiBG():
}
bgtask_running: bool = False
# Exchange - only available in webserver mode.
exchange = None
exchanges: Dict[str, Exchange] = {}
# Generic background jobs
# TODO: Change this to TTLCache
jobs: Dict[str, JobsContainer] = {}
# Pairlist evaluate things
pairlist_running: bool = False
@staticmethod
def get_job_id() -> str:
return str(uuid4())
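
The TODO above could later be realized with cachetools.TTLCache; a sketch, assuming arbitrary size and expiry values:

from cachetools import TTLCache

# Entries expire automatically instead of accumulating for the
# lifetime of the webserver process.
jobs: TTLCache = TTLCache(maxsize=100, ttl=3600)  # keep results for one hour
jobs["example-job-id"] = {"category": "pairlist", "status": "pending",
                          "is_running": False, "progress": None,
                          "result": {}, "error": None}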

View File

@@ -66,4 +66,9 @@ class WSAnalyzedDFMessage(WSMessageSchema):
type: RPCMessageType = RPCMessageType.ANALYZED_DF
data: AnalyzedDFData
class WSErrorMessage(WSMessageSchema):
type: RPCMessageType = RPCMessageType.EXCEPTION
data: str
# --------------------------------------------------------------------------

View File

@@ -755,7 +755,7 @@ class RPC:
return {'status': 'Reloaded orders from exchange'}
def __exec_force_exit(self, trade: Trade, ordertype: Optional[str],
amount: Optional[float] = None) -> None:
amount: Optional[float] = None) -> bool:
# Check if there is an open order
fully_canceled = False
if trade.open_order_id:
@@ -770,6 +770,9 @@ class RPC:
self._freqtrade.handle_cancel_exit(trade, order, CANCEL_REASON['FORCE_EXIT'])
if not fully_canceled:
if trade.open_order_id is not None:
# Order cancellation failed, so we can't exit.
return False
# Get current rate and execute sell
current_rate = self._freqtrade.exchange.get_rate(
trade.pair, side='exit', is_short=trade.is_short, refresh=True)
@@ -790,6 +793,9 @@ class RPC:
trade, current_rate, exit_check, ordertype=order_type,
sub_trade_amt=sub_amount)
return True
return False
def _rpc_force_exit(self, trade_id: str, ordertype: Optional[str] = None, *,
amount: Optional[float] = None) -> Dict[str, str]:
"""
@@ -802,12 +808,12 @@ class RPC:
with self._freqtrade._exit_lock:
if trade_id == 'all':
# Execute sell for all open orders
# Execute exit for all open trades
for trade in Trade.get_open_trades():
self.__exec_force_exit(trade, ordertype)
Trade.commit()
self._freqtrade.wallets.update()
return {'result': 'Created sell orders for all open trades.'}
return {'result': 'Created exit orders for all open trades.'}
# Query for trade
trade = Trade.get_trades(
@@ -817,10 +823,12 @@ class RPC:
logger.warning('force_exit: Invalid argument received')
raise RPCException('invalid argument')
self.__exec_force_exit(trade, ordertype, amount)
result = self.__exec_force_exit(trade, ordertype, amount)
Trade.commit()
self._freqtrade.wallets.update()
return {'result': f'Created sell order for trade {trade_id}.'}
if not result:
raise RPCException('Failed to exit trade.')
return {'result': f'Created exit order for trade {trade_id}.'}
def _force_entry_validations(self, pair: str, order_side: SignalDirection):
if not self._freqtrade.config.get('force_entry_enable', False):

View File

@@ -534,10 +534,10 @@ class Telegram(RPCHandler):
if order_nr == 1:
lines.append(f"*{wording} #{order_nr}:*")
lines.append(
f"*Amount:* {cur_entry_amount} "
f"*Amount:* {cur_entry_amount:.8g} "
f"({round_coin_value(order['cost'], quote_currency)})"
)
lines.append(f"*Average Price:* {cur_entry_average}")
lines.append(f"*Average Price:* {cur_entry_average:.8g}")
else:
sum_stake = 0
sum_amount = 0
@@ -560,9 +560,9 @@ class Telegram(RPCHandler):
if is_open:
lines.append("({})".format(dt_humanize(order["order_filled_date"],
granularity=["day", "hour", "minute"])))
lines.append(f"*Amount:* {cur_entry_amount} "
lines.append(f"*Amount:* {cur_entry_amount:.8g} "
f"({round_coin_value(order['cost'], quote_currency)})")
lines.append(f"*Average {wording} Price:* {cur_entry_average} "
lines.append(f"*Average {wording} Price:* {cur_entry_average:.8g} "
f"({price_to_1st_entry:.2%} from 1st entry Rate)")
lines.append(f"*Order filled:* {order['order_filled_date']}")
@@ -633,11 +633,11 @@ class Telegram(RPCHandler):
])
lines.extend([
"*Open Rate:* `{open_rate:.8f}`",
"*Close Rate:* `{close_rate:.8f}`" if r['close_rate'] else "",
"*Open Rate:* `{open_rate:.8g}`",
"*Close Rate:* `{close_rate:.8g}`" if r['close_rate'] else "",
"*Open Date:* `{open_date}`",
"*Close Date:* `{close_date}`" if r['close_date'] else "",
" \n*Current Rate:* `{current_rate:.8f}`" if r['is_open'] else "",
" \n*Current Rate:* `{current_rate:.8g}`" if r['is_open'] else "",
("*Unrealized Profit:* " if r['is_open'] else "*Close Profit: *")
+ "`{profit_ratio:.2%}` `({profit_abs_r})`",
])
@@ -658,9 +658,9 @@ class Telegram(RPCHandler):
"`({initial_stop_loss_ratio:.2%})`")
# Adding stoploss and stoploss percentage only if it is not None
lines.append("*Stoploss:* `{stop_loss_abs:.8f}` " +
lines.append("*Stoploss:* `{stop_loss_abs:.8g}` " +
("`({stop_loss_ratio:.2%})`" if r['stop_loss_ratio'] else ""))
lines.append("*Stoploss distance:* `{stoploss_current_dist:.8f}` "
lines.append("*Stoploss distance:* `{stoploss_current_dist:.8g}` "
"`({stoploss_current_dist_ratio:.2%})`")
if r['open_order']:
lines.append(
@@ -1114,7 +1114,9 @@ class Telegram(RPCHandler):
async def _force_exit_action(self, trade_id):
if trade_id != 'cancel':
try:
self._rpc._rpc_force_exit(trade_id)
loop = asyncio.get_running_loop()
# Workaround to avoid nested loops
await loop.run_in_executor(None, self._rpc._rpc_force_exit, trade_id)
except RPCException as e:
await self._send_msg(str(e))
@@ -1140,7 +1142,11 @@ class Telegram(RPCHandler):
async def _force_enter_action(self, pair, price: Optional[float], order_side: SignalDirection):
if pair != 'cancel':
try:
self._rpc._rpc_force_entry(pair, price, order_side=order_side)
def _force_enter():
self._rpc._rpc_force_entry(pair, price, order_side=order_side)
loop = asyncio.get_running_loop()
# Workaround to avoid nested loops
await loop.run_in_executor(None, _force_enter)
except RPCException as e:
logger.exception("Forcebuy error!")
await self._send_msg(str(e), ParseMode.HTML)
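
The switch from :.8f to :.8g trades a fixed number of decimals for significant digits, which reads better across wildly different coin prices (note that g falls back to scientific notation below roughly 1e-5). A plain-Python comparison, outputs shown in comments:

small = 0.00012345
large = 23456.789012

print(f"{small:.8f}")  # 0.00012345
print(f"{large:.8f}")  # 23456.78901200 - noisy trailing digits
print(f"{small:.8g}")  # 0.00012345
print(f"{large:.8g}")  # 23456.789 - 8 significant digits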

View File

@@ -48,7 +48,7 @@ class IStrategy(ABC, HyperStrategyMixin):
_ft_params_from_file: Dict
# associated minimal roi
minimal_roi: Dict = {"0": 10.0}
minimal_roi: Dict = {}
# associated stoploss
stoploss: float
@@ -168,7 +168,7 @@ class IStrategy(ABC, HyperStrategyMixin):
download_all_data_for_training(self.dp, self.config)
else:
# Graceful failure if freqAI is disabled but "start" is called.
class DummyClass():
class DummyClass:
def start(self, *args, **kwargs):
raise OperationalException(
'freqAI is not enabled. '
@@ -1085,6 +1085,11 @@ class IStrategy(ABC, HyperStrategyMixin):
exits: List[ExitCheckTuple] = []
current_rate = rate
current_profit = trade.calc_profit_ratio(current_rate)
current_profit_best = current_profit
if low is not None or high is not None:
# Set current rate to high for backtesting ROI exits
current_rate_best = (low if trade.is_short else high) or rate
current_profit_best = trade.calc_profit_ratio(current_rate_best)
trade.adjust_min_max_rates(high or current_rate, low or current_rate)
@@ -1093,20 +1098,13 @@ class IStrategy(ABC, HyperStrategyMixin):
current_profit=current_profit,
force_stoploss=force_stoploss, low=low, high=high)
# Set current rate to high for backtesting exits
current_rate = (low if trade.is_short else high) or rate
current_profit = trade.calc_profit_ratio(current_rate)
# if enter signal and ignore_roi is set, we don't need to evaluate min_roi.
roi_reached = (not (enter and self.ignore_roi_if_entry_signal)
and self.min_roi_reached(trade=trade, current_profit=current_profit,
and self.min_roi_reached(trade=trade, current_profit=current_profit_best,
current_time=current_time))
exit_signal = ExitType.NONE
custom_reason = ''
# use provided rate in backtesting, not high/low.
current_rate = rate
current_profit = trade.calc_profit_ratio(current_rate)
if self.use_exit_signal:
if exit_ and not enter:
@@ -1265,7 +1263,7 @@ class IStrategy(ABC, HyperStrategyMixin):
:return: minimal ROI entry value or None if no proper ROI entry was found.
"""
# Get highest entry in ROI dict where key <= trade-duration
roi_list = list(filter(lambda x: x <= trade_dur, self.minimal_roi.keys()))
roi_list = [x for x in self.minimal_roi.keys() if x <= trade_dur]
if not roi_list:
return None, None
roi_entry = max(roi_list)
@@ -1302,7 +1300,7 @@ class IStrategy(ABC, HyperStrategyMixin):
timedout = (order.status == 'open' and order.order_date_utc < timeout_threshold)
if timedout:
return True
time_method = (self.check_exit_timeout if order.side == trade.exit_side
time_method = (self.check_exit_timeout if order.ft_order_side == trade.exit_side
else self.check_entry_timeout)
return strategy_safe_wrapper(time_method,
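
For context, the rewritten comprehension picks the latest ROI threshold already reached; a standalone sketch (real strategies have their minimal_roi keys normalized to integer minutes before this runs):

minimal_roi = {0: 0.10, 30: 0.05, 60: 0.01}  # minutes since entry -> required profit
trade_dur = 45

roi_list = [x for x in minimal_roi.keys() if x <= trade_dur]
roi_entry = max(roi_list)          # 30 - the latest threshold already passed
required = minimal_roi[roi_entry]  # 0.05 - ROI exit triggers at >= 5% profit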

View File

@@ -232,7 +232,7 @@ class FreqaiExampleStrategy(IStrategy):
# All indicators must be populated by feature_engineering_*() functions
# the model will return all labels created by user in `feature_engineering_*`
# the model will return all labels created by user in `set_freqai_targets()`
# (& appended targets), an indication of whether or not the prediction should be accepted,
# the target mean/std values for each of the labels created by user in
# `set_freqai_targets()` for each training period.

View File

@@ -0,0 +1 @@
from freqtrade.types.valid_exchanges_type import ValidExchangesType # noqa: F401

View File

@@ -0,0 +1,17 @@
# Used for list-exchanges
from typing import List
from typing_extensions import TypedDict
class TradeModeType(TypedDict):
trading_mode: str
margin_mode: str
class ValidExchangesType(TypedDict):
name: str
valid: bool
supported: bool
comment: str
trade_modes: List[TradeModeType]
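
A value matching this shape would look like the following (illustrative data, not taken from an actual exchange listing):

from freqtrade.types import ValidExchangesType

binance_entry: ValidExchangesType = {
    "name": "binance",
    "valid": True,
    "supported": True,
    "comment": "",
    "trade_modes": [
        {"trading_mode": "spot", "margin_mode": ""},
        {"trading_mode": "futures", "margin_mode": "isolated"},
    ],
}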

View File

@@ -3,7 +3,7 @@ import logging
from packaging import version
from sqlalchemy import select
from freqtrade.constants import Config
from freqtrade.constants import DOCS_LINK, Config
from freqtrade.enums.tradingmode import TradingMode
from freqtrade.exceptions import OperationalException
from freqtrade.persistence.pairlock import PairLock
@@ -25,7 +25,7 @@ def migrate_binance_futures_names(config: Config):
if version.parse("2.6.26") > version.parse(ccxt.__version__):
raise OperationalException(
"Please follow the update instructions in the docs "
"(https://www.freqtrade.io/en/latest/updating/) to install a compatible ccxt version.")
f"({DOCS_LINK}/updating/) to install a compatible ccxt version.")
_migrate_binance_futures_db(config)
migrate_binance_futures_data(config)
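
packaging.version matters here because naive string comparison mis-orders multi-digit version components:

from packaging import version

print(version.parse("2.10.1") > version.parse("2.6.26"))  # True - semantic comparison
print("2.10.1" > "2.6.26")                                # False - '1' < '6' character-wise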

View File

@@ -22,6 +22,7 @@ nav:
- Web Hook: webhook-config.md
- Data Downloading: data-download.md
- Backtesting: backtesting.md
- Lookahead analysis: lookahead-analysis.md
- Hyperopt: hyperopt.md
- FreqAI:
- Introduction: freqai.md

View File

@@ -7,24 +7,24 @@
-r docs/requirements-docs.txt
coveralls==3.3.1
ruff==0.0.269
mypy==1.3.0
pre-commit==3.3.2
pytest==7.3.1
ruff==0.0.275
mypy==1.4.1
pre-commit==3.3.3
pytest==7.4.0
pytest-asyncio==0.21.0
pytest-cov==4.0.0
pytest-mock==3.10.0
pytest-cov==4.1.0
pytest-mock==3.11.1
pytest-random-order==1.1.0
isort==5.12.0
# For datetime mocking
time-machine==2.9.0
time-machine==2.10.0
# Convert jupyter notebooks to markdown documents
nbconvert==7.4.0
nbconvert==7.6.0
# mypy types
types-cachetools==5.3.0.5
types-filelock==3.2.7
types-requests==2.30.0.0
types-requests==2.31.0.1
types-tabulate==0.9.0.2
types-python-dateutil==2.8.19.13

View File

@@ -5,7 +5,7 @@
torch==2.0.1
# Until these branches are released, we can use this
gymnasium==0.28.1
stable_baselines3==2.0.0a10
stable_baselines3==2.0.0a13
sb3_contrib>=2.0.0a9
# Progress bar for stable-baselines3 and sb3-contrib
tqdm==4.65.0

Some files were not shown because too many files have changed in this diff.