diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index 6389a50df..748efc4d9 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -1,11 +1,12 @@ -FROM freqtradeorg/freqtrade:develop +FROM freqtradeorg/freqtrade:develop_freqairl USER root # Install dependencies COPY requirements-dev.txt /freqtrade/ RUN apt-get update \ - && apt-get -y install git mercurial sudo vim build-essential \ + && apt-get -y install --no-install-recommends apt-utils dialog \ + && apt-get -y install --no-install-recommends git sudo vim build-essential \ && apt-get clean \ && mkdir -p /home/ftuser/.vscode-server /home/ftuser/.vscode-server-insiders /home/ftuser/commandhistory \ && echo "export PROMPT_COMMAND='history -a'" >> /home/ftuser/.bashrc \ diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 8fb643e8f..08b8240b9 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -19,23 +19,24 @@ "postCreateCommand": "freqtrade create-userdir --userdir user_data/", "workspaceFolder": "/workspaces/freqtrade", - - "settings": { - "terminal.integrated.shell.linux": "/bin/bash", - "editor.insertSpaces": true, - "files.trimTrailingWhitespace": true, - "[markdown]": { - "files.trimTrailingWhitespace": false, + "customizations": { + "settings": { + "terminal.integrated.shell.linux": "/bin/bash", + "editor.insertSpaces": true, + "files.trimTrailingWhitespace": true, + "[markdown]": { + "files.trimTrailingWhitespace": false, + }, + "python.pythonPath": "/usr/local/bin/python", }, - "python.pythonPath": "/usr/local/bin/python", - }, - // Add the IDs of extensions you want installed when the container is created. - "extensions": [ - "ms-python.python", - "ms-python.vscode-pylance", - "davidanson.vscode-markdownlint", - "ms-azuretools.vscode-docker", - "vscode-icons-team.vscode-icons", - ], + // Add the IDs of extensions you want installed when the container is created. 
+ "extensions": [ + "ms-python.python", + "ms-python.vscode-pylance", + "davidanson.vscode-markdownlint", + "ms-azuretools.vscode-docker", + "vscode-icons-team.vscode-icons", + ], + } } diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 9ecd27cc3..8ceac4a7f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -136,6 +136,7 @@ jobs: uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} + check-latest: true - name: Cache_dependencies uses: actions/cache@v3 @@ -159,7 +160,8 @@ jobs: - name: Installation - macOS if: runner.os == 'macOS' run: | - brew update + # brew update + # TODO: Should be the brew upgrade # homebrew fails to update python due to unlinking failures # https://github.com/actions/runner-images/issues/6817 rm /usr/local/bin/2to3 || true @@ -459,7 +461,7 @@ jobs: python setup.py sdist bdist_wheel - name: Publish to PyPI (Test) - uses: pypa/gh-action-pypi-publish@v1.8.6 + uses: pypa/gh-action-pypi-publish@v1.8.7 if: (github.event_name == 'release') with: user: __token__ @@ -467,7 +469,7 @@ jobs: repository_url: https://test.pypi.org/legacy/ - name: Publish to PyPI - uses: pypa/gh-action-pypi-publish@v1.8.6 + uses: pypa/gh-action-pypi-publish@v1.8.7 if: (github.event_name == 'release') with: user: __token__ diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 4be298d7b..56c8a6010 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -8,17 +8,17 @@ repos: # stages: [push] - repo: https://github.com/pre-commit/mirrors-mypy - rev: "v1.0.1" + rev: "v1.3.0" hooks: - id: mypy exclude: build_helpers additional_dependencies: - types-cachetools==5.3.0.5 - types-filelock==3.2.7 - - types-requests==2.30.0.0 + - types-requests==2.31.0.1 - types-tabulate==0.9.0.2 - types-python-dateutil==2.8.19.13 - - SQLAlchemy==2.0.15 + - SQLAlchemy==2.0.17 # stages: [push] - repo: https://github.com/pycqa/isort @@ -30,7 +30,7 @@ repos: - repo: https://github.com/charliermarsh/ruff-pre-commit # Ruff version. - rev: 'v0.0.263' + rev: 'v0.0.270' hooks: - id: ruff diff --git a/Dockerfile b/Dockerfile index d3890a25b..b5f6f5d5e 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM python:3.10.11-slim-bullseye as base +FROM python:3.11.4-slim-bullseye as base # Setup env ENV LANG C.UTF-8 diff --git a/docs/advanced-hyperopt.md b/docs/advanced-hyperopt.md index ff0521f4f..eb8bf3f84 100644 --- a/docs/advanced-hyperopt.md +++ b/docs/advanced-hyperopt.md @@ -136,7 +136,7 @@ class MyAwesomeStrategy(IStrategy): ### Dynamic parameters -Parameters can also be defined dynamically, but must be available to the instance once the * [`bot_start()` callback](strategy-callbacks.md#bot-start) has been called. +Parameters can also be defined dynamically, but must be available to the instance once the [`bot_start()` callback](strategy-callbacks.md#bot-start) has been called. ``` python diff --git a/docs/data-download.md b/docs/data-download.md index a7b1987aa..d45c7ef63 100644 --- a/docs/data-download.md +++ b/docs/data-download.md @@ -6,7 +6,7 @@ To download data (candles / OHLCV) needed for backtesting and hyperoptimization If no additional parameter is specified, freqtrade will download data for `"1m"` and `"5m"` timeframes for the last 30 days. Exchange and pairs will come from `config.json` (if specified using `-c/--config`). -Otherwise `--exchange` becomes mandatory. +Without provided configuration, `--exchange` becomes mandatory. 
You can use a relative timerange (`--days 20`) or an absolute starting point (`--timerange 20200101-`). For incremental downloads, the relative approach should be used.

@@ -83,40 +83,47 @@ Common arguments:
 
 ```
 
+!!! Tip "Downloading all data for one quote currency"
+    Often, you'll want to download data for all pairs of a specific quote-currency. In such cases, you can use the following shorthand:
+    `freqtrade download-data --exchange binance --pairs .*/USDT <...>`. The provided "pairs" string will be expanded to contain all active pairs on the exchange.
+    To also download data for inactive (delisted) pairs, add `--include-inactive-pairs` to the command.
+
 !!! Note "Startup period"
     `download-data` is a strategy-independent command. The idea is to download a big chunk of data once, and then iteratively increase the amount of data stored.
     For that reason, `download-data` does not care about the "startup-period" defined in a strategy. It's up to the user to download additional days if the backtest should start at a specific point in time (while respecting startup period).
 
-### Pairs file
+### Start download
 
-In alternative to the whitelist from `config.json`, a `pairs.json` file can be used.
-If you are using Binance for example:
-
-- create a directory `user_data/data/binance` and copy or create the `pairs.json` file in that directory.
-- update the `pairs.json` file to contain the currency pairs you are interested in.
+A very simple command (assuming an available `config.json` file) can look as follows.
 
 ```bash
-mkdir -p user_data/data/binance
-touch user_data/data/binance/pairs.json
+freqtrade download-data --exchange binance
 ```
 
-The format of the `pairs.json` file is a simple json list.
-Mixing different stake-currencies is allowed for this file, since it's only used for downloading.
+This will download historical candle (OHLCV) data for all the currency pairs defined in the configuration.
 
-``` json
-[
-    "ETH/BTC",
-    "ETH/USDT",
-    "BTC/USDT",
-    "XRP/ETH"
-]
+Alternatively, specify the pairs directly
+
+```bash
+freqtrade download-data --exchange binance --pairs ETH/USDT XRP/USDT BTC/USDT
 ```
 
-!!! Tip "Downloading all data for one quote currency"
-    Often, you'll want to download data for all pairs of a specific quote-currency. In such cases, you can use the following shorthand:
-    `freqtrade download-data --exchange binance --pairs .*/USDT <...>`. The provided "pairs" string will be expanded to contain all active pairs on the exchange.
-    To also download data for inactive (delisted) pairs, add `--include-inactive-pairs` to the command.
+or as regex (in this case, to download all active USDT pairs)
+
+```bash
+freqtrade download-data --exchange binance --pairs .*/USDT
+```
+
+### Other Notes
+
+* To use a different directory than the exchange specific default, use `--datadir user_data/data/some_directory`.
+* To change the exchange used to download the historical data from, please use a different configuration file (you'll probably need to adjust rate limits etc.)
+* To use `pairs.json` from some other directory, use `--pairs-file some_other_dir/pairs.json`.
+* To download historical candle (OHLCV) data for only 10 days, use `--days 10` (defaults to 30 days).
+* To download historical candle (OHLCV) data from a fixed starting point, use `--timerange 20200101-` - which will download all data from January 1st, 2020.
+* Use `--timeframes` to specify which timeframes to download the historical candle (OHLCV) data for. Default is `--timeframes 1m 5m`, which will download 1-minute and 5-minute data.
+* To use the exchange, timeframe and list of pairs as defined in your configuration file, use the `-c/--config` option. With this, the script uses the whitelist defined in the config as the list of currency pairs to download data for, and does not require the pairs.json file. You can combine `-c/--config` with most other options.
 
 ??? Note "Permission denied errors"
     If your configuration directory `user_data` was made by docker, you may get the following error:
 
@@ -131,39 +138,7 @@ Mixing different stake-currencies is allowed for this file, since it's only used
     sudo chown -R $UID:$GID user_data
     ```
 
-### Start download
-
-Then run:
-
-```bash
-freqtrade download-data --exchange binance
-```
-
-This will download historical candle (OHLCV) data for all the currency pairs you defined in `pairs.json`.
-
-Alternatively, specify the pairs directly
-
-```bash
-freqtrade download-data --exchange binance --pairs ETH/USDT XRP/USDT BTC/USDT
-```
-
-or as regex (to download all active USDT pairs)
-
-```bash
-freqtrade download-data --exchange binance --pairs .*/USDT
-```
-
-### Other Notes
-
-- To use a different directory than the exchange specific default, use `--datadir user_data/data/some_directory`.
-- To change the exchange used to download the historical data from, please use a different configuration file (you'll probably need to adjust rate limits etc.)
-- To use `pairs.json` from some other directory, use `--pairs-file some_other_dir/pairs.json`.
-- To download historical candle (OHLCV) data for only 10 days, use `--days 10` (defaults to 30 days).
-- To download historical candle (OHLCV) data from a fixed starting point, use `--timerange 20200101-` - which will download all data from January 1st, 2020.
-- Use `--timeframes` to specify what timeframe download the historical candle (OHLCV) data for. Default is `--timeframes 1m 5m` which will download 1-minute and 5-minute data.
-- To use exchange, timeframe and list of pairs as defined in your configuration file, use the `-c/--config` option. With this, the script uses the whitelist defined in the config as the list of currency pairs to download data for and does not require the pairs.json file. You can combine `-c/--config` with most other options.
-
-#### Download additional data before the current timerange
+### Download additional data before the current timerange
 
 Assuming you downloaded all data from 2022 (`--timerange 20220101-`) - but you'd now like to also backtest with earlier data.
 You can do so by using the `--prepend` flag, combined with `--timerange` - specifying an end-date.
 
@@ -238,7 +213,36 @@ Size has been taken from the BTC/USDT 1m spot combination for the timerange spec
 
 To have a best performance/size mix, we recommend the use of either feather or parquet.
 
-#### Sub-command convert data
+### Pairs file
+
+As an alternative to the whitelist from `config.json`, a `pairs.json` file can be used.
+If you are using Binance for example:
+
+* create a directory `user_data/data/binance` and copy or create the `pairs.json` file in that directory.
+* update the `pairs.json` file to contain the currency pairs you are interested in.
+
+```bash
+mkdir -p user_data/data/binance
+touch user_data/data/binance/pairs.json
+```
+
+The format of the `pairs.json` file is a simple json list.
+Mixing different stake-currencies is allowed for this file, since it's only used for downloading.
+
+``` json
+[
+    "ETH/BTC",
+    "ETH/USDT",
+    "BTC/USDT",
+    "XRP/ETH"
+]
+```
+
+!!! Note
+    The `pairs.json` file is only used when no configuration is loaded (implicitly by naming, or via the `--config` flag).
+    You can force the usage of this file via `--pairs-file pairs.json` - however we recommend using the pairlist from within the configuration, either via the `exchange.pair_whitelist` or `pairs` setting in the configuration.
+
+## Sub-command convert data
 
 ```
 usage: freqtrade convert-data [-h] [-v] [--logfile FILE] [-V] [-c PATH]
@@ -290,7 +294,7 @@ Common arguments:
 
 ```
 
-##### Example converting data
+### Example converting data
 
 The following command will convert all candle (OHLCV) data available in `~/.freqtrade/data/binance` from json to jsongz, saving diskspace in the process.
 It'll also remove original json data files (`--erase` parameter).
 
@@ -299,7 +303,7 @@ It'll also remove original json data files (`--erase` parameter).
 freqtrade convert-data --format-from json --format-to jsongz --datadir ~/.freqtrade/data/binance -t 5m 15m --erase
 ```
 
-#### Sub-command convert trade data
+## Sub-command convert trade data
 
 ```
 usage: freqtrade convert-trade-data [-h] [-v] [--logfile FILE] [-V] [-c PATH]
@@ -342,7 +346,7 @@ Common arguments:
 
 ```
 
-##### Example converting trades
+### Example converting trades
 
 The following command will convert all available trade-data in `~/.freqtrade/data/kraken` from jsongz to json.
 It'll also remove original jsongz data files (`--erase` parameter).
 
@@ -351,7 +355,7 @@ It'll also remove original jsongz data files (`--erase` parameter).
 freqtrade convert-trade-data --format-from jsongz --format-to json --datadir ~/.freqtrade/data/kraken --erase
 ```
 
-### Sub-command trades to ohlcv
+## Sub-command trades to ohlcv
 
 When you need to use `--dl-trades` (kraken only) to download data, conversion of trades data to ohlcv data is the last step.
 This command will allow you to repeat this last step for additional timeframes without re-downloading the data.
 
@@ -400,13 +404,13 @@ Common arguments:
 
 ```
 
-#### Example trade-to-ohlcv conversion
+### Example trade-to-ohlcv conversion
 
 ``` bash
 freqtrade trades-to-ohlcv --exchange kraken -t 5m 1h 1d --pairs BTC/EUR ETH/EUR
 ```
 
-### Sub-command list-data
+## Sub-command list-data
 
 You can get a list of downloaded data using the `list-data` sub-command.
 
@@ -451,7 +455,7 @@ Common arguments:
 
 ```
 
-#### Example list-data
+### Example list-data
 
 ```bash
 > freqtrade list-data --userdir ~/.freqtrade/user_data/
@@ -465,7 +469,7 @@
 ETH/BTC   5m, 15m, 30m, 1h, 2h, 4h, 6h, 12h, 1d
 ETH/USDT  5m, 15m, 30m, 1h, 2h, 4h
 ```
 
-### Trades (tick) data
+## Trades (tick) data
 
 By default, the `download-data` sub-command downloads Candles (OHLCV) data. Some exchanges also provide historic trade-data via their API.
 This data can be useful if you need many different timeframes, since it is only downloaded once, and then resampled locally to the desired timeframes.
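+To sanity-check what ended up on disk, the downloaded candles can be loaded back with freqtrade's own helpers. A minimal sketch (the pair, timeframe and datadir below are illustrative assumptions, not defaults you must use):
+
+```python
+from pathlib import Path
+
+from freqtrade.data.history import load_pair_history
+
+# Assumes 5m BTC/USDT data was downloaded to the default binance datadir
+candles = load_pair_history(
+    pair="BTC/USDT",
+    timeframe="5m",
+    datadir=Path("user_data/data/binance"),
+)
+print(f"{len(candles)} candles, {candles['date'].min()} - {candles['date'].max()}")
+```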
diff --git a/docs/freqai-configuration.md b/docs/freqai-configuration.md index 43c9fee75..090fa8415 100644 --- a/docs/freqai-configuration.md +++ b/docs/freqai-configuration.md @@ -43,10 +43,10 @@ The FreqAI strategy requires including the following lines of code in the standa def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame: - # the model will return all labels created by user in `set_freqai_labels()` + # the model will return all labels created by user in `set_freqai_targets()` # (& appended targets), an indication of whether or not the prediction should be accepted, # the target mean/std values for each of the labels created by user in - # `feature_engineering_*` for each training period. + # `set_freqai_targets()` for each training period. dataframe = self.freqai.start(dataframe, metadata, self) @@ -160,7 +160,7 @@ Below are the values you can expect to include/use inside a typical strategy dat |------------|-------------| | `df['&*']` | Any dataframe column prepended with `&` in `set_freqai_targets()` is treated as a training target (label) inside FreqAI (typically following the naming convention `&-s*`). For example, to predict the close price 40 candles into the future, you would set `df['&-s_close'] = df['close'].shift(-self.freqai_info["feature_parameters"]["label_period_candles"])` with `"label_period_candles": 40` in the config. FreqAI makes the predictions and gives them back under the same key (`df['&-s_close']`) to be used in `populate_entry/exit_trend()`.
**Datatype:** Depends on the output of the model. | `df['&*_std/mean']` | Standard deviation and mean values of the defined labels during training (or live tracking with `fit_live_predictions_candles`). Commonly used to understand the rarity of a prediction (use the z-score as shown in `templates/FreqaiExampleStrategy.py` and explained [here](#creating-a-dynamic-target-threshold) to evaluate how often a particular prediction was observed during training or historically with `fit_live_predictions_candles`).
**Datatype:** Float. -| `df['do_predict']` | Indication of an outlier data point. The return value is integer between -2 and 2, which lets you know if the prediction is trustworthy or not. `do_predict==1` means that the prediction is trustworthy. If the Dissimilarity Index (DI, see details [here](freqai-feature-engineering.md#identifying-outliers-with-the-dissimilarity-index-di)) of the input data point is above the threshold defined in the config, FreqAI will subtract 1 from `do_predict`, resulting in `do_predict==0`. If `use_SVM_to_remove_outliers()` is active, the Support Vector Machine (SVM, see details [here](freqai-feature-engineering.md#identifying-outliers-using-a-support-vector-machine-svm)) may also detect outliers in training and prediction data. In this case, the SVM will also subtract 1 from `do_predict`. If the input data point was considered an outlier by the SVM but not by the DI, or vice versa, the result will be `do_predict==0`. If both the DI and the SVM considers the input data point to be an outlier, the result will be `do_predict==-1`. As with the SVM, if `use_DBSCAN_to_remove_outliers` is active, DBSCAN (see details [here](freqai-feature-engineering.md#identifying-outliers-with-dbscan)) may also detect outliers and subtract 1 from `do_predict`. Hence, if both the SVM and DBSCAN are active and identify a datapoint that was above the DI threshold as an outlier, the result will be `do_predict==-2`. A particular case is when `do_predict == 2`, which means that the model has expired due to exceeding `expired_hours`.
**Datatype:** Integer between -2 and 2.
+| `df['do_predict']` | Indication of an outlier data point. The return value is an integer between -2 and 2, which lets you know if the prediction is trustworthy or not. `do_predict==1` means that the prediction is trustworthy. If the Dissimilarity Index (DI, see details [here](freqai-feature-engineering.md#identifying-outliers-with-the-dissimilarity-index-di)) of the input data point is above the threshold defined in the config, FreqAI will subtract 1 from `do_predict`, resulting in `do_predict==0`. If `use_SVM_to_remove_outliers` is active, the Support Vector Machine (SVM, see details [here](freqai-feature-engineering.md#identifying-outliers-using-a-support-vector-machine-svm)) may also detect outliers in training and prediction data. In this case, the SVM will also subtract 1 from `do_predict`. If the input data point was considered an outlier by the SVM but not by the DI, or vice versa, the result will be `do_predict==0`. If both the DI and the SVM consider the input data point to be an outlier, the result will be `do_predict==-1`. As with the SVM, if `use_DBSCAN_to_remove_outliers` is active, DBSCAN (see details [here](freqai-feature-engineering.md#identifying-outliers-with-dbscan)) may also detect outliers and subtract 1 from `do_predict`. Hence, if both the SVM and DBSCAN are active and identify a datapoint that was above the DI threshold as an outlier, the result will be `do_predict==-2`. A particular case is when `do_predict == 2`, which means that the model has expired due to exceeding `expired_hours`.<br>
**Datatype:** Integer between -2 and 2. | `df['DI_values']` | Dissimilarity Index (DI) values are proxies for the level of confidence FreqAI has in the prediction. A lower DI means the prediction is close to the training data, i.e., higher prediction confidence. See details about the DI [here](freqai-feature-engineering.md#identifying-outliers-with-the-dissimilarity-index-di).
**Datatype:** Float. | `df['%*']` | Any dataframe column prepended with `%` in `feature_engineering_*()` is treated as a training feature. For example, you can include the RSI in the training feature set (similar to in `templates/FreqaiExampleStrategy.py`) by setting `df['%-rsi']`. See more details on how this is done [here](freqai-feature-engineering.md).
**Note:** Since the number of features prepended with `%` can multiply very quickly (10s of thousands of features are easily engineered using the multiplicative functionality of, e.g., `include_shifted_candles` and `include_timeframes` as described in the [parameter table](freqai-parameter-table.md)), these features are removed from the dataframe that is returned from FreqAI to the strategy. To keep a particular type of feature for plotting purposes, you would prepend it with `%%`.<br>
**Datatype:** Depends on the output of the model. diff --git a/docs/freqai-feature-engineering.md b/docs/freqai-feature-engineering.md index 82b7569a5..daf645339 100644 --- a/docs/freqai-feature-engineering.md +++ b/docs/freqai-feature-engineering.md @@ -180,6 +180,9 @@ You can ask for each of the defined features to be included also for informative In total, the number of features the user of the presented example strat has created is: length of `include_timeframes` * no. features in `feature_engineering_expand_*()` * length of `include_corr_pairlist` * no. `include_shifted_candles` * length of `indicator_periods_candles` $= 3 * 3 * 3 * 2 * 2 = 108$. + + !!! note "Learn more about creative feature engineering" + Check out our [medium article](https://emergentmethods.medium.com/freqai-from-price-to-prediction-6fadac18b665) geared toward helping users learn how to creatively engineer features. ### Gain finer control over `feature_engineering_*` functions with `metadata` @@ -209,41 +212,7 @@ Another example, where the user wants to use live metrics from the trade databas You need to set the standard dictionary in the config so that FreqAI can return proper dataframe shapes. These values will likely be overridden by the prediction model, but in the case where the model has yet to set them, or needs a default initial value, the pre-set values are what will be returned. -## Feature normalization - -FreqAI is strict when it comes to data normalization. The train features, $X^{train}$, are always normalized to [-1, 1] using a shifted min-max normalization: - -$$X^{train}_{norm} = 2 * \frac{X^{train} - X^{train}.min()}{X^{train}.max() - X^{train}.min()} - 1$$ - -All other data (test data and unseen prediction data in dry/live/backtest) is always automatically normalized to the training feature space according to industry standards. FreqAI stores all the metadata required to ensure that test and prediction features will be properly normalized and that predictions are properly denormalized. For this reason, it is not recommended to eschew industry standards and modify FreqAI internals - however - advanced users can do so by inheriting `train()` in their custom `IFreqaiModel` and using their own normalization functions. - -## Data dimensionality reduction with Principal Component Analysis - -You can reduce the dimensionality of your features by activating the `principal_component_analysis` in the config: - -```json - "freqai": { - "feature_parameters" : { - "principal_component_analysis": true - } - } -``` - -This will perform PCA on the features and reduce their dimensionality so that the explained variance of the data set is >= 0.999. Reducing data dimensionality makes training the model faster and hence allows for more up-to-date models. - -## Inlier metric - -The `inlier_metric` is a metric aimed at quantifying how similar the features of a data point are to the most recent historical data points. - -You define the lookback window by setting `inlier_metric_window` and FreqAI computes the distance between the present time point and each of the previous `inlier_metric_window` lookback points. A Weibull function is fit to each of the lookback distributions and its cumulative distribution function (CDF) is used to produce a quantile for each lookback point. The `inlier_metric` is then computed for each time point as the average of the corresponding lookback quantiles. The figure below explains the concept for an `inlier_metric_window` of 5. 
-
-![inlier-metric](assets/freqai_inlier-metric.jpg)
-
-FreqAI adds the `inlier_metric` to the training features and hence gives the model access to a novel type of temporal information.
-
-This function does **not** remove outliers from the data set.
-
-## Weighting features for temporal importance
+### Weighting features for temporal importance
 
 FreqAI allows you to set a `weight_factor` to weight recent data more strongly than past data via an exponential function:
 
@@ -253,13 +222,103 @@ where $W_i$ is the weight of data point $i$ in a total set of $n$ data points. B
 
 ![weight-factor](assets/freqai_weight-factor.jpg)
 
+## Building the data pipeline
+
+By default, FreqAI builds a dynamic pipeline based on user configuration settings. The default settings are robust and designed to work with a variety of methods. The default pipeline consists of two steps: a `MinMaxScaler(-1,1)` and a `VarianceThreshold`, which removes any column that has 0 variance. Users can activate other steps with more configuration parameters. For example, if users add `use_SVM_to_remove_outliers: true` to the `freqai` config, then FreqAI will automatically add the [`SVMOutlierExtractor`](#identifying-outliers-using-a-support-vector-machine-svm) to the pipeline. Likewise, users can add `principal_component_analysis: true` to the `freqai` config to activate PCA. The [DissimilarityIndex](#identifying-outliers-with-the-dissimilarity-index-di) is activated with `DI_threshold: 1`, and noise can be added to the data with `noise_standard_deviation: 0.1`. Finally, users can add [DBSCAN](#identifying-outliers-with-dbscan) outlier removal with `use_DBSCAN_to_remove_outliers: true`.
+
+!!! note "More information available"
+    Please review the [parameter table](freqai-parameter-table.md) for more information on these parameters.
+
+
+### Customizing the pipeline
+
+Users are encouraged to customize the data pipeline to their needs by building their own. This can be done by simply setting `dk.feature_pipeline` to their desired `Pipeline` object inside their `IFreqaiModel` `train()` function, or, if they prefer not to touch the `train()` function, they can override the `define_data_pipeline`/`define_label_pipeline` functions in their `IFreqaiModel`:
+
+!!! note "More information available"
+    FreqAI uses the [`DataSieve`](https://github.com/emergentmethods/datasieve) pipeline, which follows the SKlearn pipeline API, but adds, among other features, coherent point removal across the X, y, and sample_weight vectors, feature removal, and feature name tracking.
+
+```python
+from typing import Any, Dict
+
+from datasieve.transforms import SKLearnWrapper, DissimilarityIndex
+from datasieve.pipeline import Pipeline
+from sklearn.preprocessing import QuantileTransformer, StandardScaler
+
+from freqai.base_models import BaseRegressionModel
+from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
+
+
+class MyFreqaiModel(BaseRegressionModel):
+    """
+    Some cool custom model
+    """
+    def fit(self, data_dictionary: Dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
+        """
+        My custom fit function
+        """
+        model = cool_model.fit()  # `cool_model` is a placeholder for your actual model
+        return model
+
+    def define_data_pipeline(self) -> Pipeline:
+        """
+        User defines their custom feature pipeline here (if they wish)
+        """
+        feature_pipeline = Pipeline([
+            ('qt', SKLearnWrapper(QuantileTransformer(output_distribution='normal'))),
+            ('di', DissimilarityIndex(di_threshold=1))
+        ])
+
+        return feature_pipeline
+
+    def define_label_pipeline(self) -> Pipeline:
+        """
+        User defines their custom label pipeline here (if they wish)
+        """
+        label_pipeline = Pipeline([
+            ('qt', SKLearnWrapper(StandardScaler())),
+        ])
+
+        return label_pipeline
+```
+
+Here, you are defining the exact pipeline that will be used for your feature set during training and prediction. You can use *most* SKLearn transformation steps by wrapping them in the `SKLearnWrapper` class as shown above. In addition, you can use any of the transformations available in the [`DataSieve` library](https://github.com/emergentmethods/datasieve).
+
+You can easily add your own transformation by creating a class that inherits from the datasieve `BaseTransform` and implementing your `fit()`, `transform()` and `inverse_transform()` methods:
+
+```python
+from datasieve.transforms.base_transform import BaseTransform
+# import whatever else you need
+
+class MyCoolTransform(BaseTransform):
+    def __init__(self, **kwargs):
+        self.param1 = kwargs.get('param1', 1)
+
+    def fit(self, X, y=None, sample_weight=None, feature_list=None, **kwargs):
+        # do something with X, y, sample_weight, or/and feature_list
+        return X, y, sample_weight, feature_list
+
+    def transform(self, X, y=None, sample_weight=None,
+                  feature_list=None, outlier_check=False, **kwargs):
+        # do something with X, y, sample_weight, or/and feature_list
+        return X, y, sample_weight, feature_list
+
+    def inverse_transform(self, X, y=None, sample_weight=None, feature_list=None, **kwargs):
+        # do - or don't do - something with X, y, sample_weight, or/and feature_list
+        return X, y, sample_weight, feature_list
+```
+
+!!! note "Hint"
+    You can define this custom class in the same file as your `IFreqaiModel`.
+
+### Migrating a custom `IFreqaiModel` to the new Pipeline
+
+If you have created your own custom `IFreqaiModel` with a custom `train()`/`predict()` function, *and* you still rely on `data_cleaning_train/predict()`, then you will need to migrate to the new pipeline. If your model does *not* rely on `data_cleaning_train/predict()`, then you do not need to worry about this migration.
+
+More details about the migration can be found [here](strategy_migration.md#freqai---new-data-pipeline).
+
 ## Outlier detection
 
 Equity and crypto markets suffer from a high level of non-patterned noise in the form of outlier data points. FreqAI implements a variety of methods to identify such outliers and hence mitigate risk.
 
 ### Identifying outliers with the Dissimilarity Index (DI)
 
- The Dissimilarity Index (DI) aims to quantify the uncertainty associated with each prediction made by the model.
You can tell FreqAI to remove outlier data points from the training/test data sets using the DI by including the following statement in the config: @@ -271,7 +330,7 @@ You can tell FreqAI to remove outlier data points from the training/test data se } ``` - The DI allows predictions which are outliers (not existent in the model feature space) to be thrown out due to low levels of certainty. To do so, FreqAI measures the distance between each training data point (feature vector), $X_{a}$, and all other training data points: +Which will add `DissimilarityIndex` step to your `feature_pipeline` and set the threshold to 1. The DI allows predictions which are outliers (not existent in the model feature space) to be thrown out due to low levels of certainty. To do so, FreqAI measures the distance between each training data point (feature vector), $X_{a}$, and all other training data points: $$ d_{ab} = \sqrt{\sum_{j=1}^p(X_{a,j}-X_{b,j})^2} $$ @@ -305,9 +364,9 @@ You can tell FreqAI to remove outlier data points from the training/test data se } ``` -The SVM will be trained on the training data and any data point that the SVM deems to be beyond the feature space will be removed. +Which will add `SVMOutlierExtractor` step to your `feature_pipeline`. The SVM will be trained on the training data and any data point that the SVM deems to be beyond the feature space will be removed. -FreqAI uses `sklearn.linear_model.SGDOneClassSVM` (details are available on scikit-learn's webpage [here](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.SGDOneClassSVM.html) (external website)) and you can elect to provide additional parameters for the SVM, such as `shuffle`, and `nu`. +You can elect to provide additional parameters for the SVM, such as `shuffle`, and `nu` via the `feature_parameters.svm_params` dictionary in the config. The parameter `shuffle` is by default set to `False` to ensure consistent results. If it is set to `True`, running the SVM multiple times on the same data set might result in different outcomes due to `max_iter` being to low for the algorithm to reach the demanded `tol`. Increasing `max_iter` solves this issue but causes the procedure to take longer time. @@ -325,7 +384,7 @@ You can configure FreqAI to use DBSCAN to cluster and remove outliers from the t } ``` -DBSCAN is an unsupervised machine learning algorithm that clusters data without needing to know how many clusters there should be. +Which will add the `DataSieveDBSCAN` step to your `feature_pipeline`. This is an unsupervised machine learning algorithm that clusters data without needing to know how many clusters there should be. Given a number of data points $N$, and a distance $\varepsilon$, DBSCAN clusters the data set by setting all data points that have $N-1$ other data points within a distance of $\varepsilon$ as *core points*. A data point that is within a distance of $\varepsilon$ from a *core point* but that does not have $N-1$ other data points within a distance of $\varepsilon$ from itself is considered an *edge point*. A cluster is then the collection of *core points* and *edge points*. Data points that have no other data points at a distance $<\varepsilon$ are considered outliers. The figure below shows a cluster with $N = 3$. 
diff --git a/docs/freqai.md b/docs/freqai.md
index 3c4f47212..820fb81f6 100644
--- a/docs/freqai.md
+++ b/docs/freqai.md
@@ -76,7 +76,7 @@ pip install -r requirements-freqai.txt
 
 ### Usage with docker
 
-If you are using docker, a dedicated tag with FreqAI dependencies is available as `:freqai`. As such - you can replace the image line in your docker compose file with `image: freqtradeorg/freqtrade:develop_freqai`. This image contains the regular FreqAI dependencies. Similar to native installs, Catboost will not be available on ARM based devices.
+If you are using docker, a dedicated tag with FreqAI dependencies is available as `:freqai`. As such - you can replace the image line in your docker compose file with `image: freqtradeorg/freqtrade:develop_freqai`. This image contains the regular FreqAI dependencies. Similar to native installs, Catboost will not be available on ARM based devices. If you would like to use PyTorch or reinforcement learning, you should use the torch or RL tags, `image: freqtradeorg/freqtrade:develop_freqaitorch` or `image: freqtradeorg/freqtrade:develop_freqairl`.
 
 !!! note "docker-compose-freqai.yml"
     We do provide an explicit docker-compose file for this in `docker/docker-compose-freqai.yml` - which can be used via `docker compose -f docker/docker-compose-freqai.yml run ...` - or can be copied to replace the original docker file. This docker-compose file also contains a (disabled) section to enable GPU resources within docker containers. This obviously assumes the system has GPU resources available.
 
@@ -107,6 +107,13 @@ This is for performance reasons - FreqAI relies on making quick predictions/retr
 it needs to download all the training data at the beginning of a dry/live instance. FreqAI stores and appends
 new candles automatically for future retrains. This means that if new pairs arrive later in the dry run due to a volume pairlist, it will not have the data ready. However, FreqAI does work with the `ShufflePairlist` or a `VolumePairlist` which keeps the total pairlist constant (but reorders the pairs according to volume).
 
+## Additional learning materials
+
+Here we compile some external materials that provide deeper looks into various components of FreqAI:
+
+- [Real-time head-to-head: Adaptive modeling of financial market data using XGBoost and CatBoost](https://emergentmethods.medium.com/real-time-head-to-head-adaptive-modeling-of-financial-market-data-using-xgboost-and-catboost-995a115a7495)
+- [FreqAI - from price to prediction](https://emergentmethods.medium.com/freqai-from-price-to-prediction-6fadac18b665)
+
 ## Credits
 
 FreqAI is developed by a group of individuals who all contribute specific skillsets to the project.
diff --git a/docs/lookahead-analysis.md b/docs/lookahead-analysis.md
new file mode 100644
index 000000000..9d57de779
--- /dev/null
+++ b/docs/lookahead-analysis.md
@@ -0,0 +1,100 @@
+# Lookahead analysis
+
+This page explains how to validate your strategy in terms of look ahead bias.
+
+Checking for look ahead bias is the bane of any strategy developer, since bias is sometimes very easy to introduce into a backtest -
+but very hard to detect.
+
+Backtesting initializes all timestamps at once and calculates all indicators in the beginning.
+This means that your indicators or entry/exit signals could look into future candles and falsify your backtest.
+
+Lookahead-analysis requires historic data to be available.
+To learn how to get data for the pairs and exchange you're interested in,
+head over to the [Data Downloading](data-download.md) section of the documentation.
+
+This command is built upon backtesting since it internally chains backtests and pokes at the strategy to provoke it to show look ahead bias.
+This is done not by looking at the strategy itself, but at the results it returned.
+The results are things like changed indicator-values and moved entries/exits compared to the full backtest.
+
+You can use most of the arguments of [Backtesting](backtesting.md).
+It also supports the lookahead-analysis of freqai strategies.
+
+- `--cache` is forced to "none".
+- `--max-open-trades` is forced to be at least equal to the number of pairs.
+- `--dry-run-wallet` is forced to be basically infinite.
+
+## Lookahead-analysis command reference
+
+```
+usage: freqtrade lookahead-analysis [-h] [-v] [--logfile FILE] [-V] [-c PATH]
+                                    [-d PATH] [--userdir PATH] [-s NAME]
+                                    [--strategy-path PATH]
+                                    [--recursive-strategy-search]
+                                    [--freqaimodel NAME]
+                                    [--freqaimodel-path PATH] [-i TIMEFRAME]
+                                    [--timerange TIMERANGE]
+                                    [--data-format-ohlcv {json,jsongz,hdf5,feather,parquet}]
+                                    [--max-open-trades INT]
+                                    [--stake-amount STAKE_AMOUNT]
+                                    [--fee FLOAT] [-p PAIRS [PAIRS ...]]
+                                    [--enable-protections]
+                                    [--dry-run-wallet DRY_RUN_WALLET]
+                                    [--timeframe-detail TIMEFRAME_DETAIL]
+                                    [--strategy-list STRATEGY_LIST [STRATEGY_LIST ...]]
+                                    [--export {none,trades,signals}]
+                                    [--export-filename PATH]
+                                    [--breakdown {day,week,month} [{day,week,month} ...]]
+                                    [--cache {none,day,week,month}]
+                                    [--freqai-backtest-live-models]
+                                    [--minimum-trade-amount INT]
+                                    [--targeted-trade-amount INT]
+                                    [--lookahead-analysis-exportfilename LOOKAHEAD_ANALYSIS_EXPORTFILENAME]
+
+options:
+  --minimum-trade-amount INT
+                        Minimum trade amount for lookahead-analysis
+  --targeted-trade-amount INT
+                        Targeted trade amount for lookahead analysis
+  --lookahead-analysis-exportfilename LOOKAHEAD_ANALYSIS_EXPORTFILENAME
+                        Use this csv-filename to store lookahead-analysis-
+                        results
+```
+
+!!! Note ""
+    The above output was reduced to the options `lookahead-analysis` adds on top of the regular backtesting commands.
+
+### Summary
+
+Checks a given strategy for look ahead bias via lookahead-analysis.
+Look ahead bias means that the backtest uses data from future candles, making the strategy unviable beyond backtesting
+and producing false hopes for the person backtesting.
+
+### Introduction
+
+Many strategies - without the programmer knowing - have fallen prey to look ahead bias.
+
+Any backtest will populate the full dataframe, including all time stamps, at the beginning.
+If the programmer is not careful, or is oblivious to how things work internally
+(which can sometimes be really hard to find out), the strategy will simply look into the future, making it appear amazing -
+but not realistic.
+
+This command tries to verify the validity of a strategy with respect to the aforementioned look ahead bias.
+
+### How does the command work?
+
+It will start with a backtest of all pairs to generate a baseline for indicators and entries/exits.
+After the backtest has run, it will check whether the `minimum-trade-amount` is met
+and, if not, cancel the lookahead-analysis for this strategy.
+
+After setting the baseline, it will then do additional runs for every entry and exit separately.
+When a verification-backtest is done, it will compare the indicators at the signal (either entry or exit) against those of the full backtest and report the bias.
+After all signals have been verified or falsified, a result-table will be generated for the user to see.
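+
+A typical invocation could look like this - the strategy name, timerange and export filename below are illustrative placeholders, while all options used are documented in the command reference above:
+
+```bash
+freqtrade lookahead-analysis --strategy MyAwesomeStrategy \
+    --timerange 20230101-20230401 \
+    --minimum-trade-amount 10 \
+    --targeted-trade-amount 20 \
+    --lookahead-analysis-exportfilename lookahead.csv
+```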
+
+### Caveats
+
+- `lookahead-analysis` can only verify / falsify the trades it actually calculated.
+If the strategy has many different signals / signal types, it's up to you to select appropriate parameters to ensure that all signals have triggered at least once. Signals that never triggered will not have been verified.
+This could lead to a false-negative (the strategy will then be reported as non-biased).
+- `lookahead-analysis` has access to everything that backtesting has, too.
+Please don't use risky configurations like enabling position stacking.
+If you decide to do so anyway, then make doubly sure that you won't ever run out of `max_open_trades` slots or of money in your wallet.
diff --git a/docs/requirements-docs.txt b/docs/requirements-docs.txt
index c5e478c78..d6ee2fd14 100644
--- a/docs/requirements-docs.txt
+++ b/docs/requirements-docs.txt
@@ -1,6 +1,6 @@
 markdown==3.3.7
 mkdocs==1.4.3
-mkdocs-material==9.1.14
+mkdocs-material==9.1.17
 mdx_truly_sane_lists==1.3
 pymdown-extensions==10.0.1
 jinja2==3.1.2
diff --git a/docs/strategy-customization.md b/docs/strategy-customization.md
index 8b6654c6c..8913d787b 100644
--- a/docs/strategy-customization.md
+++ b/docs/strategy-customization.md
@@ -342,16 +342,12 @@ The above configuration would therefore mean:
 
 The calculation does include fees.
 
-To disable ROI completely, set it to an insanely high number:
+To disable ROI completely, set it to an empty dictionary:
 
 ```python
-minimal_roi = {
-    "0": 100
-}
+minimal_roi = {}
 ```
 
-While technically not completely disabled, this would exit once the trade reaches 10000% Profit.
-
 To use times based on candle duration (timeframe), the following snippet can be handy.
 This will allow you to change the timeframe for the strategy, and ROI times will still be set as candles (e.g. after 3 candles ...)
 
diff --git a/docs/strategy_migration.md b/docs/strategy_migration.md
index 5ef7a5a4c..d00349d1d 100644
--- a/docs/strategy_migration.md
+++ b/docs/strategy_migration.md
@@ -728,3 +728,86 @@ Targets now get their own, dedicated method.
 
         return dataframe
 ```
+
+
+### FreqAI - New data Pipeline
+
+If you have created your own custom `IFreqaiModel` with a custom `train()`/`predict()` function, *and* you still rely on `data_cleaning_train/predict()`, then you will need to migrate to the new pipeline. If your model does *not* rely on `data_cleaning_train/predict()`, then you do not need to worry about this migration. That means that this migration guide is relevant for a very small percentage of power-users. If you stumbled upon this guide by mistake, feel free to inquire in depth about your problem in the Freqtrade discord server.
+
+The conversion involves first removing `data_cleaning_train/predict()` and replacing them with `define_data_pipeline()` and `define_label_pipeline()` functions in your `IFreqaiModel` class:
+
+```python linenums="1" hl_lines="11-14 47-49 55-57"
+class MyCoolFreqaiModel(BaseRegressionModel):
+    """
+    Some cool custom IFreqaiModel you made before Freqtrade version 2023.6
+    """
+    def train(
+        self, unfiltered_df: DataFrame, pair: str, dk: FreqaiDataKitchen, **kwargs
+    ) -> Any:
+
+        # ... your custom stuff
+
+        # Remove these lines
+        # data_dictionary = dk.make_train_test_datasets(features_filtered, labels_filtered)
+        # self.data_cleaning_train(dk)
+        # data_dictionary = dk.normalize_data(data_dictionary)
+        # (1)
+
+        # Add these lines. Now we control the pipeline fit/transform ourselves
+        dd = dk.make_train_test_datasets(features_filtered, labels_filtered)
+        dk.feature_pipeline = self.define_data_pipeline(threads=dk.thread_count)
+        dk.label_pipeline = self.define_label_pipeline(threads=dk.thread_count)
+
+        (dd["train_features"],
+         dd["train_labels"],
+         dd["train_weights"]) = dk.feature_pipeline.fit_transform(dd["train_features"],
+                                                                  dd["train_labels"],
+                                                                  dd["train_weights"])
+
+        (dd["test_features"],
+         dd["test_labels"],
+         dd["test_weights"]) = dk.feature_pipeline.transform(dd["test_features"],
+                                                             dd["test_labels"],
+                                                             dd["test_weights"])
+
+        dd["train_labels"], _, _ = dk.label_pipeline.fit_transform(dd["train_labels"])
+        dd["test_labels"], _, _ = dk.label_pipeline.transform(dd["test_labels"])
+
+        # ... your custom code
+
+        return model
+
+    def predict(
+        self, unfiltered_df: DataFrame, dk: FreqaiDataKitchen, **kwargs
+    ) -> Tuple[DataFrame, npt.NDArray[np.int_]]:
+
+        # ... your custom stuff
+
+        # Remove these lines:
+        # self.data_cleaning_predict(dk)
+        # (2)
+
+        # Add these lines:
+        dk.data_dictionary["prediction_features"], outliers, _ = dk.feature_pipeline.transform(
+            dk.data_dictionary["prediction_features"], outlier_check=True)
+
+        # Remove this line
+        # pred_df = dk.denormalize_labels_from_metadata(pred_df)
+        # (3)
+
+        # Replace with these lines
+        pred_df, _, _ = dk.label_pipeline.inverse_transform(pred_df)
+        if self.freqai_info.get("DI_threshold", 0) > 0:
+            dk.DI_values = dk.feature_pipeline["di"].di_values
+        else:
+            dk.DI_values = np.zeros(outliers.shape[0])
+        dk.do_predict = outliers
+
+        # ... your custom code
+        return (pred_df, dk.do_predict)
+```
+
+
+1. Data normalization and cleaning is now homogenized with the new pipeline definition. This is created in the new `define_data_pipeline()` and `define_label_pipeline()` functions. The `data_cleaning_train()` and `data_cleaning_predict()` functions are no longer used. You can override `define_data_pipeline()` to create your own custom pipeline if you wish.
+2. Data normalization and cleaning is now homogenized with the new pipeline definition. This is created in the new `define_data_pipeline()` and `define_label_pipeline()` functions. The `data_cleaning_train()` and `data_cleaning_predict()` functions are no longer used. You can override `define_data_pipeline()` to create your own custom pipeline if you wish.
+3. Data denormalization is done with the new pipeline. Replace this with the lines below.
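+
+To see the new flow in isolation, here is a minimal, self-contained sketch of the `fit_transform()`/`transform()`/`inverse_transform()` round trip using datasieve directly. The data and the single scaler step are illustrative assumptions; the three-value return unpacking mirrors the migration code above:
+
+```python
+import numpy as np
+from datasieve.pipeline import Pipeline
+from datasieve.transforms import SKLearnWrapper
+from sklearn.preprocessing import MinMaxScaler
+
+X_train = np.random.normal(size=(100, 5))  # stand-in for training features
+X_pred = np.random.normal(size=(10, 5))    # stand-in for prediction features
+
+pipeline = Pipeline([
+    ("scaler", SKLearnWrapper(MinMaxScaler(feature_range=(-1, 1)))),
+])
+
+# fit_transform on training data, transform on unseen data
+X_train_t, _, _ = pipeline.fit_transform(X_train)
+X_pred_t, _, _ = pipeline.transform(X_pred)
+
+# transformed values can be mapped back to the original space
+X_back, _, _ = pipeline.inverse_transform(X_pred_t)
+assert np.allclose(X_back, X_pred)
+```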
diff --git a/environment.yml b/environment.yml deleted file mode 100644 index e69de29bb..000000000 diff --git a/freqtrade/__init__.py b/freqtrade/__init__.py index bd54de301..7a0a675d3 100644 --- a/freqtrade/__init__.py +++ b/freqtrade/__init__.py @@ -1,5 +1,5 @@ """ Freqtrade bot """ -__version__ = '2023.5.1' +__version__ = '2023.6' if 'dev' in __version__: from pathlib import Path diff --git a/freqtrade/commands/__init__.py b/freqtrade/commands/__init__.py index 66a9c995b..b9346fd5f 100644 --- a/freqtrade/commands/__init__.py +++ b/freqtrade/commands/__init__.py @@ -19,7 +19,8 @@ from freqtrade.commands.list_commands import (start_list_exchanges, start_list_f start_list_markets, start_list_strategies, start_list_timeframes, start_show_trades) from freqtrade.commands.optimize_commands import (start_backtesting, start_backtesting_show, - start_edge, start_hyperopt) + start_edge, start_hyperopt, + start_lookahead_analysis) from freqtrade.commands.pairlist_commands import start_test_pairlist from freqtrade.commands.plot_commands import start_plot_dataframe, start_plot_profit from freqtrade.commands.strategy_utils_commands import start_strategy_update diff --git a/freqtrade/commands/arguments.py b/freqtrade/commands/arguments.py old mode 100644 new mode 100755 index 8287879c4..b0da8fa9d --- a/freqtrade/commands/arguments.py +++ b/freqtrade/commands/arguments.py @@ -117,7 +117,11 @@ NO_CONF_REQURIED = ["convert-data", "convert-trade-data", "download-data", "list NO_CONF_ALLOWED = ["create-userdir", "list-exchanges", "new-strategy"] -ARGS_STRATEGY_UTILS = ["strategy_list", "strategy_path", "recursive_strategy_search"] +ARGS_STRATEGY_UPDATER = ["strategy_list", "strategy_path", "recursive_strategy_search"] + +ARGS_LOOKAHEAD_ANALYSIS = [ + a for a in ARGS_BACKTEST if a not in ("position_stacking", "use_max_market_positions", 'cache') + ] + ["minimum_trade_amount", "targeted_trade_amount", "lookahead_analysis_exportfilename"] class Arguments: @@ -201,8 +205,9 @@ class Arguments: start_install_ui, start_list_data, start_list_exchanges, start_list_freqAI_models, start_list_markets, start_list_strategies, start_list_timeframes, - start_new_config, start_new_strategy, start_plot_dataframe, - start_plot_profit, start_show_trades, start_strategy_update, + start_lookahead_analysis, start_new_config, + start_new_strategy, start_plot_dataframe, start_plot_profit, + start_show_trades, start_strategy_update, start_test_pairlist, start_trading, start_webserver) subparsers = self.parser.add_subparsers(dest='command', @@ -451,4 +456,15 @@ class Arguments: 'files to the current version', parents=[_common_parser]) strategy_updater_cmd.set_defaults(func=start_strategy_update) - self._build_args(optionlist=ARGS_STRATEGY_UTILS, parser=strategy_updater_cmd) + self._build_args(optionlist=ARGS_STRATEGY_UPDATER, parser=strategy_updater_cmd) + + # Add lookahead_analysis subcommand + lookahead_analayis_cmd = subparsers.add_parser( + 'lookahead-analysis', + help="Check for potential look ahead bias.", + parents=[_common_parser, _strategy_parser]) + + lookahead_analayis_cmd.set_defaults(func=start_lookahead_analysis) + + self._build_args(optionlist=ARGS_LOOKAHEAD_ANALYSIS, + parser=lookahead_analayis_cmd) diff --git a/freqtrade/commands/cli_options.py b/freqtrade/commands/cli_options.py old mode 100644 new mode 100755 index f5e6d6926..08283430e --- a/freqtrade/commands/cli_options.py +++ b/freqtrade/commands/cli_options.py @@ -690,4 +690,21 @@ AVAILABLE_CLI_OPTIONS = { help='Run backtest with ready models.', 
action='store_true' ), + "minimum_trade_amount": Arg( + '--minimum-trade-amount', + help='Minimum trade amount for lookahead-analysis', + type=check_int_positive, + metavar='INT', + ), + "targeted_trade_amount": Arg( + '--targeted-trade-amount', + help='Targeted trade amount for lookahead analysis', + type=check_int_positive, + metavar='INT', + ), + "lookahead_analysis_exportfilename": Arg( + '--lookahead-analysis-exportfilename', + help="Use this csv-filename to store lookahead-analysis-results", + type=str + ), } diff --git a/freqtrade/commands/data_commands.py b/freqtrade/commands/data_commands.py index ed1571002..6d26b3ba9 100644 --- a/freqtrade/commands/data_commands.py +++ b/freqtrade/commands/data_commands.py @@ -1,18 +1,16 @@ import logging import sys from collections import defaultdict -from datetime import datetime, timedelta -from typing import Any, Dict, List +from typing import Any, Dict from freqtrade.configuration import TimeRange, setup_utils_configuration from freqtrade.constants import DATETIME_PRINT_FORMAT, Config from freqtrade.data.converter import convert_ohlcv_format, convert_trades_format -from freqtrade.data.history import (convert_trades_to_ohlcv, refresh_backtest_ohlcv_data, - refresh_backtest_trades_data) +from freqtrade.data.history import convert_trades_to_ohlcv, download_data_main from freqtrade.enums import CandleType, RunMode, TradingMode from freqtrade.exceptions import OperationalException -from freqtrade.exchange import market_is_active, timeframe_to_minutes -from freqtrade.plugins.pairlist.pairlist_helpers import dynamic_expand_pairlist, expand_pairlist +from freqtrade.exchange import timeframe_to_minutes +from freqtrade.plugins.pairlist.pairlist_helpers import expand_pairlist from freqtrade.resolvers import ExchangeResolver from freqtrade.util.binance_mig import migrate_binance_futures_data @@ -20,7 +18,7 @@ from freqtrade.util.binance_mig import migrate_binance_futures_data logger = logging.getLogger(__name__) -def _data_download_sanity(config: Config) -> None: +def _check_data_config_download_sanity(config: Config) -> None: if 'days' in config and 'timerange' in config: raise OperationalException("--days and --timerange are mutually exclusive. 
" "You can only specify one or the other.") @@ -37,78 +35,14 @@ def start_download_data(args: Dict[str, Any]) -> None: """ config = setup_utils_configuration(args, RunMode.UTIL_EXCHANGE) - _data_download_sanity(config) - timerange = TimeRange() - if 'days' in config: - time_since = (datetime.now() - timedelta(days=config['days'])).strftime("%Y%m%d") - timerange = TimeRange.parse_timerange(f'{time_since}-') - - if 'timerange' in config: - timerange = timerange.parse_timerange(config['timerange']) - - # Remove stake-currency to skip checks which are not relevant for datadownload - config['stake_currency'] = '' - - pairs_not_available: List[str] = [] - - # Init exchange - exchange = ExchangeResolver.load_exchange(config, validate=False) - markets = [p for p, m in exchange.markets.items() if market_is_active(m) - or config.get('include_inactive')] - - expanded_pairs = dynamic_expand_pairlist(config, markets) - - # Manual validations of relevant settings - if not config['exchange'].get('skip_pair_validation', False): - exchange.validate_pairs(expanded_pairs) - logger.info(f"About to download pairs: {expanded_pairs}, " - f"intervals: {config['timeframes']} to {config['datadir']}") - - for timeframe in config['timeframes']: - exchange.validate_timeframes(timeframe) + _check_data_config_download_sanity(config) try: - - if config.get('download_trades'): - if config.get('trading_mode') == 'futures': - raise OperationalException("Trade download not supported for futures.") - pairs_not_available = refresh_backtest_trades_data( - exchange, pairs=expanded_pairs, datadir=config['datadir'], - timerange=timerange, new_pairs_days=config['new_pairs_days'], - erase=bool(config.get('erase')), data_format=config['dataformat_trades']) - - # Convert downloaded trade data to different timeframes - convert_trades_to_ohlcv( - pairs=expanded_pairs, timeframes=config['timeframes'], - datadir=config['datadir'], timerange=timerange, erase=bool(config.get('erase')), - data_format_ohlcv=config['dataformat_ohlcv'], - data_format_trades=config['dataformat_trades'], - ) - else: - if not exchange.get_option('ohlcv_has_history', True): - raise OperationalException( - f"Historic klines not available for {exchange.name}. " - "Please use `--dl-trades` instead for this exchange " - "(will unfortunately take a long time)." 
- ) - migrate_binance_futures_data(config) - pairs_not_available = refresh_backtest_ohlcv_data( - exchange, pairs=expanded_pairs, timeframes=config['timeframes'], - datadir=config['datadir'], timerange=timerange, - new_pairs_days=config['new_pairs_days'], - erase=bool(config.get('erase')), data_format=config['dataformat_ohlcv'], - trading_mode=config.get('trading_mode', 'spot'), - prepend=config.get('prepend_data', False) - ) + download_data_main(config) except KeyboardInterrupt: sys.exit("SIGINT received, aborting ...") - finally: - if pairs_not_available: - logger.info(f"Pairs [{','.join(pairs_not_available)}] not available " - f"on exchange {exchange.name}.") - def start_convert_trades(args: Dict[str, Any]) -> None: diff --git a/freqtrade/commands/list_commands.py b/freqtrade/commands/list_commands.py index 3358f8cc8..84f237f77 100644 --- a/freqtrade/commands/list_commands.py +++ b/freqtrade/commands/list_commands.py @@ -1,7 +1,7 @@ import csv import logging import sys -from typing import Any, Dict, List +from typing import Any, Dict, List, Union import rapidjson from colorama import Fore, Style @@ -11,9 +11,10 @@ from tabulate import tabulate from freqtrade.configuration import setup_utils_configuration from freqtrade.enums import RunMode from freqtrade.exceptions import OperationalException -from freqtrade.exchange import market_is_active, validate_exchanges +from freqtrade.exchange import list_available_exchanges, market_is_active from freqtrade.misc import parse_db_uri_for_logging, plural from freqtrade.resolvers import ExchangeResolver, StrategyResolver +from freqtrade.types import ValidExchangesType logger = logging.getLogger(__name__) @@ -25,18 +26,42 @@ def start_list_exchanges(args: Dict[str, Any]) -> None: :param args: Cli args from Arguments() :return: None """ - exchanges = validate_exchanges(args['list_exchanges_all']) + exchanges = list_available_exchanges(args['list_exchanges_all']) if args['print_one_column']: - print('\n'.join([e[0] for e in exchanges])) + print('\n'.join([e['name'] for e in exchanges])) else: + headers = { + 'name': 'Exchange name', + 'supported': 'Supported', + 'trade_modes': 'Markets', + 'comment': 'Reason', + } + headers.update({'valid': 'Valid'} if args['list_exchanges_all'] else {}) + + def build_entry(exchange: ValidExchangesType, valid: bool): + valid_entry = {'valid': exchange['valid']} if valid else {} + result: Dict[str, Union[str, bool]] = { + 'name': exchange['name'], + **valid_entry, + 'supported': 'Official' if exchange['supported'] else '', + 'trade_modes': ', '.join( + (f"{a['margin_mode']} " if a['margin_mode'] else '') + a['trading_mode'] + for a in exchange['trade_modes'] + ), + 'comment': exchange['comment'], + } + + return result + if args['list_exchanges_all']: print("All exchanges supported by the ccxt library:") + exchanges = [build_entry(e, True) for e in exchanges] else: print("Exchanges available for Freqtrade:") - exchanges = [e for e in exchanges if e[1] is not False] + exchanges = [build_entry(e, False) for e in exchanges if e['valid'] is not False] - print(tabulate(exchanges, headers=['Exchange name', 'Valid', 'reason'])) + print(tabulate(exchanges, headers=headers, )) def _print_objs_tabular(objs: List, print_colorized: bool) -> None: diff --git a/freqtrade/commands/optimize_commands.py b/freqtrade/commands/optimize_commands.py index 1bfd384fc..cdddf0fe5 100644 --- a/freqtrade/commands/optimize_commands.py +++ b/freqtrade/commands/optimize_commands.py @@ -132,3 +132,15 @@ def start_edge(args: Dict[str, Any]) -> None: # 
diff --git a/freqtrade/commands/optimize_commands.py b/freqtrade/commands/optimize_commands.py
index 1bfd384fc..cdddf0fe5 100644
--- a/freqtrade/commands/optimize_commands.py
+++ b/freqtrade/commands/optimize_commands.py
@@ -132,3 +132,15 @@ def start_edge(args: Dict[str, Any]) -> None:
     # Initialize Edge object
     edge_cli = EdgeCli(config)
     edge_cli.start()
+
+
+def start_lookahead_analysis(args: Dict[str, Any]) -> None:
+    """
+    Start the backtest bias tester script
+    :param args: Cli args from Arguments()
+    :return: None
+    """
+    from freqtrade.optimize.lookahead_analysis_helpers import LookaheadAnalysisSubFunctions
+
+    config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)
+    LookaheadAnalysisSubFunctions.start(config)
diff --git a/freqtrade/configuration/configuration.py b/freqtrade/configuration/configuration.py
index 8e9a7fd7c..43ede568c 100644
--- a/freqtrade/configuration/configuration.py
+++ b/freqtrade/configuration/configuration.py
@@ -203,7 +203,7 @@ class Configuration:
         # This will override the strategy configuration
         self._args_to_config(config, argname='timeframe',
                              logstring='Parameter -i/--timeframe detected ... '
-                             'Using timeframe: {} ...')
+                                       'Using timeframe: {} ...')
 
         self._args_to_config(config, argname='position_stacking',
                              logstring='Parameter --enable-position-stacking detected ...')
@@ -300,6 +300,9 @@
         self._args_to_config(config, argname='hyperoptexportfilename',
                              logstring='Using hyperopt file: {}')
 
+        self._args_to_config(config, argname='lookahead_analysis_exportfilename',
+                             logstring='Saving lookahead analysis results into {} ...')
+
         self._args_to_config(config, argname='epochs',
                              logstring='Parameter --epochs detected ... '
                              'Will run Hyperopt with for {} epochs ...'
@@ -474,6 +477,16 @@
         self._args_to_config(config, argname='analysis_csv_path',
                              logstring='Path to store analysis CSVs: {}')
 
+        # Lookahead analysis results
+        self._args_to_config(config, argname='targeted_trade_amount',
+                             logstring='Targeted Trade amount: {}')
+
+        self._args_to_config(config, argname='minimum_trade_amount',
+                             logstring='Minimum Trade amount: {}')
+
+        self._args_to_config(config, argname='lookahead_analysis_exportfilename',
+                             logstring='Path to store lookahead-analysis-results: {}')
+
     def _process_runmode(self, config: Config) -> None:
 
         self._args_to_config(config, argname='dry_run',
@@ -552,6 +568,7 @@
             # Fall back to /dl_path/pairs.json
             pairs_file = config['datadir'] / 'pairs.json'
             if pairs_file.exists():
+                logger.info(f'Reading pairs file "{pairs_file}".')
                 config['pairs'] = load_file(pairs_file)
                 if 'pairs' in config and isinstance(config['pairs'], list):
                     config['pairs'].sort()
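Between the new `start_lookahead_analysis` entry point and the `_args_to_config` calls registered above, the lookahead-analysis options travel from CLI args into the config like any other utility subcommand. A rough sketch of that flow (editor's illustration; the strategy name and all literal values are invented, and the semantics in the comments are assumptions based only on the option names):

# Editor's illustration, not patch code: an args dict as the new entry point might receive it.
args = {
    'strategy': 'MyStrategy',                    # hypothetical strategy name
    'timerange': '20230101-20230401',
    'minimum_trade_amount': 10,                  # assumed: lower bound of trades before results count
    'targeted_trade_amount': 20,                 # assumed: number of trades the analysis aims at
    'lookahead_analysis_exportfilename': 'lookahead.csv',
}
# start_lookahead_analysis(args) builds the config via
# setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)
# and delegates to LookaheadAnalysisSubFunctions.start(config).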
diff --git a/freqtrade/configuration/timerange.py b/freqtrade/configuration/timerange.py
index cff35db7e..23e3a6b60 100644
--- a/freqtrade/configuration/timerange.py
+++ b/freqtrade/configuration/timerange.py
@@ -6,6 +6,8 @@
 import re
 from datetime import datetime, timezone
 from typing import Optional
 
+from typing_extensions import Self
+
 from freqtrade.constants import DATETIME_PRINT_FORMAT
 from freqtrade.exceptions import OperationalException
 
@@ -107,15 +109,15 @@ class TimeRange:
         self.startts = int(min_date.timestamp() + timeframe_secs * startup_candles)
         self.starttype = 'date'
 
-    @staticmethod
-    def parse_timerange(text: Optional[str]) -> 'TimeRange':
+    @classmethod
+    def parse_timerange(cls, text: Optional[str]) -> Self:
         """
         Parse the value of the argument --timerange to determine what is the range desired
         :param text: value from --timerange
         :return: Start and End range period
         """
         if not text:
-            return TimeRange(None, None, 0, 0)
+            return cls(None, None, 0, 0)
         syntax = [(r'^-(\d{8})$', (None, 'date')),
                   (r'^(\d{8})-$', ('date', None)),
                   (r'^(\d{8})-(\d{8})$', ('date', 'date')),
@@ -156,5 +158,5 @@
         if start > stop > 0:
             raise OperationalException(
                 f'Start date is after stop date for timerange "{text}"')
-        return TimeRange(stype[0], stype[1], start, stop)
+        return cls(stype[0], stype[1], start, stop)
         raise OperationalException(f'Incorrect syntax for timerange "{text}"')
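The `parse_timerange` change just above is more than cosmetic: a `@staticmethod` returning a hard-coded `TimeRange` silently downcasts any subclass, while `@classmethod` plus the `Self` return type constructs whatever class the method is invoked on. A minimal, self-contained sketch of the difference (editor's illustration, not freqtrade code; `typing_extensions` is needed for `Self` on Python < 3.11):

# Editor's illustration: why classmethod + Self beats a staticmethod naming the base class.
from typing import Optional
from typing_extensions import Self


class Range:
    def __init__(self, start: int = 0, stop: int = 0) -> None:
        self.start, self.stop = start, stop

    @classmethod
    def parse(cls, text: Optional[str]) -> Self:
        # cls is the class parse() was called on, so subclasses round-trip intact.
        if not text:
            return cls(0, 0)
        start, _, stop = text.partition('-')
        return cls(int(start or 0), int(stop or 0))


class SubRange(Range):
    pass


assert isinstance(SubRange.parse('20230101-20230401'), SubRange)
# With `return Range(...)` in a staticmethod, SubRange.parse() would return a plain Range.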
diff --git a/freqtrade/constants.py b/freqtrade/constants.py
index 3802ec3ad..de1e7aa51 100644
--- a/freqtrade/constants.py
+++ b/freqtrade/constants.py
@@ -8,6 +8,7 @@
 from typing import Any, Dict, List, Literal, Tuple
 
 from freqtrade.enums import CandleType, PriceType, RPCMessageType
 
+DOCS_LINK = "https://www.freqtrade.io/en/stable"
 DEFAULT_CONFIG = 'config.json'
 DEFAULT_EXCHANGE = 'bittrex'
 PROCESS_THROTTLE_SECS = 5  # sec
@@ -111,6 +112,8 @@
     }
 }
 
+__MESSAGE_TYPE_DICT: Dict[str, Dict[str, str]] = {x: {'type': 'object'} for x in RPCMessageType}
+
 # Required json-schema for user specified config
 CONF_SCHEMA = {
     'type': 'object',
@@ -148,7 +151,6 @@
         'patternProperties': {
             '^[0-9.]+$': {'type': 'number'}
         },
-        'minProperties': 1
     },
     'amount_reserve_percent': {'type': 'number', 'minimum': 0.0, 'maximum': 0.5},
     'stoploss': {'type': 'number', 'maximum': 0, 'exclusiveMaximum': True, 'minimum': -1},
@@ -164,6 +166,9 @@
         'trading_mode': {'type': 'string', 'enum': TRADING_MODES},
         'margin_mode': {'type': 'string', 'enum': MARGIN_MODES},
         'reduce_df_footprint': {'type': 'boolean', 'default': False},
+        'minimum_trade_amount': {'type': 'number', 'default': 10},
+        'targeted_trade_amount': {'type': 'number', 'default': 20},
+        'lookahead_analysis_exportfilename': {'type': 'string'},
         'liquidation_buffer': {'type': 'number', 'minimum': 0.0, 'maximum': 0.99},
         'backtest_breakdown': {
             'type': 'array',
@@ -351,7 +356,8 @@
             'format': {'type': 'string', 'enum': WEBHOOK_FORMAT_OPTIONS, 'default': 'form'},
             'retries': {'type': 'integer', 'minimum': 0},
             'retry_delay': {'type': 'number', 'minimum': 0},
-            **dict([(x, {'type': 'object'}) for x in RPCMessageType]),
+            **__MESSAGE_TYPE_DICT,
+            # **{x: {'type': 'object'} for x in RPCMessageType},
             # Below -> Deprecated
             'webhookentry': {'type': 'object'},
             'webhookentrycancel': {'type': 'object'},
diff --git a/freqtrade/data/history/__init__.py b/freqtrade/data/history/__init__.py
index 107f9c401..414848c22 100644
--- a/freqtrade/data/history/__init__.py
+++ b/freqtrade/data/history/__init__.py
@@ -6,7 +6,7 @@ Includes:
 * download data from exchange and store to disk
 """
 # flake8: noqa: F401
-from .history_utils import (convert_trades_to_ohlcv, get_timerange, load_data, load_pair_history,
-                            refresh_backtest_ohlcv_data, refresh_backtest_trades_data, refresh_data,
-                            validate_backtest_data)
+from .history_utils import (convert_trades_to_ohlcv, download_data_main, get_timerange, load_data,
+                            load_pair_history, refresh_backtest_ohlcv_data,
+                            refresh_backtest_trades_data, refresh_data, validate_backtest_data)
 from .idatahandler import get_datahandler
diff --git a/freqtrade/data/history/history_utils.py b/freqtrade/data/history/history_utils.py
index dc3c7c1e6..e61f59cfa 100644
--- a/freqtrade/data/history/history_utils.py
+++ b/freqtrade/data/history/history_utils.py
@@ -7,7 +7,7 @@
 from typing import Dict, List, Optional, Tuple
 
 from pandas import DataFrame, concat
 
 from freqtrade.configuration import TimeRange
-from freqtrade.constants import DEFAULT_DATAFRAME_COLUMNS
+from freqtrade.constants import DATETIME_PRINT_FORMAT, DEFAULT_DATAFRAME_COLUMNS, Config
 from freqtrade.data.converter import (clean_ohlcv_dataframe, ohlcv_to_dataframe,
                                       trades_remove_duplicates, trades_to_ohlcv)
 from freqtrade.data.history.idatahandler import IDataHandler, get_datahandler
@@ -15,6 +15,8 @@
 from freqtrade.enums import CandleType
 from freqtrade.exceptions import OperationalException
 from freqtrade.exchange import Exchange
 from freqtrade.misc import format_ms_time
+from freqtrade.plugins.pairlist.pairlist_helpers import dynamic_expand_pairlist
+from freqtrade.util.binance_mig import migrate_binance_futures_data
 
 
 logger = logging.getLogger(__name__)
@@ -227,9 +229,11 @@ def _download_pair_history(pair: str, *,
         )
         logger.debug("Current Start: %s",
-                     f"{data.iloc[0]['date']:DATETIME_PRINT_FORMAT}" if not data.empty else 'None')
+                     f"{data.iloc[0]['date']:{DATETIME_PRINT_FORMAT}}"
+                     if not data.empty else 'None')
         logger.debug("Current End: %s",
-                     f"{data.iloc[-1]['date']:DATETIME_PRINT_FORMAT}" if not data.empty else 'None')
+                     f"{data.iloc[-1]['date']:{DATETIME_PRINT_FORMAT}}"
+                     if not data.empty else 'None')
 
         # Default since_ms to 30 days if nothing is given
         new_data = exchange.get_historic_ohlcv(pair=pair,
@@ -252,10 +256,12 @@
             data = clean_ohlcv_dataframe(concat([data, new_dataframe], axis=0), timeframe, pair,
                                          fill_missing=False, drop_incomplete=False)
-        logger.debug("New Start: %s",
-                     f"{data.iloc[0]['date']:DATETIME_PRINT_FORMAT}" if not data.empty else 'None')
+        logger.debug("New Start: %s",
+                     f"{data.iloc[0]['date']:{DATETIME_PRINT_FORMAT}}"
+                     if not data.empty else 'None')
         logger.debug("New End: %s",
-                     f"{data.iloc[-1]['date']:DATETIME_PRINT_FORMAT}" if not data.empty else 'None')
+                     f"{data.iloc[-1]['date']:{DATETIME_PRINT_FORMAT}}"
+                     if not data.empty else 'None')
 
         data_handler.ohlcv_store(pair, timeframe, data=data, candle_type=candle_type)
         return True
@@ -290,7 +296,7 @@ def refresh_backtest_ohlcv_data(exchange: Exchange, pairs: List[str], timeframes
             continue
         for timeframe in timeframes:
-            logger.info(f'Downloading pair {pair}, interval {timeframe}.')
+            logger.debug(f'Downloading pair {pair}, {candle_type}, interval {timeframe}.')
             process = f'{idx}/{len(pairs)}'
             _download_pair_history(pair=pair, process=process,
                                    datadir=datadir, exchange=exchange,
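The `DATETIME_PRINT_FORMAT` hunks above fix a genuine f-string pitfall: in `f"{value:NAME}"` everything after the colon is a literal format spec, so the constant's *name* was being handed to `strftime` rather than its value. Nesting braces substitutes the constant first. A two-line demonstration (editor's illustration, not patch code):

# Editor's illustration of the format-spec fix above.
from datetime import datetime

DATETIME_PRINT_FORMAT = '%Y-%m-%d %H:%M:%S'
now = datetime(2023, 6, 25, 12, 0)
print(f"{now:DATETIME_PRINT_FORMAT}")    # wrong: spec taken literally, prints 'DATETIME_PRINT_FORMAT'
print(f"{now:{DATETIME_PRINT_FORMAT}}")  # right: prints '2023-06-25 12:00:00'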
@@ -479,3 +485,77 @@ def validate_backtest_data(data: DataFrame, pair: str, min_date: datetime,
         logger.warning("%s has missing frames: expected %s, got %s, that's %s missing values",
                        pair, expected_frames, dflen, expected_frames - dflen)
     return found_missing
+
+
+def download_data_main(config: Config) -> None:
+
+    timerange = TimeRange()
+    if 'days' in config:
+        time_since = (datetime.now() - timedelta(days=config['days'])).strftime("%Y%m%d")
+        timerange = TimeRange.parse_timerange(f'{time_since}-')
+
+    if 'timerange' in config:
+        timerange = timerange.parse_timerange(config['timerange'])
+
+    # Remove stake-currency to skip checks which are not relevant for datadownload
+    config['stake_currency'] = ''
+
+    pairs_not_available: List[str] = []
+
+    # Init exchange
+    from freqtrade.resolvers.exchange_resolver import ExchangeResolver
+    exchange = ExchangeResolver.load_exchange(config, validate=False)
+    available_pairs = [
+        p for p in exchange.get_markets(
+            tradable_only=True, active_only=not config.get('include_inactive')
+        ).keys()
+    ]
+
+    expanded_pairs = dynamic_expand_pairlist(config, available_pairs)
+
+    # Manual validations of relevant settings
+    if not config['exchange'].get('skip_pair_validation', False):
+        exchange.validate_pairs(expanded_pairs)
+    logger.info(f"About to download pairs: {expanded_pairs}, "
+                f"intervals: {config['timeframes']} to {config['datadir']}")
+
+    for timeframe in config['timeframes']:
+        exchange.validate_timeframes(timeframe)
+
+    # Start downloading
+    try:
+        if config.get('download_trades'):
+            if config.get('trading_mode') == 'futures':
+                raise OperationalException("Trade download not supported for futures.")
+            pairs_not_available = refresh_backtest_trades_data(
+                exchange, pairs=expanded_pairs, datadir=config['datadir'],
+                timerange=timerange, new_pairs_days=config['new_pairs_days'],
+                erase=bool(config.get('erase')), data_format=config['dataformat_trades'])
+
+            # Convert downloaded trade data to different timeframes
+            convert_trades_to_ohlcv(
+                pairs=expanded_pairs, timeframes=config['timeframes'],
+                datadir=config['datadir'], timerange=timerange, erase=bool(config.get('erase')),
+                data_format_ohlcv=config['dataformat_ohlcv'],
+                data_format_trades=config['dataformat_trades'],
+            )
+        else:
+            if not exchange.get_option('ohlcv_has_history', True):
+                raise OperationalException(
+                    f"Historic klines not available for {exchange.name}. "
+                    "Please use `--dl-trades` instead for this exchange "
+                    "(will unfortunately take a long time)."
+                )
+            migrate_binance_futures_data(config)
+            pairs_not_available = refresh_backtest_ohlcv_data(
+                exchange, pairs=expanded_pairs, timeframes=config['timeframes'],
+                datadir=config['datadir'], timerange=timerange,
+                new_pairs_days=config['new_pairs_days'],
+                erase=bool(config.get('erase')), data_format=config['dataformat_ohlcv'],
+                trading_mode=config.get('trading_mode', 'spot'),
+                prepend=config.get('prepend_data', False)
+            )
+    finally:
+        if pairs_not_available:
+            logger.info(f"Pairs [{','.join(pairs_not_available)}] not available "
+                        f"on exchange {exchange.name}.")
diff --git a/freqtrade/enums/marginmode.py b/freqtrade/enums/marginmode.py
index 1e42809ea..7fd749b29 100644
--- a/freqtrade/enums/marginmode.py
+++ b/freqtrade/enums/marginmode.py
@@ -1,7 +1,7 @@
 from enum import Enum
 
 
-class MarginMode(Enum):
+class MarginMode(str, Enum):
     """
     Enum to distinguish between
     cross margin/futures margin_mode and
diff --git a/freqtrade/exchange/__init__.py b/freqtrade/exchange/__init__.py
index 12fb0c55e..9ac31a0d8 100644
--- a/freqtrade/exchange/__init__.py
+++ b/freqtrade/exchange/__init__.py
@@ -13,11 +13,11 @@
 from freqtrade.exchange.exchange_utils import (ROUND_DOWN, ROUND_UP, amount_to_contract_precision,
                                                amount_to_contracts, amount_to_precision,
                                                available_exchanges, ccxt_exchanges,
                                                contracts_to_amount, date_minus_candles,
-                                               is_exchange_known_ccxt, market_is_active,
-                                               price_to_precision, timeframe_to_minutes,
-                                               timeframe_to_msecs, timeframe_to_next_date,
-                                               timeframe_to_prev_date, timeframe_to_seconds,
-                                               validate_exchange, validate_exchanges)
+                                               is_exchange_known_ccxt, list_available_exchanges,
+                                               market_is_active, price_to_precision,
+                                               timeframe_to_minutes, timeframe_to_msecs,
+                                               timeframe_to_next_date, timeframe_to_prev_date,
+                                               timeframe_to_seconds, validate_exchange)
 from freqtrade.exchange.gate import Gate
 from freqtrade.exchange.hitbtc import Hitbtc
 from freqtrade.exchange.huobi import Huobi
diff --git a/freqtrade/exchange/binance_leverage_tiers.json b/freqtrade/exchange/binance_leverage_tiers.json
index 0f252f63e..b211be701 100644
--- a/freqtrade/exchange/binance_leverage_tiers.json
+++ b/freqtrade/exchange/binance_leverage_tiers.json
@@ -120,10 +120,10 @@
       "minNotional": 0.0,
       "maxNotional": 5000.0,
       "maintenanceMarginRate": 0.02,
-      "maxLeverage": 20.0,
+      "maxLeverage": 10.0,
       "info": {
         "bracket": "1",
-        "initialLeverage": "20", 
+ "initialLeverage": "10", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.02", @@ -136,10 +136,10 @@ "minNotional": 5000.0, "maxNotional": 25000.0, "maintenanceMarginRate": 0.025, - "maxLeverage": 10.0, + "maxLeverage": 8.0, "info": { "bracket": "2", - "initialLeverage": "10", + "initialLeverage": "8", "notionalCap": "25000", "notionalFloor": "5000", "maintMarginRatio": "0.025", @@ -152,10 +152,10 @@ "minNotional": 25000.0, "maxNotional": 100000.0, "maintenanceMarginRate": 0.05, - "maxLeverage": 8.0, + "maxLeverage": 6.0, "info": { "bracket": "3", - "initialLeverage": "8", + "initialLeverage": "6", "notionalCap": "100000", "notionalFloor": "25000", "maintMarginRatio": "0.05", @@ -198,13 +198,13 @@ "tier": 6.0, "currency": "BUSD", "minNotional": 1000000.0, - "maxNotional": 5000000.0, + "maxNotional": 1500000.0, "maintenanceMarginRate": 0.5, "maxLeverage": 1.0, "info": { "bracket": "6", "initialLeverage": "1", - "notionalCap": "5000000", + "notionalCap": "1500000", "notionalFloor": "1000000", "maintMarginRatio": "0.5", "cum": "386900.0" @@ -332,120 +332,6 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.02, - "maxLeverage": 20.0, - "info": { - "bracket": "1", - "initialLeverage": "20", - "notionalCap": "5000", - "notionalFloor": "0", - "maintMarginRatio": "0.02", - "cum": "0.0" - } - }, - { - "tier": 2.0, - "currency": "USDT", - "minNotional": 5000.0, - "maxNotional": 25000.0, - "maintenanceMarginRate": 0.025, - "maxLeverage": 15.0, - "info": { - "bracket": "2", - "initialLeverage": "15", - "notionalCap": "25000", - "notionalFloor": "5000", - "maintMarginRatio": "0.025", - "cum": "25.0" - } - }, - { - "tier": 3.0, - "currency": "USDT", - "minNotional": 25000.0, - "maxNotional": 600000.0, - "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, - "info": { - "bracket": "3", - "initialLeverage": "10", - "notionalCap": "600000", - "notionalFloor": "25000", - "maintMarginRatio": "0.05", - "cum": "650.0" - } - }, - { - "tier": 4.0, - "currency": "USDT", - "minNotional": 600000.0, - "maxNotional": 1600000.0, - "maintenanceMarginRate": 0.1, - "maxLeverage": 5.0, - "info": { - "bracket": "4", - "initialLeverage": "5", - "notionalCap": "1600000", - "notionalFloor": "600000", - "maintMarginRatio": "0.1", - "cum": "30650.0" - } - }, - { - "tier": 5.0, - "currency": "USDT", - "minNotional": 1600000.0, - "maxNotional": 2000000.0, - "maintenanceMarginRate": 0.125, - "maxLeverage": 4.0, - "info": { - "bracket": "5", - "initialLeverage": "4", - "notionalCap": "2000000", - "notionalFloor": "1600000", - "maintMarginRatio": "0.125", - "cum": "70650.0" - } - }, - { - "tier": 6.0, - "currency": "USDT", - "minNotional": 2000000.0, - "maxNotional": 6000000.0, - "maintenanceMarginRate": 0.25, - "maxLeverage": 2.0, - "info": { - "bracket": "6", - "initialLeverage": "2", - "notionalCap": "6000000", - "notionalFloor": "2000000", - "maintMarginRatio": "0.25", - "cum": "320650.0" - } - }, - { - "tier": 7.0, - "currency": "USDT", - "minNotional": 6000000.0, - "maxNotional": 10000000.0, - "maintenanceMarginRate": 0.5, - "maxLeverage": 1.0, - "info": { - "bracket": "7", - "initialLeverage": "1", - "notionalCap": "10000000", - "notionalFloor": "6000000", - "maintMarginRatio": "0.5", - "cum": "1820650.0" - } - } - ], - "1000SHIB/BUSD:BUSD": [ - { - "tier": 1.0, - "currency": "BUSD", - "minNotional": 0.0, - "maxNotional": 5000.0, - "maintenanceMarginRate": 0.02, "maxLeverage": 25.0, "info": { "bracket": "1", @@ -456,16 +342,130 @@ "cum": "0.0" } }, + { + "tier": 2.0, + "currency": 
"USDT", + "minNotional": 5000.0, + "maxNotional": 50000.0, + "maintenanceMarginRate": 0.025, + "maxLeverage": 20.0, + "info": { + "bracket": "2", + "initialLeverage": "20", + "notionalCap": "50000", + "notionalFloor": "5000", + "maintMarginRatio": "0.025", + "cum": "25.0" + } + }, + { + "tier": 3.0, + "currency": "USDT", + "minNotional": 50000.0, + "maxNotional": 900000.0, + "maintenanceMarginRate": 0.05, + "maxLeverage": 10.0, + "info": { + "bracket": "3", + "initialLeverage": "10", + "notionalCap": "900000", + "notionalFloor": "50000", + "maintMarginRatio": "0.05", + "cum": "1275.0" + } + }, + { + "tier": 4.0, + "currency": "USDT", + "minNotional": 900000.0, + "maxNotional": 2400000.0, + "maintenanceMarginRate": 0.1, + "maxLeverage": 5.0, + "info": { + "bracket": "4", + "initialLeverage": "5", + "notionalCap": "2400000", + "notionalFloor": "900000", + "maintMarginRatio": "0.1", + "cum": "46275.0" + } + }, + { + "tier": 5.0, + "currency": "USDT", + "minNotional": 2400000.0, + "maxNotional": 3000000.0, + "maintenanceMarginRate": 0.125, + "maxLeverage": 4.0, + "info": { + "bracket": "5", + "initialLeverage": "4", + "notionalCap": "3000000", + "notionalFloor": "2400000", + "maintMarginRatio": "0.125", + "cum": "106275.0" + } + }, + { + "tier": 6.0, + "currency": "USDT", + "minNotional": 3000000.0, + "maxNotional": 9000000.0, + "maintenanceMarginRate": 0.25, + "maxLeverage": 2.0, + "info": { + "bracket": "6", + "initialLeverage": "2", + "notionalCap": "9000000", + "notionalFloor": "3000000", + "maintMarginRatio": "0.25", + "cum": "481275.0" + } + }, + { + "tier": 7.0, + "currency": "USDT", + "minNotional": 9000000.0, + "maxNotional": 15000000.0, + "maintenanceMarginRate": 0.5, + "maxLeverage": 1.0, + "info": { + "bracket": "7", + "initialLeverage": "1", + "notionalCap": "15000000", + "notionalFloor": "9000000", + "maintMarginRatio": "0.5", + "cum": "2731275.0" + } + } + ], + "1000SHIB/BUSD:BUSD": [ + { + "tier": 1.0, + "currency": "BUSD", + "minNotional": 0.0, + "maxNotional": 5000.0, + "maintenanceMarginRate": 0.02, + "maxLeverage": 10.0, + "info": { + "bracket": "1", + "initialLeverage": "10", + "notionalCap": "5000", + "notionalFloor": "0", + "maintMarginRatio": "0.02", + "cum": "0.0" + } + }, { "tier": 2.0, "currency": "BUSD", "minNotional": 5000.0, "maxNotional": 25000.0, "maintenanceMarginRate": 0.025, - "maxLeverage": 15.0, + "maxLeverage": 8.0, "info": { "bracket": "2", - "initialLeverage": "15", + "initialLeverage": "8", "notionalCap": "25000", "notionalFloor": "5000", "maintMarginRatio": "0.025", @@ -478,10 +478,10 @@ "minNotional": 25000.0, "maxNotional": 100000.0, "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, + "maxLeverage": 6.0, "info": { "bracket": "3", - "initialLeverage": "10", + "initialLeverage": "6", "notionalCap": "100000", "notionalFloor": "25000", "maintMarginRatio": "0.05", @@ -540,13 +540,13 @@ "tier": 7.0, "currency": "BUSD", "minNotional": 3000000.0, - "maxNotional": 8000000.0, + "maxNotional": 3500000.0, "maintenanceMarginRate": 0.5, "maxLeverage": 1.0, "info": { "bracket": "7", "initialLeverage": "1", - "notionalCap": "8000000", + "notionalCap": "3500000", "notionalFloor": "3000000", "maintMarginRatio": "0.5", "cum": "949400.0" @@ -590,13 +590,13 @@ "tier": 3.0, "currency": "USDT", "minNotional": 25000.0, - "maxNotional": 50000.0, + "maxNotional": 100000.0, "maintenanceMarginRate": 0.01, "maxLeverage": 25.0, "info": { "bracket": "3", "initialLeverage": "25", - "notionalCap": "50000", + "notionalCap": "100000", "notionalFloor": "25000", "maintMarginRatio": 
"0.01", "cum": "67.5" @@ -605,87 +605,87 @@ { "tier": 4.0, "currency": "USDT", - "minNotional": 50000.0, - "maxNotional": 150000.0, + "minNotional": 100000.0, + "maxNotional": 300000.0, "maintenanceMarginRate": 0.025, "maxLeverage": 20.0, "info": { "bracket": "4", "initialLeverage": "20", - "notionalCap": "150000", - "notionalFloor": "50000", + "notionalCap": "300000", + "notionalFloor": "100000", "maintMarginRatio": "0.025", - "cum": "817.5" + "cum": "1567.5" } }, { "tier": 5.0, "currency": "USDT", - "minNotional": 150000.0, - "maxNotional": 250000.0, + "minNotional": 300000.0, + "maxNotional": 750000.0, "maintenanceMarginRate": 0.05, "maxLeverage": 10.0, "info": { "bracket": "5", "initialLeverage": "10", - "notionalCap": "250000", - "notionalFloor": "150000", + "notionalCap": "750000", + "notionalFloor": "300000", "maintMarginRatio": "0.05", - "cum": "4567.5" + "cum": "9067.5" } }, { "tier": 6.0, "currency": "USDT", - "minNotional": 250000.0, - "maxNotional": 500000.0, + "minNotional": 750000.0, + "maxNotional": 1500000.0, "maintenanceMarginRate": 0.1, "maxLeverage": 5.0, "info": { "bracket": "6", "initialLeverage": "5", - "notionalCap": "500000", - "notionalFloor": "250000", + "notionalCap": "1500000", + "notionalFloor": "750000", "maintMarginRatio": "0.1", - "cum": "17067.5" + "cum": "46567.5" } }, { "tier": 7.0, "currency": "USDT", - "minNotional": 500000.0, - "maxNotional": 1000000.0, + "minNotional": 1500000.0, + "maxNotional": 3000000.0, "maintenanceMarginRate": 0.125, "maxLeverage": 4.0, "info": { "bracket": "7", "initialLeverage": "4", - "notionalCap": "1000000", - "notionalFloor": "500000", + "notionalCap": "3000000", + "notionalFloor": "1500000", "maintMarginRatio": "0.125", - "cum": "29567.5" + "cum": "84067.5" } }, { "tier": 8.0, "currency": "USDT", - "minNotional": 1000000.0, - "maxNotional": 2000000.0, + "minNotional": 3000000.0, + "maxNotional": 6000000.0, "maintenanceMarginRate": 0.25, "maxLeverage": 2.0, "info": { "bracket": "8", "initialLeverage": "2", - "notionalCap": "2000000", - "notionalFloor": "1000000", + "notionalCap": "6000000", + "notionalFloor": "3000000", "maintMarginRatio": "0.25", - "cum": "154567.5" + "cum": "459067.5" } }, { "tier": 9.0, "currency": "USDT", - "minNotional": 2000000.0, + "minNotional": 6000000.0, "maxNotional": 30000000.0, "maintenanceMarginRate": 0.5, "maxLeverage": 1.0, @@ -693,9 +693,9 @@ "bracket": "9", "initialLeverage": "1", "notionalCap": "30000000", - "notionalFloor": "2000000", + "notionalFloor": "6000000", "maintMarginRatio": "0.5", - "cum": "654567.5" + "cum": "1959067.5" } } ], @@ -1978,10 +1978,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.02, - "maxLeverage": 20.0, + "maxLeverage": 10.0, "info": { "bracket": "1", - "initialLeverage": "20", + "initialLeverage": "10", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.02", @@ -1994,10 +1994,10 @@ "minNotional": 5000.0, "maxNotional": 25000.0, "maintenanceMarginRate": 0.025, - "maxLeverage": 15.0, + "maxLeverage": 8.0, "info": { "bracket": "2", - "initialLeverage": "15", + "initialLeverage": "8", "notionalCap": "25000", "notionalFloor": "5000", "maintMarginRatio": "0.025", @@ -2010,10 +2010,10 @@ "minNotional": 25000.0, "maxNotional": 200000.0, "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, + "maxLeverage": 6.0, "info": { "bracket": "3", - "initialLeverage": "10", + "initialLeverage": "6", "notionalCap": "200000", "notionalFloor": "25000", "maintMarginRatio": "0.05", @@ -2072,13 +2072,13 @@ "tier": 7.0, "currency": "USDT", 
"minNotional": 3000000.0, - "maxNotional": 5000000.0, + "maxNotional": 3500000.0, "maintenanceMarginRate": 0.5, "maxLeverage": 1.0, "info": { "bracket": "7", "initialLeverage": "1", - "notionalCap": "5000000", + "notionalCap": "3500000", "notionalFloor": "3000000", "maintMarginRatio": "0.5", "cum": "898150.0" @@ -3132,13 +3132,13 @@ "tier": 3.0, "currency": "USDT", "minNotional": 50000.0, - "maxNotional": 400000.0, + "maxNotional": 600000.0, "maintenanceMarginRate": 0.025, "maxLeverage": 20.0, "info": { "bracket": "3", "initialLeverage": "20", - "notionalCap": "400000", + "notionalCap": "600000", "notionalFloor": "50000", "maintMarginRatio": "0.025", "cum": "770.0" @@ -3147,39 +3147,39 @@ { "tier": 4.0, "currency": "USDT", - "minNotional": 400000.0, - "maxNotional": 800000.0, + "minNotional": 600000.0, + "maxNotional": 1200000.0, "maintenanceMarginRate": 0.05, "maxLeverage": 10.0, "info": { "bracket": "4", "initialLeverage": "10", - "notionalCap": "800000", - "notionalFloor": "400000", + "notionalCap": "1200000", + "notionalFloor": "600000", "maintMarginRatio": "0.05", - "cum": "10770.0" + "cum": "15770.0" } }, { "tier": 5.0, "currency": "USDT", - "minNotional": 800000.0, - "maxNotional": 2000000.0, + "minNotional": 1200000.0, + "maxNotional": 3000000.0, "maintenanceMarginRate": 0.1, "maxLeverage": 5.0, "info": { "bracket": "5", "initialLeverage": "5", - "notionalCap": "2000000", - "notionalFloor": "800000", + "notionalCap": "3000000", + "notionalFloor": "1200000", "maintMarginRatio": "0.1", - "cum": "50770.0" + "cum": "75770.0" } }, { "tier": 6.0, "currency": "USDT", - "minNotional": 2000000.0, + "minNotional": 3000000.0, "maxNotional": 5000000.0, "maintenanceMarginRate": 0.125, "maxLeverage": 4.0, @@ -3187,9 +3187,9 @@ "bracket": "6", "initialLeverage": "4", "notionalCap": "5000000", - "notionalFloor": "2000000", + "notionalFloor": "3000000", "maintMarginRatio": "0.125", - "cum": "100770.0" + "cum": "150770.0" } }, { @@ -3205,7 +3205,7 @@ "notionalCap": "12000000", "notionalFloor": "5000000", "maintMarginRatio": "0.25", - "cum": "725770.0" + "cum": "775770.0" } }, { @@ -3221,7 +3221,7 @@ "notionalCap": "20000000", "notionalFloor": "12000000", "maintMarginRatio": "0.5", - "cum": "3725770.0" + "cum": "3775770.0" } } ], @@ -4874,13 +4874,13 @@ "tier": 2.0, "currency": "USDT", "minNotional": 5000.0, - "maxNotional": 25000.0, + "maxNotional": 50000.0, "maintenanceMarginRate": 0.025, "maxLeverage": 10.0, "info": { "bracket": "2", "initialLeverage": "10", - "notionalCap": "25000", + "notionalCap": "50000", "notionalFloor": "5000", "maintMarginRatio": "0.025", "cum": "25.0" @@ -4889,65 +4889,81 @@ { "tier": 3.0, "currency": "USDT", - "minNotional": 25000.0, - "maxNotional": 100000.0, + "minNotional": 50000.0, + "maxNotional": 200000.0, "maintenanceMarginRate": 0.05, "maxLeverage": 8.0, "info": { "bracket": "3", "initialLeverage": "8", - "notionalCap": "100000", - "notionalFloor": "25000", + "notionalCap": "200000", + "notionalFloor": "50000", "maintMarginRatio": "0.05", - "cum": "650.0" + "cum": "1275.0" } }, { "tier": 4.0, "currency": "USDT", - "minNotional": 100000.0, - "maxNotional": 250000.0, + "minNotional": 200000.0, + "maxNotional": 500000.0, "maintenanceMarginRate": 0.1, "maxLeverage": 5.0, "info": { "bracket": "4", "initialLeverage": "5", - "notionalCap": "250000", - "notionalFloor": "100000", + "notionalCap": "500000", + "notionalFloor": "200000", "maintMarginRatio": "0.1", - "cum": "5650.0" + "cum": "11275.0" } }, { "tier": 5.0, "currency": "USDT", - "minNotional": 250000.0, + 
"minNotional": 500000.0, "maxNotional": 1000000.0, "maintenanceMarginRate": 0.125, - "maxLeverage": 2.0, + "maxLeverage": 4.0, "info": { "bracket": "5", - "initialLeverage": "2", + "initialLeverage": "4", "notionalCap": "1000000", - "notionalFloor": "250000", + "notionalFloor": "500000", "maintMarginRatio": "0.125", - "cum": "11900.0" + "cum": "23775.0" } }, { "tier": 6.0, "currency": "USDT", "minNotional": 1000000.0, - "maxNotional": 2000000.0, + "maxNotional": 1500000.0, + "maintenanceMarginRate": 0.25, + "maxLeverage": 2.0, + "info": { + "bracket": "6", + "initialLeverage": "2", + "notionalCap": "1500000", + "notionalFloor": "1000000", + "maintMarginRatio": "0.25", + "cum": "148775.0" + } + }, + { + "tier": 7.0, + "currency": "USDT", + "minNotional": 1500000.0, + "maxNotional": 3000000.0, "maintenanceMarginRate": 0.5, "maxLeverage": 1.0, "info": { - "bracket": "6", + "bracket": "7", "initialLeverage": "1", - "notionalCap": "2000000", - "notionalFloor": "1000000", + "notionalCap": "3000000", + "notionalFloor": "1500000", "maintMarginRatio": "0.5", - "cum": "386900.0" + "cum": "523775.0" } } ], @@ -6714,13 +6730,13 @@ "tier": 2.0, "currency": "USDT", "minNotional": 5000.0, - "maxNotional": 25000.0, + "maxNotional": 50000.0, "maintenanceMarginRate": 0.015, "maxLeverage": 25.0, "info": { "bracket": "2", "initialLeverage": "25", - "notionalCap": "25000", + "notionalCap": "50000", "notionalFloor": "5000", "maintMarginRatio": "0.015", "cum": "25.0" @@ -6729,23 +6745,23 @@ { "tier": 3.0, "currency": "USDT", - "minNotional": 25000.0, - "maxNotional": 300000.0, + "minNotional": 50000.0, + "maxNotional": 400000.0, "maintenanceMarginRate": 0.02, "maxLeverage": 20.0, "info": { "bracket": "3", "initialLeverage": "20", - "notionalCap": "300000", - "notionalFloor": "25000", + "notionalCap": "400000", + "notionalFloor": "50000", "maintMarginRatio": "0.02", - "cum": "150.0" + "cum": "275.0" } }, { "tier": 4.0, "currency": "USDT", - "minNotional": 300000.0, + "minNotional": 400000.0, "maxNotional": 1200000.0, "maintenanceMarginRate": 0.05, "maxLeverage": 10.0, @@ -6753,9 +6769,9 @@ "bracket": "4", "initialLeverage": "10", "notionalCap": "1200000", - "notionalFloor": "300000", + "notionalFloor": "400000", "maintMarginRatio": "0.05", - "cum": "9150.0" + "cum": "12275.0" } }, { @@ -6771,7 +6787,7 @@ "notionalCap": "3000000", "notionalFloor": "1200000", "maintMarginRatio": "0.1", - "cum": "69150.0" + "cum": "72275.0" } }, { @@ -6787,7 +6803,7 @@ "notionalCap": "6000000", "notionalFloor": "3000000", "maintMarginRatio": "0.125", - "cum": "144150.0" + "cum": "147275.0" } }, { @@ -6803,7 +6819,7 @@ "notionalCap": "18000000", "notionalFloor": "6000000", "maintMarginRatio": "0.25", - "cum": "894150.0" + "cum": "897275.0" } }, { @@ -6819,7 +6835,7 @@ "notionalCap": "30000000", "notionalFloor": "18000000", "maintMarginRatio": "0.5", - "cum": "5394150.0" + "cum": "5397275.0" } } ], @@ -7140,10 +7156,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.02, - "maxLeverage": 20.0, + "maxLeverage": 8.0, "info": { "bracket": "1", - "initialLeverage": "20", + "initialLeverage": "8", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.02", @@ -7156,10 +7172,10 @@ "minNotional": 5000.0, "maxNotional": 25000.0, "maintenanceMarginRate": 0.025, - "maxLeverage": 15.0, + "maxLeverage": 7.0, "info": { "bracket": "2", - "initialLeverage": "15", + "initialLeverage": "7", "notionalCap": "25000", "notionalFloor": "5000", "maintMarginRatio": "0.025", @@ -7172,10 +7188,10 @@ "minNotional": 25000.0, 
"maxNotional": 100000.0, "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, + "maxLeverage": 6.0, "info": { "bracket": "3", - "initialLeverage": "10", + "initialLeverage": "6", "notionalCap": "100000", "notionalFloor": "25000", "maintMarginRatio": "0.05", @@ -7218,19 +7234,133 @@ "tier": 6.0, "currency": "USDT", "minNotional": 1000000.0, - "maxNotional": 5000000.0, + "maxNotional": 1500000.0, "maintenanceMarginRate": 0.5, "maxLeverage": 1.0, "info": { "bracket": "6", "initialLeverage": "1", - "notionalCap": "5000000", + "notionalCap": "1500000", "notionalFloor": "1000000", "maintMarginRatio": "0.5", "cum": "386900.0" } } ], + "COMBO/USDT:USDT": [ + { + "tier": 1.0, + "currency": "USDT", + "minNotional": 0.0, + "maxNotional": 5000.0, + "maintenanceMarginRate": 0.02, + "maxLeverage": 20.0, + "info": { + "bracket": "1", + "initialLeverage": "20", + "notionalCap": "5000", + "notionalFloor": "0", + "maintMarginRatio": "0.02", + "cum": "0.0" + } + }, + { + "tier": 2.0, + "currency": "USDT", + "minNotional": 5000.0, + "maxNotional": 25000.0, + "maintenanceMarginRate": 0.025, + "maxLeverage": 15.0, + "info": { + "bracket": "2", + "initialLeverage": "15", + "notionalCap": "25000", + "notionalFloor": "5000", + "maintMarginRatio": "0.025", + "cum": "25.0" + } + }, + { + "tier": 3.0, + "currency": "USDT", + "minNotional": 25000.0, + "maxNotional": 200000.0, + "maintenanceMarginRate": 0.05, + "maxLeverage": 10.0, + "info": { + "bracket": "3", + "initialLeverage": "10", + "notionalCap": "200000", + "notionalFloor": "25000", + "maintMarginRatio": "0.05", + "cum": "650.0" + } + }, + { + "tier": 4.0, + "currency": "USDT", + "minNotional": 200000.0, + "maxNotional": 500000.0, + "maintenanceMarginRate": 0.1, + "maxLeverage": 5.0, + "info": { + "bracket": "4", + "initialLeverage": "5", + "notionalCap": "500000", + "notionalFloor": "200000", + "maintMarginRatio": "0.1", + "cum": "10650.0" + } + }, + { + "tier": 5.0, + "currency": "USDT", + "minNotional": 500000.0, + "maxNotional": 1000000.0, + "maintenanceMarginRate": 0.125, + "maxLeverage": 4.0, + "info": { + "bracket": "5", + "initialLeverage": "4", + "notionalCap": "1000000", + "notionalFloor": "500000", + "maintMarginRatio": "0.125", + "cum": "23150.0" + } + }, + { + "tier": 6.0, + "currency": "USDT", + "minNotional": 1000000.0, + "maxNotional": 3000000.0, + "maintenanceMarginRate": 0.25, + "maxLeverage": 2.0, + "info": { + "bracket": "6", + "initialLeverage": "2", + "notionalCap": "3000000", + "notionalFloor": "1000000", + "maintMarginRatio": "0.25", + "cum": "148150.0" + } + }, + { + "tier": 7.0, + "currency": "USDT", + "minNotional": 3000000.0, + "maxNotional": 5000000.0, + "maintenanceMarginRate": 0.5, + "maxLeverage": 1.0, + "info": { + "bracket": "7", + "initialLeverage": "1", + "notionalCap": "5000000", + "notionalFloor": "3000000", + "maintMarginRatio": "0.5", + "cum": "898150.0" + } + } + ], "COMP/USDT:USDT": [ { "tier": 1.0, @@ -10152,13 +10282,13 @@ "tier": 4.0, "currency": "USDT", "minNotional": 50000.0, - "maxNotional": 250000.0, + "maxNotional": 400000.0, "maintenanceMarginRate": 0.02, "maxLeverage": 25.0, "info": { "bracket": "4", "initialLeverage": "25", - "notionalCap": "250000", + "notionalCap": "400000", "notionalFloor": "50000", "maintMarginRatio": "0.02", "cum": "545.0" @@ -10167,39 +10297,39 @@ { "tier": 5.0, "currency": "USDT", - "minNotional": 250000.0, - "maxNotional": 1000000.0, + "minNotional": 400000.0, + "maxNotional": 2000000.0, "maintenanceMarginRate": 0.05, "maxLeverage": 10.0, "info": { "bracket": "5", "initialLeverage": 
"10", - "notionalCap": "1000000", - "notionalFloor": "250000", + "notionalCap": "2000000", + "notionalFloor": "400000", "maintMarginRatio": "0.05", - "cum": "8045.0" + "cum": "12545.0" } }, { "tier": 6.0, "currency": "USDT", - "minNotional": 1000000.0, - "maxNotional": 5000000.0, + "minNotional": 2000000.0, + "maxNotional": 8000000.0, "maintenanceMarginRate": 0.1, "maxLeverage": 5.0, "info": { "bracket": "6", "initialLeverage": "5", - "notionalCap": "5000000", - "notionalFloor": "1000000", + "notionalCap": "8000000", + "notionalFloor": "2000000", "maintMarginRatio": "0.1", - "cum": "58045.0" + "cum": "112545.0" } }, { "tier": 7.0, "currency": "USDT", - "minNotional": 5000000.0, + "minNotional": 8000000.0, "maxNotional": 10000000.0, "maintenanceMarginRate": 0.125, "maxLeverage": 4.0, @@ -10207,9 +10337,9 @@ "bracket": "7", "initialLeverage": "4", "notionalCap": "10000000", - "notionalFloor": "5000000", + "notionalFloor": "8000000", "maintMarginRatio": "0.125", - "cum": "183045.0" + "cum": "312545.0" } }, { @@ -10225,7 +10355,7 @@ "notionalCap": "20000000", "notionalFloor": "10000000", "maintMarginRatio": "0.15", - "cum": "433045.0" + "cum": "562545.0" } }, { @@ -10241,7 +10371,7 @@ "notionalCap": "30000000", "notionalFloor": "20000000", "maintMarginRatio": "0.25", - "cum": "2433045.0" + "cum": "2562545.0" } }, { @@ -10257,7 +10387,7 @@ "notionalCap": "50000000", "notionalFloor": "30000000", "maintMarginRatio": "0.5", - "cum": "9933045.0" + "cum": "1.0062545E7" } } ], @@ -10268,10 +10398,10 @@ "minNotional": 0.0, "maxNotional": 5.0, "maintenanceMarginRate": 0.005, - "maxLeverage": 75.0, + "maxLeverage": 100.0, "info": { "bracket": "1", - "initialLeverage": "75", + "initialLeverage": "100", "notionalCap": "5", "notionalFloor": "0", "maintMarginRatio": "0.005", @@ -10284,10 +10414,10 @@ "minNotional": 5.0, "maxNotional": 10.0, "maintenanceMarginRate": 0.006, - "maxLeverage": 50.0, + "maxLeverage": 75.0, "info": { "bracket": "2", - "initialLeverage": "50", + "initialLeverage": "75", "notionalCap": "10", "notionalFloor": "5", "maintMarginRatio": "0.006", @@ -10300,10 +10430,10 @@ "minNotional": 10.0, "maxNotional": 100.0, "maintenanceMarginRate": 0.01, - "maxLeverage": 25.0, + "maxLeverage": 50.0, "info": { "bracket": "3", - "initialLeverage": "25", + "initialLeverage": "50", "notionalCap": "100", "notionalFloor": "10", "maintMarginRatio": "0.01", @@ -10314,13 +10444,13 @@ "tier": 4.0, "currency": "BTC", "minNotional": 100.0, - "maxNotional": 250.0, + "maxNotional": 400.0, "maintenanceMarginRate": 0.02, "maxLeverage": 20.0, "info": { "bracket": "4", "initialLeverage": "20", - "notionalCap": "250", + "notionalCap": "400", "notionalFloor": "100", "maintMarginRatio": "0.02", "cum": "1.045" @@ -10329,7 +10459,7 @@ { "tier": 5.0, "currency": "BTC", - "minNotional": 250.0, + "minNotional": 400.0, "maxNotional": 800.0, "maintenanceMarginRate": 0.025, "maxLeverage": 10.0, @@ -10337,9 +10467,9 @@ "bracket": "5", "initialLeverage": "10", "notionalCap": "800", - "notionalFloor": "250", + "notionalFloor": "400", "maintMarginRatio": "0.025", - "cum": "2.295" + "cum": "3.045" } }, { @@ -10355,7 +10485,7 @@ "notionalCap": "1500", "notionalFloor": "800", "maintMarginRatio": "0.05", - "cum": "22.295" + "cum": "23.045" } }, { @@ -10371,7 +10501,7 @@ "notionalCap": "2000", "notionalFloor": "1500", "maintMarginRatio": "0.1", - "cum": "97.295" + "cum": "98.045" } }, { @@ -10387,7 +10517,7 @@ "notionalCap": "3000", "notionalFloor": "2000", "maintMarginRatio": "0.125", - "cum": "147.295" + "cum": "148.045" } }, { @@ 
-10403,7 +10533,7 @@ "notionalCap": "5000", "notionalFloor": "3000", "maintMarginRatio": "0.25", - "cum": "522.295" + "cum": "523.045" } }, { @@ -10419,7 +10549,7 @@ "notionalCap": "10000", "notionalFloor": "5000", "maintMarginRatio": "0.5", - "cum": "1772.295" + "cum": "1773.045" } } ], @@ -12594,13 +12724,13 @@ "tier": 3.0, "currency": "USDT", "minNotional": 150000.0, - "maxNotional": 250000.0, + "maxNotional": 600000.0, "maintenanceMarginRate": 0.05, "maxLeverage": 10.0, "info": { "bracket": "3", "initialLeverage": "10", - "notionalCap": "250000", + "notionalCap": "600000", "notionalFloor": "150000", "maintMarginRatio": "0.05", "cum": "4500.0" @@ -12609,55 +12739,55 @@ { "tier": 4.0, "currency": "USDT", - "minNotional": 250000.0, - "maxNotional": 500000.0, + "minNotional": 600000.0, + "maxNotional": 1600000.0, "maintenanceMarginRate": 0.1, "maxLeverage": 5.0, "info": { "bracket": "4", "initialLeverage": "5", - "notionalCap": "500000", - "notionalFloor": "250000", + "notionalCap": "1600000", + "notionalFloor": "600000", "maintMarginRatio": "0.1", - "cum": "17000.0" + "cum": "34500.0" } }, { "tier": 5.0, "currency": "USDT", - "minNotional": 500000.0, - "maxNotional": 1000000.0, + "minNotional": 1600000.0, + "maxNotional": 2000000.0, "maintenanceMarginRate": 0.125, "maxLeverage": 4.0, "info": { "bracket": "5", "initialLeverage": "4", - "notionalCap": "1000000", - "notionalFloor": "500000", + "notionalCap": "2000000", + "notionalFloor": "1600000", "maintMarginRatio": "0.125", - "cum": "29500.0" + "cum": "74500.0" } }, { "tier": 6.0, "currency": "USDT", - "minNotional": 1000000.0, - "maxNotional": 2000000.0, + "minNotional": 2000000.0, + "maxNotional": 4000000.0, "maintenanceMarginRate": 0.25, "maxLeverage": 2.0, "info": { "bracket": "6", "initialLeverage": "2", - "notionalCap": "2000000", - "notionalFloor": "1000000", + "notionalCap": "4000000", + "notionalFloor": "2000000", "maintMarginRatio": "0.25", - "cum": "154500.0" + "cum": "324500.0" } }, { "tier": 7.0, "currency": "USDT", - "minNotional": 2000000.0, + "minNotional": 4000000.0, "maxNotional": 30000000.0, "maintenanceMarginRate": 0.5, "maxLeverage": 1.0, @@ -12665,9 +12795,9 @@ "bracket": "7", "initialLeverage": "1", "notionalCap": "30000000", - "notionalFloor": "2000000", + "notionalFloor": "4000000", "maintMarginRatio": "0.5", - "cum": "654500.0" + "cum": "1324500.0" } } ], @@ -14937,6 +15067,120 @@ } } ], + "KEY/USDT:USDT": [ + { + "tier": 1.0, + "currency": "USDT", + "minNotional": 0.0, + "maxNotional": 5000.0, + "maintenanceMarginRate": 0.02, + "maxLeverage": 20.0, + "info": { + "bracket": "1", + "initialLeverage": "20", + "notionalCap": "5000", + "notionalFloor": "0", + "maintMarginRatio": "0.02", + "cum": "0.0" + } + }, + { + "tier": 2.0, + "currency": "USDT", + "minNotional": 5000.0, + "maxNotional": 25000.0, + "maintenanceMarginRate": 0.025, + "maxLeverage": 15.0, + "info": { + "bracket": "2", + "initialLeverage": "15", + "notionalCap": "25000", + "notionalFloor": "5000", + "maintMarginRatio": "0.025", + "cum": "25.0" + } + }, + { + "tier": 3.0, + "currency": "USDT", + "minNotional": 25000.0, + "maxNotional": 200000.0, + "maintenanceMarginRate": 0.05, + "maxLeverage": 10.0, + "info": { + "bracket": "3", + "initialLeverage": "10", + "notionalCap": "200000", + "notionalFloor": "25000", + "maintMarginRatio": "0.05", + "cum": "650.0" + } + }, + { + "tier": 4.0, + "currency": "USDT", + "minNotional": 200000.0, + "maxNotional": 500000.0, + "maintenanceMarginRate": 0.1, + "maxLeverage": 5.0, + "info": { + "bracket": "4", + 
"initialLeverage": "5", + "notionalCap": "500000", + "notionalFloor": "200000", + "maintMarginRatio": "0.1", + "cum": "10650.0" + } + }, + { + "tier": 5.0, + "currency": "USDT", + "minNotional": 500000.0, + "maxNotional": 1000000.0, + "maintenanceMarginRate": 0.125, + "maxLeverage": 4.0, + "info": { + "bracket": "5", + "initialLeverage": "4", + "notionalCap": "1000000", + "notionalFloor": "500000", + "maintMarginRatio": "0.125", + "cum": "23150.0" + } + }, + { + "tier": 6.0, + "currency": "USDT", + "minNotional": 1000000.0, + "maxNotional": 3000000.0, + "maintenanceMarginRate": 0.25, + "maxLeverage": 2.0, + "info": { + "bracket": "6", + "initialLeverage": "2", + "notionalCap": "3000000", + "notionalFloor": "1000000", + "maintMarginRatio": "0.25", + "cum": "148150.0" + } + }, + { + "tier": 7.0, + "currency": "USDT", + "minNotional": 3000000.0, + "maxNotional": 5000000.0, + "maintenanceMarginRate": 0.5, + "maxLeverage": 1.0, + "info": { + "bracket": "7", + "initialLeverage": "1", + "notionalCap": "5000000", + "notionalFloor": "3000000", + "maintMarginRatio": "0.5", + "cum": "898150.0" + } + } + ], "KLAY/USDT:USDT": [ { "tier": 1.0, @@ -15694,10 +15938,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.02, - "maxLeverage": 25.0, + "maxLeverage": 20.0, "info": { "bracket": "1", - "initialLeverage": "25", + "initialLeverage": "20", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.02", @@ -15710,10 +15954,10 @@ "minNotional": 5000.0, "maxNotional": 25000.0, "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, + "maxLeverage": 15.0, "info": { "bracket": "2", - "initialLeverage": "20", + "initialLeverage": "15", "notionalCap": "25000", "notionalFloor": "5000", "maintMarginRatio": "0.025", @@ -15772,13 +16016,13 @@ "tier": 6.0, "currency": "USDT", "minNotional": 3000000.0, - "maxNotional": 9000000.0, + "maxNotional": 5000000.0, "maintenanceMarginRate": 0.25, "maxLeverage": 2.0, "info": { "bracket": "6", "initialLeverage": "2", - "notionalCap": "9000000", + "notionalCap": "5000000", "notionalFloor": "3000000", "maintMarginRatio": "0.25", "cum": "480650.0" @@ -15787,17 +16031,17 @@ { "tier": 7.0, "currency": "USDT", - "minNotional": 9000000.0, - "maxNotional": 15000000.0, + "minNotional": 5000000.0, + "maxNotional": 7000000.0, "maintenanceMarginRate": 0.5, "maxLeverage": 1.0, "info": { "bracket": "7", "initialLeverage": "1", - "notionalCap": "15000000", - "notionalFloor": "9000000", + "notionalCap": "7000000", + "notionalFloor": "5000000", "maintMarginRatio": "0.5", - "cum": "2730650.0" + "cum": "1730650.0" } } ], @@ -15808,10 +16052,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.02, - "maxLeverage": 25.0, + "maxLeverage": 10.0, "info": { "bracket": "1", - "initialLeverage": "25", + "initialLeverage": "10", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.02", @@ -15824,10 +16068,10 @@ "minNotional": 5000.0, "maxNotional": 25000.0, "maintenanceMarginRate": 0.025, - "maxLeverage": 15.0, + "maxLeverage": 8.0, "info": { "bracket": "2", - "initialLeverage": "15", + "initialLeverage": "8", "notionalCap": "25000", "notionalFloor": "5000", "maintMarginRatio": "0.025", @@ -15840,10 +16084,10 @@ "minNotional": 25000.0, "maxNotional": 100000.0, "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, + "maxLeverage": 6.0, "info": { "bracket": "3", - "initialLeverage": "10", + "initialLeverage": "6", "notionalCap": "100000", "notionalFloor": "25000", "maintMarginRatio": "0.05", @@ -15902,13 +16146,13 @@ "tier": 7.0, 
"currency": "BUSD", "minNotional": 3000000.0, - "maxNotional": 8000000.0, + "maxNotional": 4000000.0, "maintenanceMarginRate": 0.5, "maxLeverage": 1.0, "info": { "bracket": "7", "initialLeverage": "1", - "notionalCap": "8000000", + "notionalCap": "4000000", "notionalFloor": "3000000", "maintMarginRatio": "0.5", "cum": "949400.0" @@ -16636,13 +16880,13 @@ "tier": 4.0, "currency": "USDT", "minNotional": 50000.0, - "maxNotional": 250000.0, + "maxNotional": 500000.0, "maintenanceMarginRate": 0.02, "maxLeverage": 25.0, "info": { "bracket": "4", "initialLeverage": "25", - "notionalCap": "250000", + "notionalCap": "500000", "notionalFloor": "50000", "maintMarginRatio": "0.02", "cum": "545.0" @@ -16651,39 +16895,39 @@ { "tier": 5.0, "currency": "USDT", - "minNotional": 250000.0, - "maxNotional": 1000000.0, + "minNotional": 500000.0, + "maxNotional": 2000000.0, "maintenanceMarginRate": 0.05, "maxLeverage": 10.0, "info": { "bracket": "5", "initialLeverage": "10", - "notionalCap": "1000000", - "notionalFloor": "250000", + "notionalCap": "2000000", + "notionalFloor": "500000", "maintMarginRatio": "0.05", - "cum": "8045.0" + "cum": "15545.0" } }, { "tier": 6.0, "currency": "USDT", - "minNotional": 1000000.0, - "maxNotional": 5000000.0, + "minNotional": 2000000.0, + "maxNotional": 8000000.0, "maintenanceMarginRate": 0.1, "maxLeverage": 5.0, "info": { "bracket": "6", "initialLeverage": "5", - "notionalCap": "5000000", - "notionalFloor": "1000000", + "notionalCap": "8000000", + "notionalFloor": "2000000", "maintMarginRatio": "0.1", - "cum": "58045.0" + "cum": "115545.0" } }, { "tier": 7.0, "currency": "USDT", - "minNotional": 5000000.0, + "minNotional": 8000000.0, "maxNotional": 10000000.0, "maintenanceMarginRate": 0.125, "maxLeverage": 4.0, @@ -16691,9 +16935,9 @@ "bracket": "7", "initialLeverage": "4", "notionalCap": "10000000", - "notionalFloor": "5000000", + "notionalFloor": "8000000", "maintMarginRatio": "0.125", - "cum": "183045.0" + "cum": "315545.0" } }, { @@ -16709,7 +16953,7 @@ "notionalCap": "20000000", "notionalFloor": "10000000", "maintMarginRatio": "0.15", - "cum": "433045.0" + "cum": "565545.0" } }, { @@ -16725,7 +16969,7 @@ "notionalCap": "30000000", "notionalFloor": "20000000", "maintMarginRatio": "0.25", - "cum": "2433045.0" + "cum": "2565545.0" } }, { @@ -16741,7 +16985,7 @@ "notionalCap": "50000000", "notionalFloor": "30000000", "maintMarginRatio": "0.5", - "cum": "9933045.0" + "cum": "1.0065545E7" } } ], @@ -18198,13 +18442,13 @@ "tier": 2.0, "currency": "USDT", "minNotional": 5000.0, - "maxNotional": 25000.0, + "maxNotional": 50000.0, "maintenanceMarginRate": 0.025, "maxLeverage": 10.0, "info": { "bracket": "2", "initialLeverage": "10", - "notionalCap": "25000", + "notionalCap": "50000", "notionalFloor": "5000", "maintMarginRatio": "0.025", "cum": "25.0" @@ -18213,39 +18457,39 @@ { "tier": 3.0, "currency": "USDT", - "minNotional": 25000.0, - "maxNotional": 100000.0, + "minNotional": 50000.0, + "maxNotional": 200000.0, "maintenanceMarginRate": 0.05, "maxLeverage": 8.0, "info": { "bracket": "3", "initialLeverage": "8", - "notionalCap": "100000", - "notionalFloor": "25000", + "notionalCap": "200000", + "notionalFloor": "50000", "maintMarginRatio": "0.05", - "cum": "650.0" + "cum": "1275.0" } }, { "tier": 4.0, "currency": "USDT", - "minNotional": 100000.0, - "maxNotional": 250000.0, + "minNotional": 200000.0, + "maxNotional": 500000.0, "maintenanceMarginRate": 0.1, "maxLeverage": 5.0, "info": { "bracket": "4", "initialLeverage": "5", - "notionalCap": "250000", - "notionalFloor": 
"100000", + "notionalCap": "500000", + "notionalFloor": "200000", "maintMarginRatio": "0.1", - "cum": "5650.0" + "cum": "11275.0" } }, { "tier": 5.0, "currency": "USDT", - "minNotional": 250000.0, + "minNotional": 500000.0, "maxNotional": 1000000.0, "maintenanceMarginRate": 0.125, "maxLeverage": 2.0, @@ -18253,9 +18497,9 @@ "bracket": "5", "initialLeverage": "2", "notionalCap": "1000000", - "notionalFloor": "250000", + "notionalFloor": "500000", "maintMarginRatio": "0.125", - "cum": "11900.0" + "cum": "23775.0" } }, { @@ -18271,7 +18515,7 @@ "notionalCap": "3000000", "notionalFloor": "1000000", "maintMarginRatio": "0.5", - "cum": "386900.0" + "cum": "398775.0" } } ], @@ -18834,13 +19078,13 @@ "tier": 3.0, "currency": "USDT", "minNotional": 50000.0, - "maxNotional": 400000.0, + "maxNotional": 600000.0, "maintenanceMarginRate": 0.025, "maxLeverage": 20.0, "info": { "bracket": "3", "initialLeverage": "20", - "notionalCap": "400000", + "notionalCap": "600000", "notionalFloor": "50000", "maintMarginRatio": "0.025", "cum": "770.0" @@ -18849,39 +19093,39 @@ { "tier": 4.0, "currency": "USDT", - "minNotional": 400000.0, - "maxNotional": 800000.0, + "minNotional": 600000.0, + "maxNotional": 1200000.0, "maintenanceMarginRate": 0.05, "maxLeverage": 10.0, "info": { "bracket": "4", "initialLeverage": "10", - "notionalCap": "800000", - "notionalFloor": "400000", + "notionalCap": "1200000", + "notionalFloor": "600000", "maintMarginRatio": "0.05", - "cum": "10770.0" + "cum": "15770.0" } }, { "tier": 5.0, "currency": "USDT", - "minNotional": 800000.0, - "maxNotional": 2000000.0, + "minNotional": 1200000.0, + "maxNotional": 3000000.0, "maintenanceMarginRate": 0.1, "maxLeverage": 5.0, "info": { "bracket": "5", "initialLeverage": "5", - "notionalCap": "2000000", - "notionalFloor": "800000", + "notionalCap": "3000000", + "notionalFloor": "1200000", "maintMarginRatio": "0.1", - "cum": "50770.0" + "cum": "75770.0" } }, { "tier": 6.0, "currency": "USDT", - "minNotional": 2000000.0, + "minNotional": 3000000.0, "maxNotional": 5000000.0, "maintenanceMarginRate": 0.125, "maxLeverage": 4.0, @@ -18889,9 +19133,9 @@ "bracket": "6", "initialLeverage": "4", "notionalCap": "5000000", - "notionalFloor": "2000000", + "notionalFloor": "3000000", "maintMarginRatio": "0.125", - "cum": "100770.0" + "cum": "150770.0" } }, { @@ -18907,7 +19151,7 @@ "notionalCap": "12000000", "notionalFloor": "5000000", "maintMarginRatio": "0.25", - "cum": "725770.0" + "cum": "775770.0" } }, { @@ -18923,7 +19167,7 @@ "notionalCap": "20000000", "notionalFloor": "12000000", "maintMarginRatio": "0.5", - "cum": "3725770.0" + "cum": "3775770.0" } } ], @@ -22404,13 +22648,13 @@ "tier": 3.0, "currency": "USDT", "minNotional": 50000.0, - "maxNotional": 300000.0, + "maxNotional": 450000.0, "maintenanceMarginRate": 0.025, "maxLeverage": 20.0, "info": { "bracket": "3", "initialLeverage": "20", - "notionalCap": "300000", + "notionalCap": "450000", "notionalFloor": "50000", "maintMarginRatio": "0.025", "cum": "300.0" @@ -22419,55 +22663,55 @@ { "tier": 4.0, "currency": "USDT", - "minNotional": 300000.0, - "maxNotional": 600000.0, + "minNotional": 450000.0, + "maxNotional": 900000.0, "maintenanceMarginRate": 0.05, "maxLeverage": 10.0, "info": { "bracket": "4", "initialLeverage": "10", - "notionalCap": "600000", - "notionalFloor": "300000", + "notionalCap": "900000", + "notionalFloor": "450000", "maintMarginRatio": "0.05", - "cum": "7800.0" + "cum": "11550.0" } }, { "tier": 5.0, "currency": "USDT", - "minNotional": 600000.0, - "maxNotional": 1600000.0, + 
"minNotional": 900000.0, + "maxNotional": 2400000.0, "maintenanceMarginRate": 0.1, "maxLeverage": 5.0, "info": { "bracket": "5", "initialLeverage": "5", - "notionalCap": "1600000", - "notionalFloor": "600000", + "notionalCap": "2400000", + "notionalFloor": "900000", "maintMarginRatio": "0.1", - "cum": "37800.0" + "cum": "56550.0" } }, { "tier": 6.0, "currency": "USDT", - "minNotional": 1600000.0, - "maxNotional": 2000000.0, + "minNotional": 2400000.0, + "maxNotional": 3000000.0, "maintenanceMarginRate": 0.125, "maxLeverage": 4.0, "info": { "bracket": "6", "initialLeverage": "4", - "notionalCap": "2000000", - "notionalFloor": "1600000", + "notionalCap": "3000000", + "notionalFloor": "2400000", "maintMarginRatio": "0.125", - "cum": "77800.0" + "cum": "116550.0" } }, { "tier": 7.0, "currency": "USDT", - "minNotional": 2000000.0, + "minNotional": 3000000.0, "maxNotional": 6000000.0, "maintenanceMarginRate": 0.25, "maxLeverage": 2.0, @@ -22475,9 +22719,9 @@ "bracket": "7", "initialLeverage": "2", "notionalCap": "6000000", - "notionalFloor": "2000000", + "notionalFloor": "3000000", "maintMarginRatio": "0.25", - "cum": "327800.0" + "cum": "491550.0" } }, { @@ -22493,7 +22737,7 @@ "notionalCap": "10000000", "notionalFloor": "6000000", "maintMarginRatio": "0.5", - "cum": "1827800.0" + "cum": "1991550.0" } } ], @@ -22618,10 +22862,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.02, - "maxLeverage": 15.0, + "maxLeverage": 25.0, "info": { "bracket": "1", - "initialLeverage": "15", + "initialLeverage": "25", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.02", @@ -22634,10 +22878,10 @@ "minNotional": 5000.0, "maxNotional": 25000.0, "maintenanceMarginRate": 0.025, - "maxLeverage": 10.0, + "maxLeverage": 20.0, "info": { "bracket": "2", - "initialLeverage": "10", + "initialLeverage": "20", "notionalCap": "25000", "notionalFloor": "5000", "maintMarginRatio": "0.025", @@ -22650,10 +22894,10 @@ "minNotional": 25000.0, "maxNotional": 600000.0, "maintenanceMarginRate": 0.05, - "maxLeverage": 8.0, + "maxLeverage": 10.0, "info": { "bracket": "3", - "initialLeverage": "8", + "initialLeverage": "10", "notionalCap": "600000", "notionalFloor": "25000", "maintMarginRatio": "0.05", @@ -23186,13 +23430,13 @@ "tier": 2.0, "currency": "USDT", "minNotional": 5000.0, - "maxNotional": 25000.0, + "maxNotional": 50000.0, "maintenanceMarginRate": 0.025, "maxLeverage": 20.0, "info": { "bracket": "2", "initialLeverage": "20", - "notionalCap": "25000", + "notionalCap": "50000", "notionalFloor": "5000", "maintMarginRatio": "0.025", "cum": "25.0" @@ -23201,23 +23445,23 @@ { "tier": 3.0, "currency": "USDT", - "minNotional": 25000.0, - "maxNotional": 480000.0, + "minNotional": 50000.0, + "maxNotional": 600000.0, "maintenanceMarginRate": 0.05, "maxLeverage": 10.0, "info": { "bracket": "3", "initialLeverage": "10", - "notionalCap": "480000", - "notionalFloor": "25000", + "notionalCap": "600000", + "notionalFloor": "50000", "maintMarginRatio": "0.05", - "cum": "650.0" + "cum": "1275.0" } }, { "tier": 4.0, "currency": "USDT", - "minNotional": 480000.0, + "minNotional": 600000.0, "maxNotional": 1280000.0, "maintenanceMarginRate": 0.1, "maxLeverage": 5.0, @@ -23225,9 +23469,9 @@ "bracket": "4", "initialLeverage": "5", "notionalCap": "1280000", - "notionalFloor": "480000", + "notionalFloor": "600000", "maintMarginRatio": "0.1", - "cum": "24650.0" + "cum": "31275.0" } }, { @@ -23243,7 +23487,7 @@ "notionalCap": "1600000", "notionalFloor": "1280000", "maintMarginRatio": "0.125", - "cum": 
"56650.0" + "cum": "63275.0" } }, { @@ -23259,7 +23503,7 @@ "notionalCap": "4800000", "notionalFloor": "1600000", "maintMarginRatio": "0.25", - "cum": "256650.0" + "cum": "263275.0" } }, { @@ -23275,7 +23519,7 @@ "notionalCap": "8000000", "notionalFloor": "4800000", "maintMarginRatio": "0.5", - "cum": "1456650.0" + "cum": "1463275.0" } } ], @@ -24198,10 +24442,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.006, - "maxLeverage": 30.0, + "maxLeverage": 50.0, "info": { "bracket": "1", - "initialLeverage": "30", + "initialLeverage": "50", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.006", @@ -24228,13 +24472,13 @@ "tier": 3.0, "currency": "USDT", "minNotional": 50000.0, - "maxNotional": 600000.0, + "maxNotional": 900000.0, "maintenanceMarginRate": 0.025, "maxLeverage": 20.0, "info": { "bracket": "3", "initialLeverage": "20", - "notionalCap": "600000", + "notionalCap": "900000", "notionalFloor": "50000", "maintMarginRatio": "0.025", "cum": "770.0" @@ -24243,81 +24487,81 @@ { "tier": 4.0, "currency": "USDT", - "minNotional": 600000.0, - "maxNotional": 1200000.0, + "minNotional": 900000.0, + "maxNotional": 1800000.0, "maintenanceMarginRate": 0.05, "maxLeverage": 10.0, "info": { "bracket": "4", "initialLeverage": "10", - "notionalCap": "1200000", - "notionalFloor": "600000", + "notionalCap": "1800000", + "notionalFloor": "900000", "maintMarginRatio": "0.05", - "cum": "15770.0" + "cum": "23270.0" } }, { "tier": 5.0, "currency": "USDT", - "minNotional": 1200000.0, - "maxNotional": 3200000.0, + "minNotional": 1800000.0, + "maxNotional": 4800000.0, "maintenanceMarginRate": 0.1, "maxLeverage": 5.0, "info": { "bracket": "5", "initialLeverage": "5", - "notionalCap": "3200000", - "notionalFloor": "1200000", + "notionalCap": "4800000", + "notionalFloor": "1800000", "maintMarginRatio": "0.1", - "cum": "75770.0" + "cum": "113270.0" } }, { "tier": 6.0, "currency": "USDT", - "minNotional": 3200000.0, - "maxNotional": 5000000.0, + "minNotional": 4800000.0, + "maxNotional": 6000000.0, "maintenanceMarginRate": 0.125, "maxLeverage": 4.0, "info": { "bracket": "6", "initialLeverage": "4", - "notionalCap": "5000000", - "notionalFloor": "3200000", + "notionalCap": "6000000", + "notionalFloor": "4800000", "maintMarginRatio": "0.125", - "cum": "155770.0" + "cum": "233270.0" } }, { "tier": 7.0, "currency": "USDT", - "minNotional": 5000000.0, - "maxNotional": 12000000.0, + "minNotional": 6000000.0, + "maxNotional": 18000000.0, "maintenanceMarginRate": 0.25, "maxLeverage": 2.0, "info": { "bracket": "7", "initialLeverage": "2", - "notionalCap": "12000000", - "notionalFloor": "5000000", + "notionalCap": "18000000", + "notionalFloor": "6000000", "maintMarginRatio": "0.25", - "cum": "780770.0" + "cum": "983270.0" } }, { "tier": 8.0, "currency": "USDT", - "minNotional": 12000000.0, - "maxNotional": 20000000.0, + "minNotional": 18000000.0, + "maxNotional": 30000000.0, "maintenanceMarginRate": 0.5, "maxLeverage": 1.0, "info": { "bracket": "8", "initialLeverage": "1", - "notionalCap": "20000000", - "notionalFloor": "12000000", + "notionalCap": "30000000", + "notionalFloor": "18000000", "maintMarginRatio": "0.5", - "cum": "3780770.0" + "cum": "5483270.0" } } ], @@ -24570,13 +24814,13 @@ "tier": 2.0, "currency": "USDT", "minNotional": 5000.0, - "maxNotional": 25000.0, + "maxNotional": 50000.0, "maintenanceMarginRate": 0.025, "maxLeverage": 20.0, "info": { "bracket": "2", "initialLeverage": "20", - "notionalCap": "25000", + "notionalCap": "50000", "notionalFloor": "5000", 
"maintMarginRatio": "0.025", "cum": "25.0" @@ -24585,39 +24829,39 @@ { "tier": 3.0, "currency": "USDT", - "minNotional": 25000.0, - "maxNotional": 200000.0, + "minNotional": 50000.0, + "maxNotional": 300000.0, "maintenanceMarginRate": 0.05, "maxLeverage": 10.0, "info": { "bracket": "3", "initialLeverage": "10", - "notionalCap": "200000", - "notionalFloor": "25000", + "notionalCap": "300000", + "notionalFloor": "50000", "maintMarginRatio": "0.05", - "cum": "650.0" + "cum": "1275.0" } }, { "tier": 4.0, "currency": "USDT", - "minNotional": 200000.0, - "maxNotional": 500000.0, + "minNotional": 300000.0, + "maxNotional": 800000.0, "maintenanceMarginRate": 0.1, "maxLeverage": 5.0, "info": { "bracket": "4", "initialLeverage": "5", - "notionalCap": "500000", - "notionalFloor": "200000", + "notionalCap": "800000", + "notionalFloor": "300000", "maintMarginRatio": "0.1", - "cum": "10650.0" + "cum": "16275.0" } }, { "tier": 5.0, "currency": "USDT", - "minNotional": 500000.0, + "minNotional": 800000.0, "maxNotional": 1000000.0, "maintenanceMarginRate": 0.125, "maxLeverage": 4.0, @@ -24625,9 +24869,9 @@ "bracket": "5", "initialLeverage": "4", "notionalCap": "1000000", - "notionalFloor": "500000", + "notionalFloor": "800000", "maintMarginRatio": "0.125", - "cum": "23150.0" + "cum": "36275.0" } }, { @@ -24643,7 +24887,7 @@ "notionalCap": "3000000", "notionalFloor": "1000000", "maintMarginRatio": "0.25", - "cum": "148150.0" + "cum": "161275.0" } }, { @@ -24659,7 +24903,7 @@ "notionalCap": "5000000", "notionalFloor": "3000000", "maintMarginRatio": "0.5", - "cum": "898150.0" + "cum": "911275.0" } } ], @@ -25254,13 +25498,13 @@ "tier": 4.0, "currency": "USDT", "minNotional": 50000.0, - "maxNotional": 250000.0, + "maxNotional": 500000.0, "maintenanceMarginRate": 0.02, "maxLeverage": 25.0, "info": { "bracket": "4", "initialLeverage": "25", - "notionalCap": "250000", + "notionalCap": "500000", "notionalFloor": "50000", "maintMarginRatio": "0.02", "cum": "545.0" @@ -25269,39 +25513,39 @@ { "tier": 5.0, "currency": "USDT", - "minNotional": 250000.0, - "maxNotional": 1000000.0, + "minNotional": 500000.0, + "maxNotional": 2000000.0, "maintenanceMarginRate": 0.05, "maxLeverage": 10.0, "info": { "bracket": "5", "initialLeverage": "10", - "notionalCap": "1000000", - "notionalFloor": "250000", + "notionalCap": "2000000", + "notionalFloor": "500000", "maintMarginRatio": "0.05", - "cum": "8045.0" + "cum": "15545.0" } }, { "tier": 6.0, "currency": "USDT", - "minNotional": 1000000.0, - "maxNotional": 5000000.0, + "minNotional": 2000000.0, + "maxNotional": 8000000.0, "maintenanceMarginRate": 0.1, "maxLeverage": 5.0, "info": { "bracket": "6", "initialLeverage": "5", - "notionalCap": "5000000", - "notionalFloor": "1000000", + "notionalCap": "8000000", + "notionalFloor": "2000000", "maintMarginRatio": "0.1", - "cum": "58045.0" + "cum": "115545.0" } }, { "tier": 7.0, "currency": "USDT", - "minNotional": 5000000.0, + "minNotional": 8000000.0, "maxNotional": 10000000.0, "maintenanceMarginRate": 0.125, "maxLeverage": 4.0, @@ -25309,9 +25553,9 @@ "bracket": "7", "initialLeverage": "4", "notionalCap": "10000000", - "notionalFloor": "5000000", + "notionalFloor": "8000000", "maintMarginRatio": "0.125", - "cum": "183045.0" + "cum": "315545.0" } }, { @@ -25327,7 +25571,7 @@ "notionalCap": "20000000", "notionalFloor": "10000000", "maintMarginRatio": "0.15", - "cum": "433045.0" + "cum": "565545.0" } }, { @@ -25343,7 +25587,7 @@ "notionalCap": "30000000", "notionalFloor": "20000000", "maintMarginRatio": "0.25", - "cum": "2433045.0" + "cum": 
"2565545.0" } }, { @@ -25359,7 +25603,7 @@ "notionalCap": "50000000", "notionalFloor": "30000000", "maintMarginRatio": "0.5", - "cum": "9933045.0" + "cum": "1.0065545E7" } } ], diff --git a/freqtrade/exchange/exchange.py b/freqtrade/exchange/exchange.py index 27e6ea63a..2cf98c266 100644 --- a/freqtrade/exchange/exchange.py +++ b/freqtrade/exchange/exchange.py @@ -301,7 +301,7 @@ class Exchange: return list((self._api.timeframes or {}).keys()) @property - def markets(self) -> Dict: + def markets(self) -> Dict[str, Any]: """exchange ccxt markets""" if not self._markets: logger.info("Markets were not loaded. Loading them now..") @@ -1148,8 +1148,8 @@ class Exchange: else: limit_rate = stop_price * (2 - limit_price_pct) - bad_stop_price = ((stop_price <= limit_rate) if side == - "sell" else (stop_price >= limit_rate)) + bad_stop_price = ((stop_price < limit_rate) if side == + "sell" else (stop_price > limit_rate)) # Ensure rate is less than stop price if bad_stop_price: # This can for example happen if the stop / liquidation price is set to 0 @@ -1662,39 +1662,18 @@ class Exchange: price_side = self._get_price_side(side, is_short, conf_strategy) - price_side_word = price_side.capitalize() - if conf_strategy.get('use_order_book', False): order_book_top = conf_strategy.get('order_book_top', 1) if order_book is None: order_book = self.fetch_l2_order_book(pair, order_book_top) - logger.debug('order_book %s', order_book) - # top 1 = index 0 - try: - obside: OBLiteral = 'bids' if price_side == 'bid' else 'asks' - rate = order_book[obside][order_book_top - 1][0] - except (IndexError, KeyError) as e: - logger.warning( - f"{pair} - {name} Price at location {order_book_top} from orderbook " - f"could not be determined. Orderbook: {order_book}" - ) - raise PricingError from e - logger.debug(f"{pair} - {name} price from orderbook {price_side_word}" - f"side - top {order_book_top} order book {side} rate {rate:.8f}") + rate = self._get_rate_from_ob(pair, side, order_book, name, price_side, + order_book_top) else: - logger.debug(f"Using Last {price_side_word} / Last Price") + logger.debug(f"Using Last {price_side.capitalize()} / Last Price") if ticker is None: ticker = self.fetch_ticker(pair) - ticker_rate = ticker[price_side] - if ticker['last'] and ticker_rate: - if side == 'entry' and ticker_rate > ticker['last']: - balance = conf_strategy.get('price_last_balance', 0.0) - ticker_rate = ticker_rate + balance * (ticker['last'] - ticker_rate) - elif side == 'exit' and ticker_rate < ticker['last']: - balance = conf_strategy.get('price_last_balance', 0.0) - ticker_rate = ticker_rate - balance * (ticker_rate - ticker['last']) - rate = ticker_rate + rate = self._get_rate_from_ticker(side, ticker, conf_strategy, price_side) if rate is None: raise PricingError(f"{name}-Rate for {pair} was empty.") @@ -1703,6 +1682,43 @@ class Exchange: return rate + def _get_rate_from_ticker(self, side: EntryExit, ticker: Ticker, conf_strategy: Dict[str, Any], + price_side: BidAsk) -> Optional[float]: + """ + Get rate from ticker. 
+ """ + ticker_rate = ticker[price_side] + if ticker['last'] and ticker_rate: + if side == 'entry' and ticker_rate > ticker['last']: + balance = conf_strategy.get('price_last_balance', 0.0) + ticker_rate = ticker_rate + balance * (ticker['last'] - ticker_rate) + elif side == 'exit' and ticker_rate < ticker['last']: + balance = conf_strategy.get('price_last_balance', 0.0) + ticker_rate = ticker_rate - balance * (ticker_rate - ticker['last']) + rate = ticker_rate + return rate + + def _get_rate_from_ob(self, pair: str, side: EntryExit, order_book: OrderBook, name: str, + price_side: BidAsk, order_book_top: int) -> float: + """ + Get rate from orderbook + :raises: PricingError if rate could not be determined. + """ + logger.debug('order_book %s', order_book) + # top 1 = index 0 + try: + obside: OBLiteral = 'bids' if price_side == 'bid' else 'asks' + rate = order_book[obside][order_book_top - 1][0] + except (IndexError, KeyError) as e: + logger.warning( + f"{pair} - {name} Price at location {order_book_top} from orderbook " + f"could not be determined. Orderbook: {order_book}" + ) + raise PricingError from e + logger.debug(f"{pair} - {name} price from orderbook {price_side.capitalize()}" + f"side - top {order_book_top} order book {side} rate {rate:.8f}") + return rate + def get_rates(self, pair: str, refresh: bool, is_short: bool) -> Tuple[float, float]: entry_rate = None exit_rate = None diff --git a/freqtrade/exchange/exchange_utils.py b/freqtrade/exchange/exchange_utils.py index c6c2d5a24..fe7264dd9 100644 --- a/freqtrade/exchange/exchange_utils.py +++ b/freqtrade/exchange/exchange_utils.py @@ -9,7 +9,9 @@ import ccxt from ccxt import (DECIMAL_PLACES, ROUND, ROUND_DOWN, ROUND_UP, SIGNIFICANT_DIGITS, TICK_SIZE, TRUNCATE, decimal_to_precision) -from freqtrade.exchange.common import BAD_EXCHANGES, EXCHANGE_HAS_OPTIONAL, EXCHANGE_HAS_REQUIRED +from freqtrade.exchange.common import (BAD_EXCHANGES, EXCHANGE_HAS_OPTIONAL, EXCHANGE_HAS_REQUIRED, + SUPPORTED_EXCHANGES) +from freqtrade.types import ValidExchangesType from freqtrade.util import FtPrecise from freqtrade.util.datetime_helpers import dt_from_ts, dt_ts @@ -55,14 +57,41 @@ def validate_exchange(exchange: str) -> Tuple[bool, str]: return True, '' -def validate_exchanges(all_exchanges: bool) -> List[Tuple[str, bool, str]]: +def _build_exchange_list_entry( + exchange_name: str, exchangeClasses: Dict[str, Any]) -> ValidExchangesType: + valid, comment = validate_exchange(exchange_name) + result: ValidExchangesType = { + 'name': exchange_name, + 'valid': valid, + 'supported': exchange_name.lower() in SUPPORTED_EXCHANGES, + 'comment': comment, + 'trade_modes': [{'trading_mode': 'spot', 'margin_mode': ''}], + } + if resolved := exchangeClasses.get(exchange_name.lower()): + supported_modes = [{'trading_mode': 'spot', 'margin_mode': ''}] + [ + {'trading_mode': tm.value, 'margin_mode': mm.value} + for tm, mm in resolved['class']._supported_trading_mode_margin_pairs + ] + result.update({ + 'trade_modes': supported_modes, + }) + + return result + + +def list_available_exchanges(all_exchanges: bool) -> List[ValidExchangesType]: """ :return: List of tuples with exchangename, valid, reason. 
""" exchanges = ccxt_exchanges() if all_exchanges else available_exchanges() - exchanges_valid = [ - (e, *validate_exchange(e)) for e in exchanges + from freqtrade.resolvers.exchange_resolver import ExchangeResolver + + subclassed = {e['name'].lower(): e for e in ExchangeResolver.search_all_objects({}, False)} + + exchanges_valid: List[ValidExchangesType] = [ + _build_exchange_list_entry(e, subclassed) for e in exchanges ] + return exchanges_valid diff --git a/freqtrade/exchange/gate.py b/freqtrade/exchange/gate.py index 2ac135fc1..eceab4ec1 100644 --- a/freqtrade/exchange/gate.py +++ b/freqtrade/exchange/gate.py @@ -33,7 +33,6 @@ class Gate(Exchange): _ft_has_futures: Dict = { "needs_trading_fees": True, "marketOrderRequiresPrice": False, - "tickers_have_bid_ask": False, "fee_cost_in_contracts": False, # Set explicitly to false for clarity "order_props_in_contracts": ['amount', 'filled', 'remaining'], "stop_price_type_field": "price_type", diff --git a/freqtrade/exchange/okx.py b/freqtrade/exchange/okx.py index af889897c..c703e3a78 100644 --- a/freqtrade/exchange/okx.py +++ b/freqtrade/exchange/okx.py @@ -125,6 +125,20 @@ class Okx(Exchange): params['posSide'] = self._get_posSide(side, reduceOnly) return params + def __fetch_leverage_already_set(self, pair: str, leverage: float, side: BuySell) -> bool: + try: + res_lev = self._api.fetch_leverage(symbol=pair, params={ + "mgnMode": self.margin_mode.value, + "posSide": self._get_posSide(side, False), + }) + self._log_exchange_response('get_leverage', res_lev) + already_set = all(float(x['lever']) == leverage for x in res_lev['data']) + return already_set + + except ccxt.BaseError: + # Assume all errors as "not set yet" + return False + @retrier def _lev_prep(self, pair: str, leverage: float, side: BuySell, accept_fail: bool = False): if self.trading_mode != TradingMode.SPOT and self.margin_mode is not None: @@ -141,8 +155,11 @@ class Okx(Exchange): except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.NetworkError, ccxt.ExchangeError) as e: - raise TemporaryError( - f'Could not set leverage due to {e.__class__.__name__}. Message: {e}') from e + already_set = self.__fetch_leverage_already_set(pair, leverage, side) + if not already_set: + raise TemporaryError( + f'Could not set leverage due to {e.__class__.__name__}. Message: {e}' + ) from e except ccxt.BaseError as e: raise OperationalException(e) from e @@ -182,6 +199,7 @@ class Okx(Exchange): order_reg['type'] = 'stoploss' order_reg['status_stop'] = 'triggered' return order_reg + order = self._order_contracts_to_amount(order) order['type'] = 'stoploss' return order diff --git a/freqtrade/freqai/RL/BaseEnvironment.py b/freqtrade/freqai/RL/BaseEnvironment.py index 42e644f0a..91c7501c6 100644 --- a/freqtrade/freqai/RL/BaseEnvironment.py +++ b/freqtrade/freqai/RL/BaseEnvironment.py @@ -2,7 +2,7 @@ import logging import random from abc import abstractmethod from enum import Enum -from typing import Optional, Type, Union +from typing import List, Optional, Type, Union import gymnasium as gym import numpy as np @@ -141,6 +141,9 @@ class BaseEnvironment(gym.Env): Unique to the environment action count. Must be inherited. 
""" + def action_masks(self) -> List[bool]: + return [self._is_valid(action.value) for action in self.actions] + def seed(self, seed: int = 1): self.np_random, seed = seeding.np_random(seed) return [seed] diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 8ee3c7c56..06ce772a8 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -13,7 +13,8 @@ import pandas as pd import torch as th import torch.multiprocessing from pandas import DataFrame -from stable_baselines3.common.callbacks import EvalCallback +from sb3_contrib.common.maskable.callbacks import MaskableEvalCallback +from sb3_contrib.common.maskable.utils import is_masking_supported from stable_baselines3.common.monitor import Monitor from stable_baselines3.common.utils import set_random_seed from stable_baselines3.common.vec_env import SubprocVecEnv, VecMonitor @@ -48,7 +49,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): self.reward_params = self.freqai_info['rl_config']['model_reward_parameters'] self.train_env: Union[VecMonitor, SubprocVecEnv, gym.Env] = gym.Env() self.eval_env: Union[VecMonitor, SubprocVecEnv, gym.Env] = gym.Env() - self.eval_callback: Optional[EvalCallback] = None + self.eval_callback: Optional[MaskableEvalCallback] = None self.model_type = self.freqai_info['rl_config']['model_type'] self.rl_config = self.freqai_info['rl_config'] self.df_raw: DataFrame = DataFrame() @@ -82,6 +83,9 @@ class BaseReinforcementLearningModel(IFreqaiModel): if self.ft_params.get('use_DBSCAN_to_remove_outliers', False): self.ft_params.update({'use_DBSCAN_to_remove_outliers': False}) logger.warning('User tried to use DBSCAN with RL. Deactivating DBSCAN.') + if self.ft_params.get('DI_threshold', False): + self.ft_params.update({'DI_threshold': False}) + logger.warning('User tried to use DI_threshold with RL. Deactivating DI_threshold.') if self.freqai_info['data_split_parameters'].get('shuffle', False): self.freqai_info['data_split_parameters'].update({'shuffle': False}) logger.warning('User tried to shuffle training data. 
Setting shuffle to False') @@ -107,27 +111,37 @@ class BaseReinforcementLearningModel(IFreqaiModel): training_filter=True, ) - data_dictionary: Dict[str, Any] = dk.make_train_test_datasets( + dd: Dict[str, Any] = dk.make_train_test_datasets( features_filtered, labels_filtered) - self.df_raw = copy.deepcopy(data_dictionary["train_features"]) + self.df_raw = copy.deepcopy(dd["train_features"]) dk.fit_labels() # FIXME useless for now, but just satiating append methods # normalize all data based on train_dataset only prices_train, prices_test = self.build_ohlc_price_dataframes(dk.data_dictionary, pair, dk) - data_dictionary = dk.normalize_data(data_dictionary) + dk.feature_pipeline = self.define_data_pipeline(threads=dk.thread_count) - # data cleaning/analysis - self.data_cleaning_train(dk) + (dd["train_features"], + dd["train_labels"], + dd["train_weights"]) = dk.feature_pipeline.fit_transform(dd["train_features"], + dd["train_labels"], + dd["train_weights"]) + + if self.freqai_info.get('data_split_parameters', {}).get('test_size', 0.1) != 0: + (dd["test_features"], + dd["test_labels"], + dd["test_weights"]) = dk.feature_pipeline.transform(dd["test_features"], + dd["test_labels"], + dd["test_weights"]) logger.info( f'Training model on {len(dk.data_dictionary["train_features"].columns)}' - f' features and {len(data_dictionary["train_features"])} data points' + f' features and {len(dd["train_features"])} data points' ) - self.set_train_and_eval_environments(data_dictionary, prices_train, prices_test, dk) + self.set_train_and_eval_environments(dd, prices_train, prices_test, dk) - model = self.fit(data_dictionary, dk) + model = self.fit(dd, dk) logger.info(f"--------------------done training {pair}--------------------") @@ -151,9 +165,11 @@ class BaseReinforcementLearningModel(IFreqaiModel): self.train_env = self.MyRLEnv(df=train_df, prices=prices_train, **env_info) self.eval_env = Monitor(self.MyRLEnv(df=test_df, prices=prices_test, **env_info)) - self.eval_callback = EvalCallback(self.eval_env, deterministic=True, - render=False, eval_freq=len(train_df), - best_model_save_path=str(dk.data_path)) + self.eval_callback = MaskableEvalCallback(self.eval_env, deterministic=True, + render=False, eval_freq=len(train_df), + best_model_save_path=str(dk.data_path), + use_masking=(self.model_type == 'MaskablePPO' and + is_masking_supported(self.eval_env))) actions = self.train_env.get_actions() self.tensorboard_callback = TensorboardCallback(verbose=1, actions=actions) @@ -236,13 +252,10 @@ class BaseReinforcementLearningModel(IFreqaiModel): unfiltered_df, dk.training_features_list, training_filter=False ) - filtered_dataframe = self.drop_ohlc_from_df(filtered_dataframe, dk) + dk.data_dictionary["prediction_features"] = self.drop_ohlc_from_df(filtered_dataframe, dk) - filtered_dataframe = dk.normalize_data_from_metadata(filtered_dataframe) - dk.data_dictionary["prediction_features"] = filtered_dataframe - - # optional additional data cleaning/analysis - self.data_cleaning_predict(dk) + dk.data_dictionary["prediction_features"], _, _ = dk.feature_pipeline.transform( + dk.data_dictionary["prediction_features"], outlier_check=True) pred_df = self.rl_model_predict( dk.data_dictionary["prediction_features"], dk, self.model) diff --git a/freqtrade/freqai/base_models/BaseClassifierModel.py b/freqtrade/freqai/base_models/BaseClassifierModel.py index ffd42dd1d..42b5c1a0e 100644 --- a/freqtrade/freqai/base_models/BaseClassifierModel.py +++ b/freqtrade/freqai/base_models/BaseClassifierModel.py @@ -17,8 +17,8 @@ 
logger = logging.getLogger(__name__) class BaseClassifierModel(IFreqaiModel): """ Base class for regression type models (e.g. Catboost, LightGBM, XGboost etc.). - User *must* inherit from this class and set fit() and predict(). See example scripts - such as prediction_models/CatboostPredictionModel.py for guidance. + User *must* inherit from this class and set fit(). See example scripts + such as prediction_models/CatboostClassifier.py for guidance. """ def train( @@ -50,21 +50,30 @@ class BaseClassifierModel(IFreqaiModel): logger.info(f"-------------------- Training on data from {start_date} to " f"{end_date} --------------------") # split data into train/test data. - data_dictionary = dk.make_train_test_datasets(features_filtered, labels_filtered) + dd = dk.make_train_test_datasets(features_filtered, labels_filtered) if not self.freqai_info.get("fit_live_predictions_candles", 0) or not self.live: dk.fit_labels() - # normalize all data based on train_dataset only - data_dictionary = dk.normalize_data(data_dictionary) + dk.feature_pipeline = self.define_data_pipeline(threads=dk.thread_count) - # optional additional data cleaning/analysis - self.data_cleaning_train(dk) + (dd["train_features"], + dd["train_labels"], + dd["train_weights"]) = dk.feature_pipeline.fit_transform(dd["train_features"], + dd["train_labels"], + dd["train_weights"]) + + if self.freqai_info.get('data_split_parameters', {}).get('test_size', 0.1) != 0: + (dd["test_features"], + dd["test_labels"], + dd["test_weights"]) = dk.feature_pipeline.transform(dd["test_features"], + dd["test_labels"], + dd["test_weights"]) logger.info( f"Training model on {len(dk.data_dictionary['train_features'].columns)} features" ) - logger.info(f"Training model on {len(data_dictionary['train_features'])} data points") + logger.info(f"Training model on {len(dd['train_features'])} data points") - model = self.fit(data_dictionary, dk) + model = self.fit(dd, dk) end_time = time() @@ -89,10 +98,11 @@ class BaseClassifierModel(IFreqaiModel): filtered_df, _ = dk.filter_features( unfiltered_df, dk.training_features_list, training_filter=False ) - filtered_df = dk.normalize_data_from_metadata(filtered_df) + dk.data_dictionary["prediction_features"] = filtered_df - self.data_cleaning_predict(dk) + dk.data_dictionary["prediction_features"], outliers, _ = dk.feature_pipeline.transform( + dk.data_dictionary["prediction_features"], outlier_check=True) predictions = self.model.predict(dk.data_dictionary["prediction_features"]) if self.CONV_WIDTH == 1: @@ -107,4 +117,10 @@ class BaseClassifierModel(IFreqaiModel): pred_df = pd.concat([pred_df, pred_df_prob], axis=1) + if dk.feature_pipeline["di"]: + dk.DI_values = dk.feature_pipeline["di"].di_values + else: + dk.DI_values = np.zeros(outliers.shape[0]) + dk.do_predict = outliers + return (pred_df, dk.do_predict) diff --git a/freqtrade/freqai/base_models/BasePyTorchClassifier.py b/freqtrade/freqai/base_models/BasePyTorchClassifier.py index 436294dcc..4780af818 100644 --- a/freqtrade/freqai/base_models/BasePyTorchClassifier.py +++ b/freqtrade/freqai/base_models/BasePyTorchClassifier.py @@ -1,5 +1,6 @@ import logging -from typing import Dict, List, Tuple +from time import time +from typing import Any, Dict, List, Tuple import numpy as np import numpy.typing as npt @@ -35,6 +36,7 @@ class BasePyTorchClassifier(BasePyTorchModel): return dataframe """ + def __init__(self, **kwargs): super().__init__(**kwargs) self.class_name_to_index = None @@ -68,9 +70,12 @@ class BasePyTorchClassifier(BasePyTorchModel): filtered_df, 
_ = dk.filter_features( unfiltered_df, dk.training_features_list, training_filter=False ) - filtered_df = dk.normalize_data_from_metadata(filtered_df) + dk.data_dictionary["prediction_features"] = filtered_df - self.data_cleaning_predict(dk) + + dk.data_dictionary["prediction_features"], outliers, _ = dk.feature_pipeline.transform( + dk.data_dictionary["prediction_features"], outlier_check=True) + x = self.data_convertor.convert_x( dk.data_dictionary["prediction_features"], device=self.device @@ -85,6 +90,13 @@ class BasePyTorchClassifier(BasePyTorchModel): pred_df_prob = DataFrame(probs.detach().tolist(), columns=class_names) pred_df = DataFrame(predicted_classes_str, columns=[dk.label_list[0]]) pred_df = pd.concat([pred_df, pred_df_prob], axis=1) + + if dk.feature_pipeline["di"]: + dk.DI_values = dk.feature_pipeline["di"].di_values + else: + dk.DI_values = np.zeros(outliers.shape[0]) + dk.do_predict = outliers + return (pred_df, dk.do_predict) def encode_class_names( @@ -149,3 +161,58 @@ class BasePyTorchClassifier(BasePyTorchModel): ) return self.class_names + + def train( + self, unfiltered_df: DataFrame, pair: str, dk: FreqaiDataKitchen, **kwargs + ) -> Any: + """ + Filter the training data and train a model to it. Train makes heavy use of the datakitchen + for storing, saving, loading, and analyzing the data. + :param unfiltered_df: Full dataframe for the current training period + :return: + :model: Trained model which can be used to inference (self.predict) + """ + + logger.info(f"-------------------- Starting training {pair} --------------------") + + start_time = time() + + features_filtered, labels_filtered = dk.filter_features( + unfiltered_df, + dk.training_features_list, + dk.label_list, + training_filter=True, + ) + + # split data into train/test data. 
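+ # The feature pipeline is then fit on the training split only; the
+ # test split is transformed with the already-fitted pipeline.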
+ dd = dk.make_train_test_datasets(features_filtered, labels_filtered) + if not self.freqai_info.get("fit_live_predictions_candles", 0) or not self.live: + dk.fit_labels() + + dk.feature_pipeline = self.define_data_pipeline(threads=dk.thread_count) + + (dd["train_features"], + dd["train_labels"], + dd["train_weights"]) = dk.feature_pipeline.fit_transform(dd["train_features"], + dd["train_labels"], + dd["train_weights"]) + + if self.freqai_info.get('data_split_parameters', {}).get('test_size', 0.1) != 0: + (dd["test_features"], + dd["test_labels"], + dd["test_weights"]) = dk.feature_pipeline.transform(dd["test_features"], + dd["test_labels"], + dd["test_weights"]) + + logger.info( + f"Training model on {len(dk.data_dictionary['train_features'].columns)} features" + ) + logger.info(f"Training model on {len(dd['train_features'])} data points") + + model = self.fit(dd, dk) + end_time = time() + + logger.info(f"-------------------- Done training {pair} " + f"({end_time - start_time:.2f} secs) --------------------") + + return model diff --git a/freqtrade/freqai/base_models/BasePyTorchModel.py b/freqtrade/freqai/base_models/BasePyTorchModel.py index 82042d24c..71369a146 100644 --- a/freqtrade/freqai/base_models/BasePyTorchModel.py +++ b/freqtrade/freqai/base_models/BasePyTorchModel.py @@ -1,12 +1,8 @@ import logging from abc import ABC, abstractmethod -from time import time -from typing import Any import torch -from pandas import DataFrame -from freqtrade.freqai.data_kitchen import FreqaiDataKitchen from freqtrade.freqai.freqai_interface import IFreqaiModel from freqtrade.freqai.torch.PyTorchDataConvertor import PyTorchDataConvertor @@ -29,51 +25,6 @@ class BasePyTorchModel(IFreqaiModel, ABC): self.splits = ["train", "test"] if test_size != 0 else ["train"] self.window_size = self.freqai_info.get("conv_width", 1) - def train( - self, unfiltered_df: DataFrame, pair: str, dk: FreqaiDataKitchen, **kwargs - ) -> Any: - """ - Filter the training data and train a model to it. Train makes heavy use of the datakitchen - for storing, saving, loading, and analyzing the data. - :param unfiltered_df: Full dataframe for the current training period - :return: - :model: Trained model which can be used to inference (self.predict) - """ - - logger.info(f"-------------------- Starting training {pair} --------------------") - - start_time = time() - - features_filtered, labels_filtered = dk.filter_features( - unfiltered_df, - dk.training_features_list, - dk.label_list, - training_filter=True, - ) - - # split data into train/test data. 
- data_dictionary = dk.make_train_test_datasets(features_filtered, labels_filtered) - if not self.freqai_info.get("fit_live_predictions", 0) or not self.live: - dk.fit_labels() - # normalize all data based on train_dataset only - data_dictionary = dk.normalize_data(data_dictionary) - - # optional additional data cleaning/analysis - self.data_cleaning_train(dk) - - logger.info( - f"Training model on {len(dk.data_dictionary['train_features'].columns)} features" - ) - logger.info(f"Training model on {len(data_dictionary['train_features'])} data points") - - model = self.fit(data_dictionary, dk) - end_time = time() - - logger.info(f"-------------------- Done training {pair} " - f"({end_time - start_time:.2f} secs) --------------------") - - return model - @property @abstractmethod def data_convertor(self) -> PyTorchDataConvertor: diff --git a/freqtrade/freqai/base_models/BasePyTorchRegressor.py b/freqtrade/freqai/base_models/BasePyTorchRegressor.py index 6139f2e85..83fea4ef9 100644 --- a/freqtrade/freqai/base_models/BasePyTorchRegressor.py +++ b/freqtrade/freqai/base_models/BasePyTorchRegressor.py @@ -1,5 +1,6 @@ import logging -from typing import Tuple +from time import time +from typing import Any, Tuple import numpy as np import numpy.typing as npt @@ -17,6 +18,7 @@ class BasePyTorchRegressor(BasePyTorchModel): A PyTorch implementation of a regressor. User must implement fit method """ + def __init__(self, **kwargs): super().__init__(**kwargs) @@ -36,10 +38,11 @@ class BasePyTorchRegressor(BasePyTorchModel): filtered_df, _ = dk.filter_features( unfiltered_df, dk.training_features_list, training_filter=False ) - filtered_df = dk.normalize_data_from_metadata(filtered_df) dk.data_dictionary["prediction_features"] = filtered_df - self.data_cleaning_predict(dk) + dk.data_dictionary["prediction_features"], outliers, _ = dk.feature_pipeline.transform( + dk.data_dictionary["prediction_features"], outlier_check=True) + x = self.data_convertor.convert_x( dk.data_dictionary["prediction_features"], device=self.device @@ -47,5 +50,71 @@ class BasePyTorchRegressor(BasePyTorchModel): self.model.model.eval() y = self.model.model(x) pred_df = DataFrame(y.detach().tolist(), columns=[dk.label_list[0]]) - pred_df = dk.denormalize_labels_from_metadata(pred_df) + pred_df, _, _ = dk.label_pipeline.inverse_transform(pred_df) + + if dk.feature_pipeline["di"]: + dk.DI_values = dk.feature_pipeline["di"].di_values + else: + dk.DI_values = np.zeros(outliers.shape[0]) + dk.do_predict = outliers return (pred_df, dk.do_predict) + + def train( + self, unfiltered_df: DataFrame, pair: str, dk: FreqaiDataKitchen, **kwargs + ) -> Any: + """ + Filter the training data and train a model to it. Train makes heavy use of the datakitchen + for storing, saving, loading, and analyzing the data. + :param unfiltered_df: Full dataframe for the current training period + :return: + :model: Trained model which can be used to inference (self.predict) + """ + + logger.info(f"-------------------- Starting training {pair} --------------------") + + start_time = time() + + features_filtered, labels_filtered = dk.filter_features( + unfiltered_df, + dk.training_features_list, + dk.label_list, + training_filter=True, + ) + + # split data into train/test data. 
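+ # Unlike the classifier, the regressor also fits a label pipeline,
+ # allowing predict() to inverse_transform predictions back to the
+ # original label scale.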
+ dd = dk.make_train_test_datasets(features_filtered, labels_filtered)
+ if not self.freqai_info.get("fit_live_predictions_candles", 0) or not self.live:
+ dk.fit_labels()
+ dk.feature_pipeline = self.define_data_pipeline(threads=dk.thread_count)
+ dk.label_pipeline = self.define_label_pipeline(threads=dk.thread_count)
+
+ (dd["train_features"],
+ dd["train_labels"],
+ dd["train_weights"]) = dk.feature_pipeline.fit_transform(dd["train_features"],
+ dd["train_labels"],
+ dd["train_weights"])
+ dd["train_labels"], _, _ = dk.label_pipeline.fit_transform(dd["train_labels"])
+
+ if self.freqai_info.get('data_split_parameters', {}).get('test_size', 0.1) != 0:
+ (dd["test_features"],
+ dd["test_labels"],
+ dd["test_weights"]) = dk.feature_pipeline.transform(dd["test_features"],
+ dd["test_labels"],
+ dd["test_weights"])
+ dd["test_labels"], _, _ = dk.label_pipeline.transform(dd["test_labels"])
+
+ logger.info(
+ f"Training model on {len(dk.data_dictionary['train_features'].columns)} features"
+ )
+ logger.info(f"Training model on {len(dd['train_features'])} data points")
+
+ model = self.fit(dd, dk)
+ end_time = time()
+
+ logger.info(f"-------------------- Done training {pair} "
+ f"({end_time - start_time:.2f} secs) --------------------")
+
+ return model
diff --git a/freqtrade/freqai/base_models/BaseRegressionModel.py b/freqtrade/freqai/base_models/BaseRegressionModel.py
index 1f9b4f5a6..179e4be87 100644
--- a/freqtrade/freqai/base_models/BaseRegressionModel.py
+++ b/freqtrade/freqai/base_models/BaseRegressionModel.py
@@ -16,8 +16,8 @@ logger = logging.getLogger(__name__)
 class BaseRegressionModel(IFreqaiModel):
 """
 Base class for regression type models (e.g. Catboost, LightGBM, XGboost etc.).
- User *must* inherit from this class and set fit() and predict(). See example scripts
- such as prediction_models/CatboostPredictionModel.py for guidance.
+ User *must* inherit from this class and set fit(). See example scripts
+ such as prediction_models/CatboostRegressor.py for guidance.
 """

 def train(
@@ -49,21 +49,33 @@
 logger.info(f"-------------------- Training on data from {start_date} to "
 f"{end_date} --------------------")
 # split data into train/test data.
- data_dictionary = dk.make_train_test_datasets(features_filtered, labels_filtered) + dd = dk.make_train_test_datasets(features_filtered, labels_filtered) if not self.freqai_info.get("fit_live_predictions_candles", 0) or not self.live: dk.fit_labels() - # normalize all data based on train_dataset only - data_dictionary = dk.normalize_data(data_dictionary) + dk.feature_pipeline = self.define_data_pipeline(threads=dk.thread_count) + dk.label_pipeline = self.define_label_pipeline(threads=dk.thread_count) - # optional additional data cleaning/analysis - self.data_cleaning_train(dk) + (dd["train_features"], + dd["train_labels"], + dd["train_weights"]) = dk.feature_pipeline.fit_transform(dd["train_features"], + dd["train_labels"], + dd["train_weights"]) + dd["train_labels"], _, _ = dk.label_pipeline.fit_transform(dd["train_labels"]) + + if self.freqai_info.get('data_split_parameters', {}).get('test_size', 0.1) != 0: + (dd["test_features"], + dd["test_labels"], + dd["test_weights"]) = dk.feature_pipeline.transform(dd["test_features"], + dd["test_labels"], + dd["test_weights"]) + dd["test_labels"], _, _ = dk.label_pipeline.transform(dd["test_labels"]) logger.info( f"Training model on {len(dk.data_dictionary['train_features'].columns)} features" ) - logger.info(f"Training model on {len(data_dictionary['train_features'])} data points") + logger.info(f"Training model on {len(dd['train_features'])} data points") - model = self.fit(data_dictionary, dk) + model = self.fit(dd, dk) end_time = time() @@ -85,14 +97,12 @@ class BaseRegressionModel(IFreqaiModel): """ dk.find_features(unfiltered_df) - filtered_df, _ = dk.filter_features( + dk.data_dictionary["prediction_features"], _ = dk.filter_features( unfiltered_df, dk.training_features_list, training_filter=False ) - filtered_df = dk.normalize_data_from_metadata(filtered_df) - dk.data_dictionary["prediction_features"] = filtered_df - # optional additional data cleaning/analysis - self.data_cleaning_predict(dk) + dk.data_dictionary["prediction_features"], outliers, _ = dk.feature_pipeline.transform( + dk.data_dictionary["prediction_features"], outlier_check=True) predictions = self.model.predict(dk.data_dictionary["prediction_features"]) if self.CONV_WIDTH == 1: @@ -100,6 +110,11 @@ class BaseRegressionModel(IFreqaiModel): pred_df = DataFrame(predictions, columns=dk.label_list) - pred_df = dk.denormalize_labels_from_metadata(pred_df) + pred_df, _, _ = dk.label_pipeline.inverse_transform(pred_df) + if dk.feature_pipeline["di"]: + dk.DI_values = dk.feature_pipeline["di"].di_values + else: + dk.DI_values = np.zeros(outliers.shape[0]) + dk.do_predict = outliers return (pred_df, dk.do_predict) diff --git a/freqtrade/freqai/base_models/BaseTensorFlowModel.py b/freqtrade/freqai/base_models/BaseTensorFlowModel.py deleted file mode 100644 index b41ee0175..000000000 --- a/freqtrade/freqai/base_models/BaseTensorFlowModel.py +++ /dev/null @@ -1,70 +0,0 @@ -import logging -from time import time -from typing import Any - -from pandas import DataFrame - -from freqtrade.freqai.data_kitchen import FreqaiDataKitchen -from freqtrade.freqai.freqai_interface import IFreqaiModel - - -logger = logging.getLogger(__name__) - - -class BaseTensorFlowModel(IFreqaiModel): - """ - Base class for TensorFlow type models. - User *must* inherit from this class and set fit() and predict(). - """ - - def train( - self, unfiltered_df: DataFrame, pair: str, dk: FreqaiDataKitchen, **kwargs - ) -> Any: - """ - Filter the training data and train a model to it. 
Train makes heavy use of the datakitchen - for storing, saving, loading, and analyzing the data. - :param unfiltered_df: Full dataframe for the current training period - :param metadata: pair metadata from strategy. - :return: - :model: Trained model which can be used to inference (self.predict) - """ - - logger.info(f"-------------------- Starting training {pair} --------------------") - - start_time = time() - - # filter the features requested by user in the configuration file and elegantly handle NaNs - features_filtered, labels_filtered = dk.filter_features( - unfiltered_df, - dk.training_features_list, - dk.label_list, - training_filter=True, - ) - - start_date = unfiltered_df["date"].iloc[0].strftime("%Y-%m-%d") - end_date = unfiltered_df["date"].iloc[-1].strftime("%Y-%m-%d") - logger.info(f"-------------------- Training on data from {start_date} to " - f"{end_date} --------------------") - # split data into train/test data. - data_dictionary = dk.make_train_test_datasets(features_filtered, labels_filtered) - if not self.freqai_info.get("fit_live_predictions_candles", 0) or not self.live: - dk.fit_labels() - # normalize all data based on train_dataset only - data_dictionary = dk.normalize_data(data_dictionary) - - # optional additional data cleaning/analysis - self.data_cleaning_train(dk) - - logger.info( - f"Training model on {len(dk.data_dictionary['train_features'].columns)} features" - ) - logger.info(f"Training model on {len(data_dictionary['train_features'])} data points") - - model = self.fit(data_dictionary, dk) - - end_time = time() - - logger.info(f"-------------------- Done training {pair} " - f"({end_time - start_time:.2f} secs) --------------------") - - return model diff --git a/freqtrade/freqai/data_drawer.py b/freqtrade/freqai/data_drawer.py index b68a9dcad..5fdb8be25 100644 --- a/freqtrade/freqai/data_drawer.py +++ b/freqtrade/freqai/data_drawer.py @@ -20,6 +20,7 @@ from pandas import DataFrame from freqtrade.configuration import TimeRange from freqtrade.constants import Config from freqtrade.data.history import load_pair_history +from freqtrade.enums import CandleType from freqtrade.exceptions import OperationalException from freqtrade.freqai.data_kitchen import FreqaiDataKitchen from freqtrade.strategy.interface import IStrategy @@ -27,6 +28,11 @@ from freqtrade.strategy.interface import IStrategy logger = logging.getLogger(__name__) +FEATURE_PIPELINE = "feature_pipeline" +LABEL_PIPELINE = "label_pipeline" +TRAINDF = "trained_df" +METADATA = "metadata" + class pair_info(TypedDict): model_filename: str @@ -424,7 +430,7 @@ class FreqaiDataDrawer: dk.data["training_features_list"] = list(dk.data_dictionary["train_features"].columns) dk.data["label_list"] = dk.label_list - with (save_path / f"{dk.model_filename}_metadata.json").open("w") as fp: + with (save_path / f"{dk.model_filename}_{METADATA}.json").open("w") as fp: rapidjson.dump(dk.data, fp, default=self.np_encoder, number_mode=rapidjson.NM_NATIVE) return @@ -449,39 +455,39 @@ class FreqaiDataDrawer: elif self.model_type in ["stable_baselines3", "sb3_contrib", "pytorch"]: model.save(save_path / f"{dk.model_filename}_model.zip") - if dk.svm_model is not None: - dump(dk.svm_model, save_path / f"{dk.model_filename}_svm_model.joblib") - dk.data["data_path"] = str(dk.data_path) dk.data["model_filename"] = str(dk.model_filename) dk.data["training_features_list"] = dk.training_features_list dk.data["label_list"] = dk.label_list # store the metadata - with (save_path / f"{dk.model_filename}_metadata.json").open("w") as 
fp: + with (save_path / f"{dk.model_filename}_{METADATA}.json").open("w") as fp: rapidjson.dump(dk.data, fp, default=self.np_encoder, number_mode=rapidjson.NM_NATIVE) - # save the train data to file so we can check preds for area of applicability later + # save the pipelines to pickle files + with (save_path / f"{dk.model_filename}_{FEATURE_PIPELINE}.pkl").open("wb") as fp: + cloudpickle.dump(dk.feature_pipeline, fp) + + with (save_path / f"{dk.model_filename}_{LABEL_PIPELINE}.pkl").open("wb") as fp: + cloudpickle.dump(dk.label_pipeline, fp) + + # save the train data to file for post processing if desired dk.data_dictionary["train_features"].to_pickle( - save_path / f"{dk.model_filename}_trained_df.pkl" + save_path / f"{dk.model_filename}_{TRAINDF}.pkl" ) dk.data_dictionary["train_dates"].to_pickle( save_path / f"{dk.model_filename}_trained_dates_df.pkl" ) - if self.freqai_info["feature_parameters"].get("principal_component_analysis"): - cloudpickle.dump( - dk.pca, (dk.data_path / f"{dk.model_filename}_pca_object.pkl").open("wb") - ) - self.model_dictionary[coin] = model self.pair_dict[coin]["model_filename"] = dk.model_filename self.pair_dict[coin]["data_path"] = str(dk.data_path) if coin not in self.meta_data_dictionary: self.meta_data_dictionary[coin] = {} - self.meta_data_dictionary[coin]["train_df"] = dk.data_dictionary["train_features"] - self.meta_data_dictionary[coin]["meta_data"] = dk.data + self.meta_data_dictionary[coin][METADATA] = dk.data + self.meta_data_dictionary[coin][FEATURE_PIPELINE] = dk.feature_pipeline + self.meta_data_dictionary[coin][LABEL_PIPELINE] = dk.label_pipeline self.save_drawer_to_disk() return @@ -491,7 +497,7 @@ class FreqaiDataDrawer: Load only metadata into datakitchen to increase performance during presaved backtesting (prediction file loading). 
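+ The metadata file is resolved as {model_filename}_{METADATA}.json,
+ using the module-level METADATA constant.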
""" - with (dk.data_path / f"{dk.model_filename}_metadata.json").open("r") as fp: + with (dk.data_path / f"{dk.model_filename}_{METADATA}.json").open("r") as fp: dk.data = rapidjson.load(fp, number_mode=rapidjson.NM_NATIVE) dk.training_features_list = dk.data["training_features_list"] dk.label_list = dk.data["label_list"] @@ -511,15 +517,17 @@ class FreqaiDataDrawer: dk.data_path = Path(self.pair_dict[coin]["data_path"]) if coin in self.meta_data_dictionary: - dk.data = self.meta_data_dictionary[coin]["meta_data"] - dk.data_dictionary["train_features"] = self.meta_data_dictionary[coin]["train_df"] + dk.data = self.meta_data_dictionary[coin][METADATA] + dk.feature_pipeline = self.meta_data_dictionary[coin][FEATURE_PIPELINE] + dk.label_pipeline = self.meta_data_dictionary[coin][LABEL_PIPELINE] else: - with (dk.data_path / f"{dk.model_filename}_metadata.json").open("r") as fp: + with (dk.data_path / f"{dk.model_filename}_{METADATA}.json").open("r") as fp: dk.data = rapidjson.load(fp, number_mode=rapidjson.NM_NATIVE) - dk.data_dictionary["train_features"] = pd.read_pickle( - dk.data_path / f"{dk.model_filename}_trained_df.pkl" - ) + with (dk.data_path / f"{dk.model_filename}_{FEATURE_PIPELINE}.pkl").open("rb") as fp: + dk.feature_pipeline = cloudpickle.load(fp) + with (dk.data_path / f"{dk.model_filename}_{LABEL_PIPELINE}.pkl").open("rb") as fp: + dk.label_pipeline = cloudpickle.load(fp) dk.training_features_list = dk.data["training_features_list"] dk.label_list = dk.data["label_list"] @@ -529,9 +537,6 @@ class FreqaiDataDrawer: model = self.model_dictionary[coin] elif self.model_type == 'joblib': model = load(dk.data_path / f"{dk.model_filename}_model.joblib") - elif self.model_type == 'keras': - from tensorflow import keras - model = keras.models.load_model(dk.data_path / f"{dk.model_filename}_model.h5") elif 'stable_baselines' in self.model_type or 'sb3_contrib' == self.model_type: mod = importlib.import_module( self.model_type, self.freqai_info['rl_config']['model_type']) @@ -543,9 +548,6 @@ class FreqaiDataDrawer: model = zip["pytrainer"] model = model.load_from_checkpoint(zip) - if Path(dk.data_path / f"{dk.model_filename}_svm_model.joblib").is_file(): - dk.svm_model = load(dk.data_path / f"{dk.model_filename}_svm_model.joblib") - if not model: raise OperationalException( f"Unable to load model, ensure model exists at " f"{dk.data_path} " @@ -555,11 +557,6 @@ class FreqaiDataDrawer: if coin not in self.model_dictionary: self.model_dictionary[coin] = model - if self.config["freqai"]["feature_parameters"]["principal_component_analysis"]: - dk.pca = cloudpickle.load( - (dk.data_path / f"{dk.model_filename}_pca_object.pkl").open("rb") - ) - return model def update_historic_data(self, strategy: IStrategy, dk: FreqaiDataKitchen) -> None: @@ -639,7 +636,7 @@ class FreqaiDataDrawer: pair=pair, timerange=timerange, data_format=self.config.get("dataformat_ohlcv", "json"), - candle_type=self.config.get("trading_mode", "spot"), + candle_type=self.config.get("candle_type_def", CandleType.SPOT), ) def get_base_and_corr_dataframes( diff --git a/freqtrade/freqai/data_kitchen.py b/freqtrade/freqai/data_kitchen.py index 21b41db2d..7d4bf39ca 100644 --- a/freqtrade/freqai/data_kitchen.py +++ b/freqtrade/freqai/data_kitchen.py @@ -4,7 +4,6 @@ import logging import random import shutil from datetime import datetime, timezone -from math import cos, sin from pathlib import Path from typing import Any, Dict, List, Optional, Tuple @@ -12,16 +11,12 @@ import numpy as np import numpy.typing as npt import pandas as 
pd import psutil +from datasieve.pipeline import Pipeline from pandas import DataFrame -from scipy import stats -from sklearn import linear_model -from sklearn.cluster import DBSCAN -from sklearn.metrics.pairwise import pairwise_distances from sklearn.model_selection import train_test_split -from sklearn.neighbors import NearestNeighbors from freqtrade.configuration import TimeRange -from freqtrade.constants import Config +from freqtrade.constants import DOCS_LINK, Config from freqtrade.data.converter import reduce_dataframe_footprint from freqtrade.exceptions import OperationalException from freqtrade.exchange import timeframe_to_seconds @@ -81,11 +76,12 @@ class FreqaiDataKitchen: self.backtest_predictions_folder: str = "backtesting_predictions" self.live = live self.pair = pair - - self.svm_model: linear_model.SGDOneClassSVM = None self.keras: bool = self.freqai_config.get("keras", False) self.set_all_pairs() self.backtest_live_models = config.get("freqai_backtest_live_models", False) + self.feature_pipeline = Pipeline() + self.label_pipeline = Pipeline() + self.DI_values: npt.NDArray = np.array([]) if not self.live: self.full_path = self.get_full_models_path(self.config) @@ -227,13 +223,7 @@ class FreqaiDataKitchen: drop_index = pd.isnull(filtered_df).any(axis=1) # get the rows that have NaNs, drop_index = drop_index.replace(True, 1).replace(False, 0) # pep8 requirement. if (training_filter): - const_cols = list((filtered_df.nunique() == 1).loc[lambda x: x].index) - if const_cols: - filtered_df = filtered_df.filter(filtered_df.columns.difference(const_cols)) - self.data['constant_features_list'] = const_cols - logger.warning(f"Removed features {const_cols} with constant values.") - else: - self.data['constant_features_list'] = [] + # we don't care about total row number (total no. datapoints) in training, we only care # about removing any row with NaNs # if labels has multiple columns (user wants to train multiple modelEs), we detect here @@ -264,8 +254,7 @@ class FreqaiDataKitchen: self.data["filter_drop_index_training"] = drop_index else: - if 'constant_features_list' in self.data and len(self.data['constant_features_list']): - filtered_df = self.check_pred_labels(filtered_df) + # we are backtesting so we need to preserve row number to send back to strategy, # so now we use do_predict to avoid any prediction based on a NaN drop_index = pd.isnull(filtered_df).any(axis=1) @@ -307,107 +296,6 @@ class FreqaiDataKitchen: return self.data_dictionary - def normalize_data(self, data_dictionary: Dict) -> Dict[Any, Any]: - """ - Normalize all data in the data_dictionary according to the training dataset - :param data_dictionary: dictionary containing the cleaned and - split training/test data/labels - :returns: - :data_dictionary: updated dictionary with standardized values. 
- """ - - # standardize the data by training stats - train_max = data_dictionary["train_features"].max() - train_min = data_dictionary["train_features"].min() - data_dictionary["train_features"] = ( - 2 * (data_dictionary["train_features"] - train_min) / (train_max - train_min) - 1 - ) - data_dictionary["test_features"] = ( - 2 * (data_dictionary["test_features"] - train_min) / (train_max - train_min) - 1 - ) - - for item in train_max.keys(): - self.data[item + "_max"] = train_max[item] - self.data[item + "_min"] = train_min[item] - - for item in data_dictionary["train_labels"].keys(): - if data_dictionary["train_labels"][item].dtype == object: - continue - train_labels_max = data_dictionary["train_labels"][item].max() - train_labels_min = data_dictionary["train_labels"][item].min() - data_dictionary["train_labels"][item] = ( - 2 - * (data_dictionary["train_labels"][item] - train_labels_min) - / (train_labels_max - train_labels_min) - - 1 - ) - if self.freqai_config.get('data_split_parameters', {}).get('test_size', 0.1) != 0: - data_dictionary["test_labels"][item] = ( - 2 - * (data_dictionary["test_labels"][item] - train_labels_min) - / (train_labels_max - train_labels_min) - - 1 - ) - - self.data[f"{item}_max"] = train_labels_max - self.data[f"{item}_min"] = train_labels_min - return data_dictionary - - def normalize_single_dataframe(self, df: DataFrame) -> DataFrame: - - train_max = df.max() - train_min = df.min() - df = ( - 2 * (df - train_min) / (train_max - train_min) - 1 - ) - - for item in train_max.keys(): - self.data[item + "_max"] = train_max[item] - self.data[item + "_min"] = train_min[item] - - return df - - def normalize_data_from_metadata(self, df: DataFrame) -> DataFrame: - """ - Normalize a set of data using the mean and standard deviation from - the associated training data. - :param df: Dataframe to be standardized - """ - - train_max = [None] * len(df.keys()) - train_min = [None] * len(df.keys()) - - for i, item in enumerate(df.keys()): - train_max[i] = self.data[f"{item}_max"] - train_min[i] = self.data[f"{item}_min"] - - train_max_series = pd.Series(train_max, index=df.keys()) - train_min_series = pd.Series(train_min, index=df.keys()) - - df = ( - 2 * (df - train_min_series) / (train_max_series - train_min_series) - 1 - ) - - return df - - def denormalize_labels_from_metadata(self, df: DataFrame) -> DataFrame: - """ - Denormalize a set of data using the mean and standard deviation from - the associated training data. - :param df: Dataframe of predictions to be denormalized - """ - - for label in df.columns: - if df[label].dtype == object or label in self.unique_class_list: - continue - df[label] = ( - (df[label] + 1) - * (self.data[f"{label}_max"] - self.data[f"{label}_min"]) - / 2 - ) + self.data[f"{label}_min"] - - return df - def split_timerange( self, tr: str, train_split: int = 28, bt_split: float = 7 ) -> Tuple[list, list]: @@ -452,9 +340,7 @@ class FreqaiDataKitchen: tr_training_list_timerange.append(copy.deepcopy(timerange_train)) # associated backtest period - timerange_backtest.startts = timerange_train.stopts - timerange_backtest.stopts = timerange_backtest.startts + int(bt_period) if timerange_backtest.stopts > config_timerange.stopts: @@ -485,426 +371,6 @@ class FreqaiDataKitchen: return df - def check_pred_labels(self, df_predictions: DataFrame) -> DataFrame: - """ - Check that prediction feature labels match training feature labels. 
- :param df_predictions: incoming predictions - """ - constant_labels = self.data['constant_features_list'] - df_predictions = df_predictions.filter( - df_predictions.columns.difference(constant_labels) - ) - logger.warning( - f"Removed {len(constant_labels)} features from prediction features, " - f"these were considered constant values during most recent training." - ) - - return df_predictions - - def principal_component_analysis(self) -> None: - """ - Performs Principal Component Analysis on the data for dimensionality reduction - and outlier detection (see self.remove_outliers()) - No parameters or returns, it acts on the data_dictionary held by the DataHandler. - """ - - from sklearn.decomposition import PCA # avoid importing if we dont need it - - pca = PCA(0.999) - pca = pca.fit(self.data_dictionary["train_features"]) - n_keep_components = pca.n_components_ - self.data["n_kept_components"] = n_keep_components - n_components = self.data_dictionary["train_features"].shape[1] - logger.info("reduced feature dimension by %s", n_components - n_keep_components) - logger.info("explained variance %f", np.sum(pca.explained_variance_ratio_)) - - train_components = pca.transform(self.data_dictionary["train_features"]) - self.data_dictionary["train_features"] = pd.DataFrame( - data=train_components, - columns=["PC" + str(i) for i in range(0, n_keep_components)], - index=self.data_dictionary["train_features"].index, - ) - # normalsing transformed training features - self.data_dictionary["train_features"] = self.normalize_single_dataframe( - self.data_dictionary["train_features"]) - - # keeping a copy of the non-transformed features so we can check for errors during - # model load from disk - self.data["training_features_list_raw"] = copy.deepcopy(self.training_features_list) - self.training_features_list = self.data_dictionary["train_features"].columns - - if self.freqai_config.get('data_split_parameters', {}).get('test_size', 0.1) != 0: - test_components = pca.transform(self.data_dictionary["test_features"]) - self.data_dictionary["test_features"] = pd.DataFrame( - data=test_components, - columns=["PC" + str(i) for i in range(0, n_keep_components)], - index=self.data_dictionary["test_features"].index, - ) - # normalise transformed test feature to transformed training features - self.data_dictionary["test_features"] = self.normalize_data_from_metadata( - self.data_dictionary["test_features"]) - - self.data["n_kept_components"] = n_keep_components - self.pca = pca - - logger.info(f"PCA reduced total features from {n_components} to {n_keep_components}") - - if not self.data_path.is_dir(): - self.data_path.mkdir(parents=True, exist_ok=True) - - return None - - def pca_transform(self, filtered_dataframe: DataFrame) -> None: - """ - Use an existing pca transform to transform data into components - :param filtered_dataframe: DataFrame = the cleaned dataframe - """ - pca_components = self.pca.transform(filtered_dataframe) - self.data_dictionary["prediction_features"] = pd.DataFrame( - data=pca_components, - columns=["PC" + str(i) for i in range(0, self.data["n_kept_components"])], - index=filtered_dataframe.index, - ) - # normalise transformed predictions to transformed training features - self.data_dictionary["prediction_features"] = self.normalize_data_from_metadata( - self.data_dictionary["prediction_features"]) - - def compute_distances(self) -> float: - """ - Compute distances between each training point and every other training - point. 
This metric defines the neighborhood of trained data and is used - for prediction confidence in the Dissimilarity Index - """ - # logger.info("computing average mean distance for all training points") - pairwise = pairwise_distances( - self.data_dictionary["train_features"], n_jobs=self.thread_count) - # remove the diagonal distances which are itself distances ~0 - np.fill_diagonal(pairwise, np.NaN) - pairwise = pairwise.reshape(-1, 1) - avg_mean_dist = pairwise[~np.isnan(pairwise)].mean() - - return avg_mean_dist - - def get_outlier_percentage(self, dropped_pts: npt.NDArray) -> float: - """ - Check if more than X% of points werer dropped during outlier detection. - """ - outlier_protection_pct = self.freqai_config["feature_parameters"].get( - "outlier_protection_percentage", 30) - outlier_pct = (dropped_pts.sum() / len(dropped_pts)) * 100 - if outlier_pct >= outlier_protection_pct: - return outlier_pct - else: - return 0.0 - - def use_SVM_to_remove_outliers(self, predict: bool) -> None: - """ - Build/inference a Support Vector Machine to detect outliers - in training data and prediction - :param predict: bool = If true, inference an existing SVM model, else construct one - """ - - if self.keras: - logger.warning( - "SVM outlier removal not currently supported for Keras based models. " - "Skipping user requested function." - ) - if predict: - self.do_predict = np.ones(len(self.data_dictionary["prediction_features"])) - return - - if predict: - if not self.svm_model: - logger.warning("No svm model available for outlier removal") - return - y_pred = self.svm_model.predict(self.data_dictionary["prediction_features"]) - do_predict = np.where(y_pred == -1, 0, y_pred) - - if (len(do_predict) - do_predict.sum()) > 0: - logger.info(f"SVM tossed {len(do_predict) - do_predict.sum()} predictions.") - self.do_predict += do_predict - self.do_predict -= 1 - - else: - # use SGDOneClassSVM to increase speed? - svm_params = self.freqai_config["feature_parameters"].get( - "svm_params", {"shuffle": False, "nu": 0.1}) - self.svm_model = linear_model.SGDOneClassSVM(**svm_params).fit( - self.data_dictionary["train_features"] - ) - y_pred = self.svm_model.predict(self.data_dictionary["train_features"]) - kept_points = np.where(y_pred == -1, 0, y_pred) - # keep_index = np.where(y_pred == 1) - outlier_pct = self.get_outlier_percentage(1 - kept_points) - if outlier_pct: - logger.warning( - f"SVM detected {outlier_pct:.2f}% of the points as outliers. " - f"Keeping original dataset." - ) - self.svm_model = None - return - - self.data_dictionary["train_features"] = self.data_dictionary["train_features"][ - (y_pred == 1) - ] - self.data_dictionary["train_labels"] = self.data_dictionary["train_labels"][ - (y_pred == 1) - ] - self.data_dictionary["train_weights"] = self.data_dictionary["train_weights"][ - (y_pred == 1) - ] - - logger.info( - f"SVM tossed {len(y_pred) - kept_points.sum()}" - f" train points from {len(y_pred)} total points." 
- ) - - # same for test data - # TODO: This (and the part above) could be refactored into a separate function - # to reduce code duplication - if self.freqai_config['data_split_parameters'].get('test_size', 0.1) != 0: - y_pred = self.svm_model.predict(self.data_dictionary["test_features"]) - kept_points = np.where(y_pred == -1, 0, y_pred) - self.data_dictionary["test_features"] = self.data_dictionary["test_features"][ - (y_pred == 1) - ] - self.data_dictionary["test_labels"] = self.data_dictionary["test_labels"][( - y_pred == 1)] - self.data_dictionary["test_weights"] = self.data_dictionary["test_weights"][ - (y_pred == 1) - ] - - logger.info( - f"{self.pair}: SVM tossed {len(y_pred) - kept_points.sum()}" - f" test points from {len(y_pred)} total points." - ) - - return - - def use_DBSCAN_to_remove_outliers(self, predict: bool, eps=None) -> None: - """ - Use DBSCAN to cluster training data and remove "noisy" data (read outliers). - User controls this via the config param `DBSCAN_outlier_pct` which indicates the - pct of training data that they want to be considered outliers. - :param predict: bool = If False (training), iterate to find the best hyper parameters - to match user requested outlier percent target. - If True (prediction), use the parameters determined from - the previous training to estimate if the current prediction point - is an outlier. - """ - - if predict: - if not self.data['DBSCAN_eps']: - return - train_ft_df = self.data_dictionary['train_features'] - pred_ft_df = self.data_dictionary['prediction_features'] - num_preds = len(pred_ft_df) - df = pd.concat([train_ft_df, pred_ft_df], axis=0, ignore_index=True) - clustering = DBSCAN(eps=self.data['DBSCAN_eps'], - min_samples=self.data['DBSCAN_min_samples'], - n_jobs=self.thread_count - ).fit(df) - do_predict = np.where(clustering.labels_[-num_preds:] == -1, 0, 1) - - if (len(do_predict) - do_predict.sum()) > 0: - logger.info(f"DBSCAN tossed {len(do_predict) - do_predict.sum()} predictions") - self.do_predict += do_predict - self.do_predict -= 1 - - else: - - def normalise_distances(distances): - normalised_distances = (distances - distances.min()) / \ - (distances.max() - distances.min()) - return normalised_distances - - def rotate_point(origin, point, angle): - # rotate a point counterclockwise by a given angle (in radians) - # around a given origin - x = origin[0] + cos(angle) * (point[0] - origin[0]) - \ - sin(angle) * (point[1] - origin[1]) - y = origin[1] + sin(angle) * (point[0] - origin[0]) + \ - cos(angle) * (point[1] - origin[1]) - return (x, y) - - MinPts = int(len(self.data_dictionary['train_features'].index) * 0.25) - # measure pairwise distances to nearest neighbours - neighbors = NearestNeighbors( - n_neighbors=MinPts, n_jobs=self.thread_count) - neighbors_fit = neighbors.fit(self.data_dictionary['train_features']) - distances, _ = neighbors_fit.kneighbors(self.data_dictionary['train_features']) - distances = np.sort(distances, axis=0).mean(axis=1) - - normalised_distances = normalise_distances(distances) - x_range = np.linspace(0, 1, len(distances)) - line = np.linspace(normalised_distances[0], - normalised_distances[-1], len(normalised_distances)) - deflection = np.abs(normalised_distances - line) - max_deflection_loc = np.where(deflection == deflection.max())[0][0] - origin = x_range[max_deflection_loc], line[max_deflection_loc] - point = x_range[max_deflection_loc], normalised_distances[max_deflection_loc] - rot_angle = np.pi / 4 - elbow_loc = rotate_point(origin, point, rot_angle) - - epsilon = 
elbow_loc[1] * (distances[-1] - distances[0]) + distances[0] - - clustering = DBSCAN(eps=epsilon, min_samples=MinPts, - n_jobs=int(self.thread_count)).fit( - self.data_dictionary['train_features'] - ) - - logger.info(f'DBSCAN found eps of {epsilon:.2f}.') - - self.data['DBSCAN_eps'] = epsilon - self.data['DBSCAN_min_samples'] = MinPts - dropped_points = np.where(clustering.labels_ == -1, 1, 0) - - outlier_pct = self.get_outlier_percentage(dropped_points) - if outlier_pct: - logger.warning( - f"DBSCAN detected {outlier_pct:.2f}% of the points as outliers. " - f"Keeping original dataset." - ) - self.data['DBSCAN_eps'] = 0 - return - - self.data_dictionary['train_features'] = self.data_dictionary['train_features'][ - (clustering.labels_ != -1) - ] - self.data_dictionary["train_labels"] = self.data_dictionary["train_labels"][ - (clustering.labels_ != -1) - ] - self.data_dictionary["train_weights"] = self.data_dictionary["train_weights"][ - (clustering.labels_ != -1) - ] - - logger.info( - f"DBSCAN tossed {dropped_points.sum()}" - f" train points from {len(clustering.labels_)}" - ) - - return - - def compute_inlier_metric(self, set_='train') -> None: - """ - Compute inlier metric from backwards distance distributions. - This metric defines how well features from a timepoint fit - into previous timepoints. - """ - - def normalise(dataframe: DataFrame, key: str) -> DataFrame: - if set_ == 'train': - min_value = dataframe.min() - max_value = dataframe.max() - self.data[f'{key}_min'] = min_value - self.data[f'{key}_max'] = max_value - else: - min_value = self.data[f'{key}_min'] - max_value = self.data[f'{key}_max'] - return (dataframe - min_value) / (max_value - min_value) - - no_prev_pts = self.freqai_config["feature_parameters"]["inlier_metric_window"] - - if set_ == 'train': - compute_df = copy.deepcopy(self.data_dictionary['train_features']) - elif set_ == 'test': - compute_df = copy.deepcopy(self.data_dictionary['test_features']) - else: - compute_df = copy.deepcopy(self.data_dictionary['prediction_features']) - - compute_df_reindexed = compute_df.reindex( - index=np.flip(compute_df.index) - ) - - pairwise = pd.DataFrame( - np.triu( - pairwise_distances(compute_df_reindexed, n_jobs=self.thread_count) - ), - columns=compute_df_reindexed.index, - index=compute_df_reindexed.index - ) - pairwise = pairwise.round(5) - - column_labels = [ - '{}{}'.format('d', i) for i in range(1, no_prev_pts + 1) - ] - distances = pd.DataFrame( - columns=column_labels, index=compute_df.index - ) - - for index in compute_df.index[no_prev_pts:]: - current_row = pairwise.loc[[index]] - current_row_no_zeros = current_row.loc[ - :, (current_row != 0).any(axis=0) - ] - distances.loc[[index]] = current_row_no_zeros.iloc[ - :, :no_prev_pts - ] - distances = distances.replace([np.inf, -np.inf], np.nan) - drop_index = pd.isnull(distances).any(axis=1) - distances = distances[drop_index == 0] - - inliers = pd.DataFrame(index=distances.index) - for key in distances.keys(): - current_distances = distances[key].dropna() - current_distances = normalise(current_distances, key) - if set_ == 'train': - fit_params = stats.weibull_min.fit(current_distances) - self.data[f'{key}_fit_params'] = fit_params - else: - fit_params = self.data[f'{key}_fit_params'] - quantiles = stats.weibull_min.cdf(current_distances, *fit_params) - - df_inlier = pd.DataFrame( - {key: quantiles}, index=distances.index - ) - inliers = pd.concat( - [inliers, df_inlier], axis=1 - ) - - inlier_metric = pd.DataFrame( - data=inliers.sum(axis=1) / no_prev_pts, - 
columns=['%-inlier_metric'], - index=compute_df.index - ) - - inlier_metric = (2 * (inlier_metric - inlier_metric.min()) / - (inlier_metric.max() - inlier_metric.min()) - 1) - - if set_ in ('train', 'test'): - inlier_metric = inlier_metric.iloc[no_prev_pts:] - compute_df = compute_df.iloc[no_prev_pts:] - self.remove_beginning_points_from_data_dict(set_, no_prev_pts) - self.data_dictionary[f'{set_}_features'] = pd.concat( - [compute_df, inlier_metric], axis=1) - else: - self.data_dictionary['prediction_features'] = pd.concat( - [compute_df, inlier_metric], axis=1) - self.data_dictionary['prediction_features'].fillna(0, inplace=True) - - logger.info('Inlier metric computed and added to features.') - - return None - - def remove_beginning_points_from_data_dict(self, set_='train', no_prev_pts: int = 10): - features = self.data_dictionary[f'{set_}_features'] - weights = self.data_dictionary[f'{set_}_weights'] - labels = self.data_dictionary[f'{set_}_labels'] - self.data_dictionary[f'{set_}_weights'] = weights[no_prev_pts:] - self.data_dictionary[f'{set_}_features'] = features.iloc[no_prev_pts:] - self.data_dictionary[f'{set_}_labels'] = labels.iloc[no_prev_pts:] - - def add_noise_to_training_features(self) -> None: - """ - Add noise to train features to reduce the risk of overfitting. - """ - mu = 0 # no shift - sigma = self.freqai_config["feature_parameters"]["noise_standard_deviation"] - compute_df = self.data_dictionary['train_features'] - noise = np.random.normal(mu, sigma, [compute_df.shape[0], compute_df.shape[1]]) - self.data_dictionary['train_features'] += noise - return - def find_features(self, dataframe: DataFrame) -> None: """ Find features in the strategy provided dataframe @@ -925,37 +391,6 @@ class FreqaiDataKitchen: labels = [c for c in column_names if "&" in c] self.label_list = labels - def check_if_pred_in_training_spaces(self) -> None: - """ - Compares the distance from each prediction point to each training data - point. It uses this information to estimate a Dissimilarity Index (DI) - and avoid making predictions on any points that are too far away - from the training data set. - """ - - distance = pairwise_distances( - self.data_dictionary["train_features"], - self.data_dictionary["prediction_features"], - n_jobs=self.thread_count, - ) - - self.DI_values = distance.min(axis=0) / self.data["avg_mean_dist"] - - do_predict = np.where( - self.DI_values < self.freqai_config["feature_parameters"]["DI_threshold"], - 1, - 0, - ) - - if (len(do_predict) - do_predict.sum()) > 0: - logger.info( - f"{self.pair}: DI tossed {len(do_predict) - do_predict.sum()} predictions for " - "being too far from training data." - ) - - self.do_predict += do_predict - self.do_predict -= 1 - def set_weights_higher_recent(self, num_weights: int) -> npt.ArrayLike: """ Set weights so that recent data is more heavily weighted during @@ -1325,9 +760,9 @@ class FreqaiDataKitchen: " which was deprecated on March 1, 2023. 
Please refer " "to the strategy migration guide to use the new " "feature_engineering_* methods: \n" - "https://www.freqtrade.io/en/stable/strategy_migration/#freqai-strategy \n" + f"{DOCS_LINK}/strategy_migration/#freqai-strategy \n" "And the feature_engineering_* documentation: \n" - "https://www.freqtrade.io/en/latest/freqai-feature-engineering/" + f"{DOCS_LINK}/freqai-feature-engineering/" ) tfs: List[str] = self.freqai_config["feature_parameters"].get("include_timeframes") @@ -1515,3 +950,32 @@ class FreqaiDataKitchen: timerange.startts += buffer * timeframe_to_seconds(self.config["timeframe"]) return timerange + + # deprecated functions + def normalize_data(self, data_dictionary: Dict) -> Dict[Any, Any]: + """ + Deprecation warning, migration assistance + """ + logger.warning(f"Your custom IFreqaiModel relies on the deprecated" + " data pipeline. Please update your model to use the new data pipeline." + " This can be achieved by following the migration guide at " + f"{DOCS_LINK}/strategy_migration/#freqai-new-data-pipeline " + "We added a basic pipeline for you, but this will be removed " + "in a future version.") + + return data_dictionary + + def denormalize_labels_from_metadata(self, df: DataFrame) -> DataFrame: + """ + Deprecation warning, migration assistance + """ + logger.warning(f"Your custom IFreqaiModel relies on the deprecated" + " data pipeline. Please update your model to use the new data pipeline." + " This can be achieved by following the migration guide at " + f"{DOCS_LINK}/strategy_migration/#freqai-new-data-pipeline " + "We added a basic pipeline for you, but this will be removed " + "in a future version.") + + pred_df, _, _ = self.label_pipeline.inverse_transform(df) + + return pred_df diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index 9cfda05ee..36c94130c 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -7,14 +7,18 @@ from datetime import datetime, timezone from pathlib import Path from typing import Any, Dict, List, Literal, Optional, Tuple +import datasieve.transforms as ds import numpy as np import pandas as pd import psutil +from datasieve.pipeline import Pipeline +from datasieve.transforms import SKLearnWrapper from numpy.typing import NDArray from pandas import DataFrame +from sklearn.preprocessing import MinMaxScaler from freqtrade.configuration import TimeRange -from freqtrade.constants import Config +from freqtrade.constants import DOCS_LINK, Config from freqtrade.data.dataprovider import DataProvider from freqtrade.enums import RunMode from freqtrade.exceptions import OperationalException @@ -503,68 +507,43 @@ class IFreqaiModel(ABC): "feature_engineering_* functions" ) - def data_cleaning_train(self, dk: FreqaiDataKitchen) -> None: - """ - Base data cleaning method for train. - Functions here improve/modify the input data by identifying outliers, - computing additional metrics, adding noise, reducing dimensionality etc. 
- """ - + def define_data_pipeline(self, threads=-1) -> Pipeline: ft_params = self.freqai_info["feature_parameters"] + pipe_steps = [ + ('const', ds.VarianceThreshold(threshold=0)), + ('scaler', SKLearnWrapper(MinMaxScaler(feature_range=(-1, 1)))) + ] - if ft_params.get('inlier_metric_window', 0): - dk.compute_inlier_metric(set_='train') - if self.freqai_info["data_split_parameters"]["test_size"] > 0: - dk.compute_inlier_metric(set_='test') - - if ft_params.get( - "principal_component_analysis", False - ): - dk.principal_component_analysis() + if ft_params.get("principal_component_analysis", False): + pipe_steps.append(('pca', ds.PCA(n_components=0.999))) + pipe_steps.append(('post-pca-scaler', + SKLearnWrapper(MinMaxScaler(feature_range=(-1, 1))))) if ft_params.get("use_SVM_to_remove_outliers", False): - dk.use_SVM_to_remove_outliers(predict=False) + svm_params = ft_params.get( + "svm_params", {"shuffle": False, "nu": 0.01}) + pipe_steps.append(('svm', ds.SVMOutlierExtractor(**svm_params))) - if ft_params.get("DI_threshold", 0): - dk.data["avg_mean_dist"] = dk.compute_distances() + di = ft_params.get("DI_threshold", 0) + if di: + pipe_steps.append(('di', ds.DissimilarityIndex(di_threshold=di, n_jobs=threads))) if ft_params.get("use_DBSCAN_to_remove_outliers", False): - if dk.pair in self.dd.old_DBSCAN_eps: - eps = self.dd.old_DBSCAN_eps[dk.pair] - else: - eps = None - dk.use_DBSCAN_to_remove_outliers(predict=False, eps=eps) - self.dd.old_DBSCAN_eps[dk.pair] = dk.data['DBSCAN_eps'] + pipe_steps.append(('dbscan', ds.DBSCAN(n_jobs=threads))) - if self.freqai_info["feature_parameters"].get('noise_standard_deviation', 0): - dk.add_noise_to_training_features() + sigma = self.freqai_info["feature_parameters"].get('noise_standard_deviation', 0) + if sigma: + pipe_steps.append(('noise', ds.Noise(sigma=sigma))) - def data_cleaning_predict(self, dk: FreqaiDataKitchen) -> None: - """ - Base data cleaning method for predict. - Functions here are complementary to the functions of data_cleaning_train. 
- """ - ft_params = self.freqai_info["feature_parameters"] + return Pipeline(pipe_steps) - # ensure user is feeding the correct indicators to the model - self.check_if_feature_list_matches_strategy(dk) + def define_label_pipeline(self, threads=-1) -> Pipeline: - if ft_params.get('inlier_metric_window', 0): - dk.compute_inlier_metric(set_='predict') + label_pipeline = Pipeline([ + ('scaler', SKLearnWrapper(MinMaxScaler(feature_range=(-1, 1)))) + ]) - if ft_params.get( - "principal_component_analysis", False - ): - dk.pca_transform(dk.data_dictionary['prediction_features']) - - if ft_params.get("use_SVM_to_remove_outliers", False): - dk.use_SVM_to_remove_outliers(predict=True) - - if ft_params.get("DI_threshold", 0): - dk.check_if_pred_in_training_spaces() - - if ft_params.get("use_DBSCAN_to_remove_outliers", False): - dk.use_DBSCAN_to_remove_outliers(predict=True) + return label_pipeline def model_exists(self, dk: FreqaiDataKitchen) -> bool: """ @@ -576,8 +555,6 @@ class IFreqaiModel(ABC): """ if self.dd.model_type == 'joblib': file_type = ".joblib" - elif self.dd.model_type == 'keras': - file_type = ".h5" elif self.dd.model_type in ["stable_baselines3", "sb3_contrib", "pytorch"]: file_type = ".zip" @@ -701,7 +678,7 @@ class IFreqaiModel(ABC): # # for keras type models, the conv_window needs to be prepended so # # viewing is correct in frequi - if self.freqai_info.get('keras', False) or self.ft_params.get('inlier_metric_window', 0): + if self.ft_params.get('inlier_metric_window', 0): n_lost_points = self.freqai_info.get('conv_width', 2) zeros_df = DataFrame(np.zeros((n_lost_points, len(hist_preds_df.columns))), columns=hist_preds_df.columns) @@ -991,3 +968,50 @@ class IFreqaiModel(ABC): :do_predict: np.array of 1s and 0s to indicate places where freqai needed to remove data (NaNs) or felt uncertain about data (i.e. SVM and/or DI index) """ + + # deprecated functions + def data_cleaning_train(self, dk: FreqaiDataKitchen, pair: str): + """ + throw deprecation warning if this function is called + """ + logger.warning(f"Your model {self.__class__.__name__} relies on the deprecated" + " data pipeline. Please update your model to use the new data pipeline." + " This can be achieved by following the migration guide at " + f"{DOCS_LINK}/strategy_migration/#freqai-new-data-pipeline") + dk.feature_pipeline = self.define_data_pipeline(threads=dk.thread_count) + dd = dk.data_dictionary + (dd["train_features"], + dd["train_labels"], + dd["train_weights"]) = dk.feature_pipeline.fit_transform(dd["train_features"], + dd["train_labels"], + dd["train_weights"]) + + (dd["test_features"], + dd["test_labels"], + dd["test_weights"]) = dk.feature_pipeline.transform(dd["test_features"], + dd["test_labels"], + dd["test_weights"]) + + dk.label_pipeline = self.define_label_pipeline(threads=dk.thread_count) + + dd["train_labels"], _, _ = dk.label_pipeline.fit_transform(dd["train_labels"]) + dd["test_labels"], _, _ = dk.label_pipeline.transform(dd["test_labels"]) + return + + def data_cleaning_predict(self, dk: FreqaiDataKitchen, pair: str): + """ + throw deprecation warning if this function is called + """ + logger.warning(f"Your model {self.__class__.__name__} relies on the deprecated" + " data pipeline. Please update your model to use the new data pipeline." 
+ " This can be achieved by following the migration guide at " + f"{DOCS_LINK}/strategy_migration/#freqai-new-data-pipeline") + dd = dk.data_dictionary + dd["predict_features"], outliers, _ = dk.feature_pipeline.transform( + dd["predict_features"], outlier_check=True) + if self.freqai_info.get("DI_threshold", 0) > 0: + dk.DI_values = dk.feature_pipeline["di"].di_values + else: + dk.DI_values = np.zeros(outliers.shape[0]) + dk.do_predict = outliers + return diff --git a/freqtrade/freqai/prediction_models/PyTorchTransformerRegressor.py b/freqtrade/freqai/prediction_models/PyTorchTransformerRegressor.py index b3b684c14..a76bab05c 100644 --- a/freqtrade/freqai/prediction_models/PyTorchTransformerRegressor.py +++ b/freqtrade/freqai/prediction_models/PyTorchTransformerRegressor.py @@ -103,13 +103,13 @@ class PyTorchTransformerRegressor(BasePyTorchRegressor): """ dk.find_features(unfiltered_df) - filtered_df, _ = dk.filter_features( + dk.data_dictionary["prediction_features"], _ = dk.filter_features( unfiltered_df, dk.training_features_list, training_filter=False ) - filtered_df = dk.normalize_data_from_metadata(filtered_df) - dk.data_dictionary["prediction_features"] = filtered_df - self.data_cleaning_predict(dk) + dk.data_dictionary["prediction_features"], outliers, _ = dk.feature_pipeline.transform( + dk.data_dictionary["prediction_features"], outlier_check=True) + x = self.data_convertor.convert_x( dk.data_dictionary["prediction_features"], device=self.device @@ -131,7 +131,13 @@ class PyTorchTransformerRegressor(BasePyTorchRegressor): yb = yb.cpu().squeeze() pred_df = pd.DataFrame(yb.detach().numpy(), columns=dk.label_list) - pred_df = dk.denormalize_labels_from_metadata(pred_df) + pred_df, _, _ = dk.label_pipeline.inverse_transform(pred_df) + + if self.freqai_info.get("DI_threshold", 0) > 0: + dk.DI_values = dk.feature_pipeline["di"].di_values + else: + dk.DI_values = np.zeros(outliers.shape[0]) + dk.do_predict = outliers if x.shape[1] > 1: zeros_df = pd.DataFrame(np.zeros((x.shape[1] - len(pred_df), len(pred_df.columns))), diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py index 9f0b2d436..f014da602 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py @@ -2,7 +2,8 @@ import logging from typing import Any, Dict from pandas import DataFrame -from stable_baselines3.common.callbacks import EvalCallback +from sb3_contrib.common.maskable.callbacks import MaskableEvalCallback +from sb3_contrib.common.maskable.utils import is_masking_supported from stable_baselines3.common.vec_env import SubprocVecEnv, VecMonitor from freqtrade.freqai.data_kitchen import FreqaiDataKitchen @@ -55,9 +56,11 @@ class ReinforcementLearner_multiproc(ReinforcementLearner): env_info=env_info) for i in range(self.max_threads)])) - self.eval_callback = EvalCallback(self.eval_env, deterministic=True, - render=False, eval_freq=eval_freq, - best_model_save_path=str(dk.data_path)) + self.eval_callback = MaskableEvalCallback(self.eval_env, deterministic=True, + render=False, eval_freq=eval_freq, + best_model_save_path=str(dk.data_path), + use_masking=(self.model_type == 'MaskablePPO' and + is_masking_supported(self.eval_env))) # TENSORBOARD CALLBACK DOES NOT RECOMMENDED TO USE WITH MULTIPLE ENVS, # IT WILL RETURN FALSE INFORMATIONS, NEVERTHLESS NOT THREAD SAFE WITH SB3!!! 
diff --git a/freqtrade/freqai/prediction_models/XGBoostRFRegressor.py b/freqtrade/freqai/prediction_models/XGBoostRFRegressor.py
index 1aefbf19a..f43585ab0 100644
--- a/freqtrade/freqai/prediction_models/XGBoostRFRegressor.py
+++ b/freqtrade/freqai/prediction_models/XGBoostRFRegressor.py
@@ -5,6 +5,7 @@ from xgboost import XGBRFRegressor
 
 from freqtrade.freqai.base_models.BaseRegressionModel import BaseRegressionModel
 from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
+from freqtrade.freqai.tensorboard import TBCallback
 
 
 logger = logging.getLogger(__name__)
@@ -44,7 +45,10 @@ class XGBoostRFRegressor(BaseRegressionModel):
 
         model = XGBRFRegressor(**self.model_training_parameters)
 
+        model.set_params(callbacks=[TBCallback(dk.data_path, self.activate_tensorboard)])
         model.fit(X=X, y=y, sample_weight=sample_weight, eval_set=eval_set,
                   sample_weight_eval_set=eval_weights, xgb_model=xgb_model)
+        # set the callbacks to empty so that we can serialize to disk later
+        model.set_params(callbacks=[])
 
         return model
diff --git a/freqtrade/freqtradebot.py b/freqtrade/freqtradebot.py
index fc4c65caf..42ac85fdb 100644
--- a/freqtrade/freqtradebot.py
+++ b/freqtrade/freqtradebot.py
@@ -1302,6 +1302,10 @@ class FreqtradeBot(LoggingMixin):
                 f"(orderid:{order['id']}) in order to add another one ...")
 
             self.cancel_stoploss_on_exchange(trade)
+            if not trade.is_open:
+                logger.warning(
+                    f"Trade {trade} is closed, not creating trailing stoploss order.")
+                return
 
             # Create new stoploss order
             if not self.create_stoploss_order(trade=trade, stop_price=stoploss_norm):
diff --git a/freqtrade/loggers/__init__.py b/freqtrade/loggers/__init__.py
index 58f207608..390f210c0 100644
--- a/freqtrade/loggers/__init__.py
+++ b/freqtrade/loggers/__init__.py
@@ -5,6 +5,7 @@ from logging.handlers import RotatingFileHandler, SysLogHandler
 from freqtrade.constants import Config
 from freqtrade.exceptions import OperationalException
 from freqtrade.loggers.buffering_handler import FTBufferingHandler
+from freqtrade.loggers.set_log_levels import set_loggers
 from freqtrade.loggers.std_err_stream_handler import FTStdErrStreamHandler
 
 
@@ -16,29 +17,6 @@ bufferHandler = FTBufferingHandler(1000)
 bufferHandler.setFormatter(Formatter(LOGFORMAT))
 
 
-def _set_loggers(verbosity: int = 0, api_verbosity: str = 'info') -> None:
-    """
-    Set the logging level for third party libraries
-    :return: None
-    """
-
-    logging.getLogger('requests').setLevel(
-        logging.INFO if verbosity <= 1 else logging.DEBUG
-    )
-    logging.getLogger("urllib3").setLevel(
-        logging.INFO if verbosity <= 1 else logging.DEBUG
-    )
-    logging.getLogger('ccxt.base.exchange').setLevel(
-        logging.INFO if verbosity <= 2 else logging.DEBUG
-    )
-    logging.getLogger('telegram').setLevel(logging.INFO)
-    logging.getLogger('httpx').setLevel(logging.INFO)
-
-    logging.getLogger('werkzeug').setLevel(
-        logging.ERROR if api_verbosity == 'error' else logging.INFO
-    )
-
-
 def get_existing_handlers(handlertype):
     """
     Returns Existing handler or None (if the handler has not yet been added to the root handlers).
@@ -115,6 +93,6 @@ def setup_logging(config: Config) -> None: logging.root.addHandler(handler_rf) logging.root.setLevel(logging.INFO if verbosity < 1 else logging.DEBUG) - _set_loggers(verbosity, config.get('api_server', {}).get('verbosity', 'info')) + set_loggers(verbosity, config.get('api_server', {}).get('verbosity', 'info')) logger.info('Verbosity set to %s', verbosity) diff --git a/freqtrade/loggers/set_log_levels.py b/freqtrade/loggers/set_log_levels.py new file mode 100644 index 000000000..7311fa0a0 --- /dev/null +++ b/freqtrade/loggers/set_log_levels.py @@ -0,0 +1,55 @@ + +import logging + + +logger = logging.getLogger(__name__) + + +def set_loggers(verbosity: int = 0, api_verbosity: str = 'info') -> None: + """ + Set the logging level for third party libraries + :return: None + """ + + logging.getLogger('requests').setLevel( + logging.INFO if verbosity <= 1 else logging.DEBUG + ) + logging.getLogger("urllib3").setLevel( + logging.INFO if verbosity <= 1 else logging.DEBUG + ) + logging.getLogger('ccxt.base.exchange').setLevel( + logging.INFO if verbosity <= 2 else logging.DEBUG + ) + logging.getLogger('telegram').setLevel(logging.INFO) + logging.getLogger('httpx').setLevel(logging.WARNING) + + logging.getLogger('werkzeug').setLevel( + logging.ERROR if api_verbosity == 'error' else logging.INFO + ) + + +__BIAS_TESTER_LOGGERS = [ + 'freqtrade.resolvers', + 'freqtrade.strategy.hyper', + 'freqtrade.configuration.config_validation', +] + + +def reduce_verbosity_for_bias_tester() -> None: + """ + Reduce verbosity for bias tester. + It loads the same strategy several times, which would spam the log. + """ + logger.info("Reducing verbosity for bias tester.") + for logger_name in __BIAS_TESTER_LOGGERS: + logging.getLogger(logger_name).setLevel(logging.WARNING) + + +def restore_verbosity_for_bias_tester() -> None: + """ + Restore verbosity after bias tester. + """ + logger.info("Restoring log verbosity.") + log_level = logging.NOTSET + for logger_name in __BIAS_TESTER_LOGGERS: + logging.getLogger(logger_name).setLevel(log_level) diff --git a/freqtrade/mixins/logging_mixin.py b/freqtrade/mixins/logging_mixin.py index 06935d5f6..31b49ba55 100644 --- a/freqtrade/mixins/logging_mixin.py +++ b/freqtrade/mixins/logging_mixin.py @@ -3,7 +3,7 @@ from typing import Callable from cachetools import TTLCache, cached -class LoggingMixin(): +class LoggingMixin: """ Logging Mixin Shows similar messages only once every `refresh_period`. 
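The new set_log_levels module above also ships the paired bias-tester helpers that the lookahead analysis added below consumes. A short usage sketch follows; note that run_repeated_backtests is a hypothetical placeholder, and the try/finally wrapper is a defensive choice of this sketch (the analyser itself simply calls the two helpers before and after its loop):

from freqtrade.loggers.set_log_levels import (reduce_verbosity_for_bias_tester,
                                              restore_verbosity_for_bias_tester)


def run_repeated_backtests() -> None:
    ...  # hypothetical placeholder for a loop that reloads the same strategy


# silence resolver/strategy-loading chatter while the same strategy is loaded repeatedly
reduce_verbosity_for_bias_tester()
try:
    run_repeated_backtests()
finally:
    # reset the affected loggers to NOTSET so the next strategy logs normally again
    restore_verbosity_for_bias_tester()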
diff --git a/freqtrade/optimize/backtesting.py b/freqtrade/optimize/backtesting.py index d77fc469b..4a5536e84 100644 --- a/freqtrade/optimize/backtesting.py +++ b/freqtrade/optimize/backtesting.py @@ -24,6 +24,7 @@ from freqtrade.enums import (BacktestState, CandleType, ExitCheckTuple, ExitType from freqtrade.exceptions import DependencyException, OperationalException from freqtrade.exchange import (amount_to_contract_precision, price_to_precision, timeframe_to_minutes, timeframe_to_seconds) +from freqtrade.exchange.exchange import Exchange from freqtrade.mixins import LoggingMixin from freqtrade.optimize.backtest_caching import get_strategy_run_id from freqtrade.optimize.bt_progress import BTProgress @@ -72,7 +73,7 @@ class Backtesting: backtesting.start() """ - def __init__(self, config: Config) -> None: + def __init__(self, config: Config, exchange: Optional[Exchange] = None) -> None: LoggingMixin.show_output = False self.config = config @@ -89,7 +90,10 @@ class Backtesting: self.rejected_df: Dict[str, Dict] = {} self._exchange_name = self.config['exchange']['name'] - self.exchange = ExchangeResolver.load_exchange(self.config, load_leverage_tiers=True) + if not exchange: + exchange = ExchangeResolver.load_exchange(self.config, load_leverage_tiers=True) + self.exchange = exchange + self.dataprovider = DataProvider(self.config, self.exchange) if self.config.get('strategy_list'): @@ -114,16 +118,7 @@ class Backtesting: self.timeframe_min = timeframe_to_minutes(self.timeframe) self.init_backtest_detail() self.pairlists = PairListManager(self.exchange, self.config, self.dataprovider) - if 'VolumePairList' in self.pairlists.name_list: - raise OperationalException("VolumePairList not allowed for backtesting. " - "Please use StaticPairList instead.") - if 'PerformanceFilter' in self.pairlists.name_list: - raise OperationalException("PerformanceFilter not allowed for backtesting.") - - if len(self.strategylist) > 1 and 'PrecisionFilter' in self.pairlists.name_list: - raise OperationalException( - "PrecisionFilter not allowed for backtesting multiple strategies." - ) + self._validate_pairlists_for_backtesting() self.dataprovider.add_pairlisthandler(self.pairlists) self.pairlists.refresh_pairlist() @@ -164,6 +159,18 @@ class Backtesting: self.init_backtest() + def _validate_pairlists_for_backtesting(self): + if 'VolumePairList' in self.pairlists.name_list: + raise OperationalException("VolumePairList not allowed for backtesting. " + "Please use StaticPairList instead.") + if 'PerformanceFilter' in self.pairlists.name_list: + raise OperationalException("PerformanceFilter not allowed for backtesting.") + + if len(self.strategylist) > 1 and 'PrecisionFilter' in self.pairlists.name_list: + raise OperationalException( + "PrecisionFilter not allowed for backtesting multiple strategies." 
+            )
+
     @staticmethod
     def cleanup():
         LoggingMixin.show_output = True
diff --git a/freqtrade/optimize/hyperopt_tools.py b/freqtrade/optimize/hyperopt_tools.py
index 1e7befdf6..bc5b85309 100644
--- a/freqtrade/optimize/hyperopt_tools.py
+++ b/freqtrade/optimize/hyperopt_tools.py
@@ -35,7 +35,7 @@ def hyperopt_serializer(x):
     return str(x)
 
 
-class HyperoptStateContainer():
+class HyperoptStateContainer:
     """ Singleton class to track state of hyperopt"""
     state: HyperoptState = HyperoptState.OPTIMIZE
 
@@ -44,7 +44,7 @@ class HyperoptStateContainer():
         cls.state = value
 
 
-class HyperoptTools():
+class HyperoptTools:
 
     @staticmethod
     def get_strategy_filename(config: Config, strategy_name: str) -> Optional[Path]:
diff --git a/freqtrade/optimize/lookahead_analysis.py b/freqtrade/optimize/lookahead_analysis.py
new file mode 100755
index 000000000..dcc1088b3
--- /dev/null
+++ b/freqtrade/optimize/lookahead_analysis.py
@@ -0,0 +1,275 @@
+import logging
+import shutil
+from copy import deepcopy
+from datetime import datetime, timedelta, timezone
+from pathlib import Path
+from typing import Any, Dict, List, Optional
+
+from pandas import DataFrame
+
+from freqtrade.configuration import TimeRange
+from freqtrade.data.history import get_timerange
+from freqtrade.exchange import timeframe_to_minutes
+from freqtrade.loggers.set_log_levels import (reduce_verbosity_for_bias_tester,
+                                              restore_verbosity_for_bias_tester)
+from freqtrade.optimize.backtesting import Backtesting
+
+
+logger = logging.getLogger(__name__)
+
+
+class VarHolder:
+    timerange: TimeRange
+    data: DataFrame
+    indicators: Dict[str, DataFrame]
+    result: DataFrame
+    compared: DataFrame
+    from_dt: datetime
+    to_dt: datetime
+    compared_dt: datetime
+    timeframe: str
+
+
+class Analysis:
+    def __init__(self) -> None:
+        self.total_signals = 0
+        self.false_entry_signals = 0
+        self.false_exit_signals = 0
+        self.false_indicators: List[str] = []
+        self.has_bias = False
+
+
+class LookaheadAnalysis:
+
+    def __init__(self, config: Dict[str, Any], strategy_obj: Dict):
+        self.failed_bias_check = True
+        self.full_varHolder = VarHolder()
+
+        self.entry_varHolders: List[VarHolder] = []
+        self.exit_varHolders: List[VarHolder] = []
+        self.exchange: Optional[Any] = None
+
+        # pull the required variables into the scope of this lookahead_analysis instance
+        self.local_config = deepcopy(config)
+        self.local_config['strategy'] = strategy_obj['name']
+        self.current_analysis = Analysis()
+        self.minimum_trade_amount = config['minimum_trade_amount']
+        self.targeted_trade_amount = config['targeted_trade_amount']
+        self.strategy_obj = strategy_obj
+
+    @staticmethod
+    def dt_to_timestamp(dt: datetime):
+        timestamp = int(dt.replace(tzinfo=timezone.utc).timestamp())
+        return timestamp
+
+    @staticmethod
+    def get_result(backtesting: Backtesting, processed: DataFrame):
+        min_date, max_date = get_timerange(processed)
+
+        result = backtesting.backtest(
+            processed=deepcopy(processed),
+            start_date=min_date,
+            end_date=max_date
+        )
+        return result
+
+    @staticmethod
+    def report_signal(result: dict, column_name: str, checked_timestamp: datetime):
+        df = result['results']
+        row_count = df[column_name].shape[0]
+
+        if row_count == 0:
+            return False
+
+        df_cut = df[(df[column_name] == checked_timestamp)]
+        return df_cut[column_name].shape[0] != 0
+
+    # Analyzes two dataframes with processed indicators and shows the differences between them.
+    def analyze_indicators(self, full_vars: VarHolder, cut_vars: VarHolder, current_pair: str):
+        # extract dataframes
+        cut_df: DataFrame = cut_vars.indicators[current_pair]
+        full_df: DataFrame = full_vars.indicators[current_pair]
+
+        # reduce both dataframes to the row at the compared timestamp
+        full_df_cut = full_df[
+            (full_df.date == cut_vars.compared_dt)
+        ].reset_index(drop=True)
+        cut_df_cut = cut_df[
+            (cut_df.date == cut_vars.compared_dt)
+        ].reset_index(drop=True)
+
+        # check if dataframes are not empty
+        if full_df_cut.shape[0] != 0 and cut_df_cut.shape[0] != 0:
+
+            # compare dataframes
+            compare_df = full_df_cut.compare(cut_df_cut)
+
+            if compare_df.shape[0] > 0:
+                for col_name, values in compare_df.items():
+                    col_idx = compare_df.columns.get_loc(col_name)
+                    compare_df_row = compare_df.iloc[0]
+                    # compare_df columns are tuples whose second element is either 'self' or 'other'
+                    if 'other' in col_name[1]:
+                        continue
+                    self_value = compare_df_row[col_idx]
+                    other_value = compare_df_row[col_idx + 1]
+
+                    # output differences
+                    if self_value != other_value:
+
+                        if col_name[0] not in self.current_analysis.false_indicators:
+                            self.current_analysis.false_indicators.append(col_name[0])
+                            logger.info(f"=> found look ahead bias in indicator "
+                                        f"{col_name[0]}. "
+                                        f"{str(self_value)} != {str(other_value)}")
+
+    def prepare_data(self, varholder: VarHolder, pairs_to_load: List[DataFrame]):
+
+        if 'freqai' in self.local_config and 'identifier' in self.local_config['freqai']:
+            # purge previous data if the freqai model is defined
+            # (to be sure nothing is carried over from older backtests)
+            path_to_current_identifier = (
+                Path(f"{self.local_config['user_data_dir']}/models/"
+                     f"{self.local_config['freqai']['identifier']}").resolve())
+            # remove folder and its contents
+            if Path.exists(path_to_current_identifier):
+                shutil.rmtree(path_to_current_identifier)
+
+        prepare_data_config = deepcopy(self.local_config)
+        prepare_data_config['timerange'] = (str(self.dt_to_timestamp(varholder.from_dt)) + "-" +
+                                            str(self.dt_to_timestamp(varholder.to_dt)))
+        prepare_data_config['exchange']['pair_whitelist'] = pairs_to_load
+
+        backtesting = Backtesting(prepare_data_config, self.exchange)
+        self.exchange = backtesting.exchange
+        backtesting._set_strategy(backtesting.strategylist[0])
+
+        varholder.data, varholder.timerange = backtesting.load_bt_data()
+        backtesting.load_bt_data_detail()
+        varholder.timeframe = backtesting.timeframe
+
+        varholder.indicators = backtesting.strategy.advise_all_indicators(varholder.data)
+        varholder.result = self.get_result(backtesting, varholder.indicators)
+
+    def fill_full_varholder(self):
+        self.full_varHolder = VarHolder()
+
+        # parse the user-supplied timerange into datetime boundaries
+        parsed_timerange = TimeRange.parse_timerange(self.local_config['timerange'])
+
+        if parsed_timerange.startdt is None:
+            self.full_varHolder.from_dt = datetime.fromtimestamp(0, tz=timezone.utc)
+        else:
+            self.full_varHolder.from_dt = parsed_timerange.startdt
+
+        if parsed_timerange.stopdt is None:
+            self.full_varHolder.to_dt = datetime.utcnow()
+        else:
+            self.full_varHolder.to_dt = parsed_timerange.stopdt
+
+        self.prepare_data(self.full_varHolder, self.local_config['pairs'])
+
+    def fill_entry_and_exit_varHolders(self, result_row):
+        # entry_varHolder
+        entry_varHolder = VarHolder()
+        self.entry_varHolders.append(entry_varHolder)
+        entry_varHolder.from_dt = self.full_varHolder.from_dt
+        entry_varHolder.compared_dt = result_row['open_date']
+        # to_dt needs +1 candle since it won't buy on the last candle
+        entry_varHolder.to_dt = (
+            result_row['open_date'] +
+            timedelta(minutes=timeframe_to_minutes(self.full_varHolder.timeframe)))
+        self.prepare_data(entry_varHolder, [result_row['pair']])
+
+        # exit_varHolder
+        exit_varHolder = VarHolder()
+        self.exit_varHolders.append(exit_varHolder)
+        # to_dt needs +1 candle since it will always exit/force-exit trades on the last candle
+        exit_varHolder.from_dt = self.full_varHolder.from_dt
+        exit_varHolder.to_dt = (
+            result_row['close_date'] +
+            timedelta(minutes=timeframe_to_minutes(self.full_varHolder.timeframe)))
+        exit_varHolder.compared_dt = result_row['close_date']
+        self.prepare_data(exit_varHolder, [result_row['pair']])
+
+    # analyzes a full trade from full_varHolder and checks it for bias
+    def analyze_row(self, idx, result_row):
+        # if the trade was force-sold, ignore this signal since it would exit unconditionally anyway
+        if result_row.close_date == self.dt_to_timestamp(self.full_varHolder.to_dt):
+            return
+
+        # keep track of how many signals are processed in total
+        self.current_analysis.total_signals += 1
+
+        # fill entry_varHolder and exit_varHolder
+        self.fill_entry_and_exit_varHolders(result_row)
+
+        # register if the entry signal is broken
+        if not self.report_signal(
+                self.entry_varHolders[idx].result,
+                "open_date",
+                self.entry_varHolders[idx].compared_dt):
+            self.current_analysis.false_entry_signals += 1
+
+        # register if the exit signal is broken
+        if not self.report_signal(
+                self.exit_varHolders[idx].result,
+                "close_date",
+                self.exit_varHolders[idx].compared_dt):
+            self.current_analysis.false_exit_signals += 1
+
+        # check if the indicators themselves contain biased data
+        self.analyze_indicators(self.full_varHolder, self.entry_varHolders[idx], result_row['pair'])
+        self.analyze_indicators(self.full_varHolder, self.exit_varHolders[idx], result_row['pair'])
+
+    def start(self) -> None:
+
+        # first make a single backtest
+        self.fill_full_varholder()
+
+        reduce_verbosity_for_bias_tester()
+
+        # check if the results of full_varHolder meet the trade-amount requirements
+        found_signals: int = self.full_varHolder.result['results'].shape[0] + 1
+        if found_signals >= self.targeted_trade_amount:
+            logger.info(f"Found {found_signals} trades, "
+                        f"calculating {self.targeted_trade_amount} trades.")
+        elif self.targeted_trade_amount >= found_signals >= self.minimum_trade_amount:
+            logger.info(f"Only found {found_signals} trades. Calculating all available trades.")
+        else:
+            logger.info(f"Found {found_signals} trades, "
+                        f"which is less than minimum_trade_amount {self.minimum_trade_amount}. "
+                        f"Cancelling this backtest lookahead bias test.")
+            return
+
+        # now we loop through all signals
+        # starting from the same datetime to avoid mis-reports of bias
+        for idx, result_row in self.full_varHolder.result['results'].iterrows():
+            if self.current_analysis.total_signals == self.targeted_trade_amount:
+                break
+            self.analyze_row(idx, result_row)
+
+        # Restore verbosity, so it's not too quiet for the next strategy
+        restore_verbosity_for_bias_tester()
+        # check and report signals
+        if self.current_analysis.total_signals < self.local_config['minimum_trade_amount']:
+            logger.info(f" -> {self.local_config['strategy']} : too few trades. "
+                        f"We only found {self.current_analysis.total_signals} trades. "
" + f"Hint: Extend the timerange " + f"to get at least {self.local_config['minimum_trade_amount']} " + f"or lower the value of minimum_trade_amount.") + self.failed_bias_check = True + elif (self.current_analysis.false_entry_signals > 0 or + self.current_analysis.false_exit_signals > 0 or + len(self.current_analysis.false_indicators) > 0): + logger.info(f" => {self.local_config['strategy']} : bias detected!") + self.current_analysis.has_bias = True + self.failed_bias_check = False + else: + logger.info(self.local_config['strategy'] + ": no bias detected") + self.failed_bias_check = False diff --git a/freqtrade/optimize/lookahead_analysis_helpers.py b/freqtrade/optimize/lookahead_analysis_helpers.py new file mode 100644 index 000000000..702eee774 --- /dev/null +++ b/freqtrade/optimize/lookahead_analysis_helpers.py @@ -0,0 +1,202 @@ +import logging +import time +from pathlib import Path +from typing import Any, Dict, List + +import pandas as pd + +from freqtrade.constants import Config +from freqtrade.exceptions import OperationalException +from freqtrade.optimize.lookahead_analysis import LookaheadAnalysis +from freqtrade.resolvers import StrategyResolver + + +logger = logging.getLogger(__name__) + + +class LookaheadAnalysisSubFunctions: + + @staticmethod + def text_table_lookahead_analysis_instances( + config: Dict[str, Any], + lookahead_instances: List[LookaheadAnalysis]): + headers = ['filename', 'strategy', 'has_bias', 'total_signals', + 'biased_entry_signals', 'biased_exit_signals', 'biased_indicators'] + data = [] + for inst in lookahead_instances: + if config['minimum_trade_amount'] > inst.current_analysis.total_signals: + data.append( + [ + inst.strategy_obj['location'].parts[-1], + inst.strategy_obj['name'], + "too few trades caught " + f"({inst.current_analysis.total_signals}/{config['minimum_trade_amount']})." + f"Test failed." 
+                    ]
+                )
+            elif inst.failed_bias_check:
+                data.append(
+                    [
+                        inst.strategy_obj['location'].parts[-1],
+                        inst.strategy_obj['name'],
+                        'error while checking'
+                    ]
+                )
+            else:
+                data.append(
+                    [
+                        inst.strategy_obj['location'].parts[-1],
+                        inst.strategy_obj['name'],
+                        inst.current_analysis.has_bias,
+                        inst.current_analysis.total_signals,
+                        inst.current_analysis.false_entry_signals,
+                        inst.current_analysis.false_exit_signals,
+                        ", ".join(inst.current_analysis.false_indicators)
+                    ]
+                )
+        from tabulate import tabulate
+        table = tabulate(data, headers=headers, tablefmt="orgtbl")
+        print(table)
+        return table, headers, data
+
+    @staticmethod
+    def export_to_csv(config: Dict[str, Any], lookahead_analysis: List[LookaheadAnalysis]):
+        def add_or_update_row(df, row_data):
+            if (
+                (df['filename'] == row_data['filename']) &
+                (df['strategy'] == row_data['strategy'])
+            ).any():
+                # Update existing row
+                pd_series = pd.DataFrame([row_data])
+                df.loc[
+                    (df['filename'] == row_data['filename']) &
+                    (df['strategy'] == row_data['strategy'])
+                ] = pd_series
+            else:
+                # Add new row
+                df = pd.concat([df, pd.DataFrame([row_data], columns=df.columns)])
+
+            return df
+
+        if Path(config['lookahead_analysis_exportfilename']).exists():
+            # Read CSV file into a pandas dataframe
+            csv_df = pd.read_csv(config['lookahead_analysis_exportfilename'])
+        else:
+            # Create a new empty DataFrame with the desired column names and set the index
+            csv_df = pd.DataFrame(columns=[
+                'filename', 'strategy', 'has_bias', 'total_signals',
+                'biased_entry_signals', 'biased_exit_signals', 'biased_indicators'
+            ],
+                index=None)
+
+        for inst in lookahead_analysis:
+            # only update the row if the bias check ran and caught enough signals
+            if (inst.current_analysis.total_signals > config['minimum_trade_amount']
+                    and inst.failed_bias_check is not True):
+                new_row_data = {'filename': inst.strategy_obj['location'].parts[-1],
+                                'strategy': inst.strategy_obj['name'],
+                                'has_bias': inst.current_analysis.has_bias,
+                                'total_signals':
+                                    int(inst.current_analysis.total_signals),
+                                'biased_entry_signals':
+                                    int(inst.current_analysis.false_entry_signals),
+                                'biased_exit_signals':
+                                    int(inst.current_analysis.false_exit_signals),
+                                'biased_indicators':
+                                    ",".join(inst.current_analysis.false_indicators)}
+                csv_df = add_or_update_row(csv_df, new_row_data)
+
+        # Fill NaN values with a default value (e.g., 0)
+        csv_df['total_signals'] = csv_df['total_signals'].fillna(0)
+        csv_df['biased_entry_signals'] = csv_df['biased_entry_signals'].fillna(0)
+        csv_df['biased_exit_signals'] = csv_df['biased_exit_signals'].fillna(0)
+
+        # Convert columns to integers
+        csv_df['total_signals'] = csv_df['total_signals'].astype(int)
+        csv_df['biased_entry_signals'] = csv_df['biased_entry_signals'].astype(int)
+        csv_df['biased_exit_signals'] = csv_df['biased_exit_signals'].astype(int)
+
+        logger.info(f"saving {config['lookahead_analysis_exportfilename']}")
+        csv_df.to_csv(config['lookahead_analysis_exportfilename'], index=False)
+
+    @staticmethod
+    def calculate_config_overrides(config: Config):
+        if config['targeted_trade_amount'] < config['minimum_trade_amount']:
+            # this combo doesn't make any sense.
+            raise OperationalException(
+                "Targeted trade amount can't be smaller than minimum trade amount."
+            )
+        if len(config['pairs']) > config['max_open_trades']:
+            logger.info('max_open_trades was less than the number of pairs. '
+                        'Setting max_open_trades to the number of pairs to avoid false positives.')
+            config['max_open_trades'] = len(config['pairs'])
+
+        min_dry_run_wallet = 1000000000
+        if config['dry_run_wallet'] < min_dry_run_wallet:
+            logger.info('Dry run wallet was below 1 billion, raising it to 1 billion '
+                        'to avoid false positives')
+            config['dry_run_wallet'] = min_dry_run_wallet
+
+        # enforce cache to be 'none', shift it to 'none' if not already
+        # (since the default value is 'day')
+        if config.get('backtest_cache') is None:
+            config['backtest_cache'] = 'none'
+        elif config['backtest_cache'] != 'none':
+            logger.info(f"backtest_cache = {config['backtest_cache']} detected. "
+                        f"Inside lookahead-analysis it is enforced to be 'none'. "
+                        f"Changed it to 'none'.")
+            config['backtest_cache'] = 'none'
+        return config
+
+    @staticmethod
+    def initialize_single_lookahead_analysis(config: Config, strategy_obj: Dict[str, Any]):
+
+        logger.info(f"Bias test of {Path(strategy_obj['location']).name} started.")
+        start = time.perf_counter()
+        current_instance = LookaheadAnalysis(config, strategy_obj)
+        current_instance.start()
+        elapsed = time.perf_counter() - start
+        logger.info(f"Checking look ahead bias via backtests "
+                    f"of {Path(strategy_obj['location']).name} "
+                    f"took {elapsed:.0f} seconds.")
+        return current_instance
+
+    @staticmethod
+    def start(config: Config):
+        config = LookaheadAnalysisSubFunctions.calculate_config_overrides(config)
+
+        strategy_objs = StrategyResolver.search_all_objects(
+            config, enum_failed=False, recursive=config.get('recursive_strategy_search', False))
+
+        lookaheadAnalysis_instances = []
+
+        # unify --strategy and --strategy_list to one list
+        if not (strategy_list := config.get('strategy_list', [])):
+            if config.get('strategy') is None:
+                raise OperationalException(
+                    "No Strategy specified. Please specify a strategy via --strategy or "
+                    "--strategy_list"
+                )
+            strategy_list = [config['strategy']]
+
+        # run the analysis for each requested strategy that could actually be loaded
+        for strat in strategy_list:
+            for strategy_obj in strategy_objs:
+                if strategy_obj['name'] == strat:
+                    lookaheadAnalysis_instances.append(
+                        LookaheadAnalysisSubFunctions.initialize_single_lookahead_analysis(
+                            config, strategy_obj))
+                    break
+
+        # report the results
+        if lookaheadAnalysis_instances:
+            LookaheadAnalysisSubFunctions.text_table_lookahead_analysis_instances(
+                config, lookaheadAnalysis_instances)
+            if config.get('lookahead_analysis_exportfilename') is not None:
+                LookaheadAnalysisSubFunctions.export_to_csv(config, lookaheadAnalysis_instances)
+        else:
+            logger.error("No strategies were specified through "
+                         "--strategy or --strategy_list, "
+                         "or no timeframe was specified.")
diff --git a/freqtrade/optimize/optimize_reports/__init__.py b/freqtrade/optimize/optimize_reports/__init__.py
new file mode 100644
index 000000000..68e222d00
--- /dev/null
+++ b/freqtrade/optimize/optimize_reports/__init__.py
@@ -0,0 +1,18 @@
+# flake8: noqa: F401
+from freqtrade.optimize.optimize_reports.bt_output import (generate_edge_table,
+                                                           show_backtest_result,
+                                                           show_backtest_results,
+                                                           show_sorted_pairlist,
+                                                           text_table_add_metrics,
+                                                           text_table_bt_results,
+                                                           text_table_exit_reason,
+                                                           text_table_periodic_breakdown,
+                                                           text_table_strategy, text_table_tags)
+from freqtrade.optimize.optimize_reports.bt_storage import (store_backtest_analysis_results,
+                                                            store_backtest_stats)
+from freqtrade.optimize.optimize_reports.optimize_reports import (
+    generate_all_periodic_breakdown_stats, generate_backtest_stats, generate_daily_stats,
+    generate_exit_reason_stats, generate_pair_metrics, generate_periodic_breakdown_stats,
+    generate_rejected_signals, generate_strategy_comparison, generate_strategy_stats,
+    generate_tag_metrics, generate_trade_signal_candles, generate_trading_stats,
+    generate_wins_draws_losses)
diff --git a/freqtrade/optimize/optimize_reports/bt_output.py b/freqtrade/optimize/optimize_reports/bt_output.py
new file mode 100644
index 000000000..1fd1f7a34
--- /dev/null
+++ b/freqtrade/optimize/optimize_reports/bt_output.py
@@ -0,0 +1,405 @@
+import logging
+from typing import Any, Dict, List
+
+from tabulate import tabulate
+
+from freqtrade.constants import UNLIMITED_STAKE_AMOUNT, Config
+from freqtrade.misc import decimals_per_coin, round_coin_value
+from freqtrade.optimize.optimize_reports.optimize_reports import (generate_periodic_breakdown_stats,
+                                                                  generate_wins_draws_losses)
+
+
+logger = logging.getLogger(__name__)
+
+
+def _get_line_floatfmt(stake_currency: str) -> List[str]:
+    """
+    Generate floatformat (goes in line with _generate_result_line())
+    """
+    return ['s', 'd', '.2f', '.2f', f'.{decimals_per_coin(stake_currency)}f',
+            '.2f', 'd', 's', 's']
+
+
+def _get_line_header(first_column: str, stake_currency: str,
+                     direction: str = 'Entries') -> List[str]:
+    """
+    Generate header lines (goes in line with _generate_result_line())
+    """
+    return [first_column, direction, 'Avg Profit %', 'Cum Profit %',
+            f'Tot Profit {stake_currency}', 'Tot Profit %', 'Avg Duration',
+            'Win Draw Loss Win%']
+
+
+def text_table_bt_results(pair_results: List[Dict[str, Any]], stake_currency: str) -> str:
+    """
+    Generates and returns a text table for the given backtest data and the results dataframe
+    :param pair_results: List of Dictionaries - one entry per pair + final TOTAL row
+    :param stake_currency: stake-currency - used to correctly name headers
+    :return: pretty printed table with tabulate as string
+    """
+
+    headers = _get_line_header('Pair', stake_currency)
+    floatfmt = _get_line_floatfmt(stake_currency)
+    output = [[
+        t['key'], t['trades'], t['profit_mean_pct'], t['profit_sum_pct'], t['profit_total_abs'],
+        t['profit_total_pct'], t['duration_avg'],
+        generate_wins_draws_losses(t['wins'], t['draws'], t['losses'])
+    ] for t in pair_results]
+    # Ignore type as floatfmt does allow tuples but mypy does not know that
+    return tabulate(output, headers=headers,
+                    floatfmt=floatfmt, tablefmt="orgtbl", stralign="right")
+
+
+def text_table_exit_reason(exit_reason_stats: List[Dict[str, Any]], stake_currency: str) -> str:
+    """
+    Generate small table outlining Backtest results
+    :param exit_reason_stats: Exit reason metrics
+    :param stake_currency: Stakecurrency used
+    :return: pretty printed table with tabulate as string
+    """
+    headers = [
+        'Exit Reason',
+        'Exits',
+        'Win Draws Loss Win%',
+        'Avg Profit %',
+        'Cum Profit %',
+        f'Tot Profit {stake_currency}',
+        'Tot Profit %',
+    ]
+
+    output = [[
+        t.get('exit_reason', t.get('sell_reason')), t['trades'],
+        generate_wins_draws_losses(t['wins'], t['draws'], t['losses']),
+        t['profit_mean_pct'], t['profit_sum_pct'],
+        round_coin_value(t['profit_total_abs'], stake_currency, False),
+        t['profit_total_pct'],
+    ] for t in exit_reason_stats]
+    return tabulate(output, headers=headers, tablefmt="orgtbl", stralign="right")
+
+
+def text_table_tags(tag_type: str, tag_results: List[Dict[str, Any]], stake_currency: str) -> str:
+    """
+    Generates and returns a text table for the given backtest data and the results dataframe
+    :param tag_type: Tag category - "enter_tag" or "exit_tag"
+    :param tag_results: List of Dictionaries - one entry per tag + final TOTAL row
+    :param stake_currency: stake-currency - used to correctly name headers
+    :return: pretty printed table with tabulate as string
+    """
+    if (tag_type == "enter_tag"):
+        headers = _get_line_header("TAG", stake_currency)
+    else:
+        headers = _get_line_header("TAG", stake_currency, 'Exits')
+    floatfmt = _get_line_floatfmt(stake_currency)
+    output = [
+        [
+            t['key'] if t['key'] is not None and len(
+                t['key']) > 0 else "OTHER",
+            t['trades'],
+            t['profit_mean_pct'],
+            t['profit_sum_pct'],
+            t['profit_total_abs'],
+            t['profit_total_pct'],
+            t['duration_avg'],
+            generate_wins_draws_losses(
+                t['wins'],
+                t['draws'],
+                t['losses'])] for t in tag_results]
+    # Ignore type as floatfmt does allow tuples but mypy does not know that
+    return tabulate(output, headers=headers,
+                    floatfmt=floatfmt, tablefmt="orgtbl", stralign="right")
+
+
+def text_table_periodic_breakdown(days_breakdown_stats: List[Dict[str, Any]],
+                                  stake_currency: str, period: str) -> str:
+    """
+    Generate small table with Backtest results by period
+    :param days_breakdown_stats: Days breakdown metrics
+    :param stake_currency: Stakecurrency used
+    :return: pretty printed table with tabulate as string
+    """
+    headers = [
+        period.capitalize(),
+        f'Tot Profit {stake_currency}',
+        'Wins',
+        'Draws',
+        'Losses',
+    ]
+    output = [[
+        d['date'], round_coin_value(d['profit_abs'], stake_currency, False),
+        d['wins'], d['draws'], d['loses'],
+    ] for d in days_breakdown_stats]
+    return tabulate(output, headers=headers, tablefmt="orgtbl", stralign="right")
+
+
+def text_table_strategy(strategy_results, stake_currency: str) -> str:
+    """
+    Generate summary table per strategy
+    :param strategy_results: List of dictionaries containing the results per strategy
+    :param stake_currency: stake-currency - used to correctly name headers
+    :return: pretty printed table with tabulate as string
+    """
+    floatfmt = _get_line_floatfmt(stake_currency)
+
headers = _get_line_header('Strategy', stake_currency) + # _get_line_header() is also used for per-pair summary. Per-pair drawdown is mostly useless + # therefore we slip this column in only for strategy summary here. + headers.append('Drawdown') + + # Align drawdown string on the center two space separator. + if 'max_drawdown_account' in strategy_results[0]: + drawdown = [f'{t["max_drawdown_account"] * 100:.2f}' for t in strategy_results] + else: + # Support for prior backtest results + drawdown = [f'{t["max_drawdown_per"]:.2f}' for t in strategy_results] + + dd_pad_abs = max([len(t['max_drawdown_abs']) for t in strategy_results]) + dd_pad_per = max([len(dd) for dd in drawdown]) + drawdown = [f'{t["max_drawdown_abs"]:>{dd_pad_abs}} {stake_currency} {dd:>{dd_pad_per}}%' + for t, dd in zip(strategy_results, drawdown)] + + output = [[ + t['key'], t['trades'], t['profit_mean_pct'], t['profit_sum_pct'], t['profit_total_abs'], + t['profit_total_pct'], t['duration_avg'], + generate_wins_draws_losses(t['wins'], t['draws'], t['losses']), drawdown] + for t, drawdown in zip(strategy_results, drawdown)] + # Ignore type as floatfmt does allow tuples but mypy does not know that + return tabulate(output, headers=headers, + floatfmt=floatfmt, tablefmt="orgtbl", stralign="right") + + +def text_table_add_metrics(strat_results: Dict) -> str: + if len(strat_results['trades']) > 0: + best_trade = max(strat_results['trades'], key=lambda x: x['profit_ratio']) + worst_trade = min(strat_results['trades'], key=lambda x: x['profit_ratio']) + + short_metrics = [ + ('', ''), # Empty line to improve readability + ('Long / Short', + f"{strat_results.get('trade_count_long', 'total_trades')} / " + f"{strat_results.get('trade_count_short', 0)}"), + ('Total profit Long %', f"{strat_results['profit_total_long']:.2%}"), + ('Total profit Short %', f"{strat_results['profit_total_short']:.2%}"), + ('Absolute profit Long', round_coin_value(strat_results['profit_total_long_abs'], + strat_results['stake_currency'])), + ('Absolute profit Short', round_coin_value(strat_results['profit_total_short_abs'], + strat_results['stake_currency'])), + ] if strat_results.get('trade_count_short', 0) > 0 else [] + + drawdown_metrics = [] + if 'max_relative_drawdown' in strat_results: + # Compatibility to show old hyperopt results + drawdown_metrics.append( + ('Max % of account underwater', f"{strat_results['max_relative_drawdown']:.2%}") + ) + drawdown_metrics.extend([ + ('Absolute Drawdown (Account)', f"{strat_results['max_drawdown_account']:.2%}") + if 'max_drawdown_account' in strat_results else ( + 'Drawdown', f"{strat_results['max_drawdown']:.2%}"), + ('Absolute Drawdown', round_coin_value(strat_results['max_drawdown_abs'], + strat_results['stake_currency'])), + ('Drawdown high', round_coin_value(strat_results['max_drawdown_high'], + strat_results['stake_currency'])), + ('Drawdown low', round_coin_value(strat_results['max_drawdown_low'], + strat_results['stake_currency'])), + ('Drawdown Start', strat_results['drawdown_start']), + ('Drawdown End', strat_results['drawdown_end']), + ]) + + entry_adjustment_metrics = [ + ('Canceled Trade Entries', strat_results.get('canceled_trade_entries', 'N/A')), + ('Canceled Entry Orders', strat_results.get('canceled_entry_orders', 'N/A')), + ('Replaced Entry Orders', strat_results.get('replaced_entry_orders', 'N/A')), + ] if strat_results.get('canceled_entry_orders', 0) > 0 else [] + + # Newly added fields should be ignored if they are missing in strat_results. 
hyperopt-show + # command stores these results, and newer versions of freqtrade must be able to handle old + # results with missing new fields. + metrics = [ + ('Backtesting from', strat_results['backtest_start']), + ('Backtesting to', strat_results['backtest_end']), + ('Max open trades', strat_results['max_open_trades']), + ('', ''), # Empty line to improve readability + ('Total/Daily Avg Trades', + f"{strat_results['total_trades']} / {strat_results['trades_per_day']}"), + + ('Starting balance', round_coin_value(strat_results['starting_balance'], + strat_results['stake_currency'])), + ('Final balance', round_coin_value(strat_results['final_balance'], + strat_results['stake_currency'])), + ('Absolute profit ', round_coin_value(strat_results['profit_total_abs'], + strat_results['stake_currency'])), + ('Total profit %', f"{strat_results['profit_total']:.2%}"), + ('CAGR %', f"{strat_results['cagr']:.2%}" if 'cagr' in strat_results else 'N/A'), + ('Sortino', f"{strat_results['sortino']:.2f}" if 'sortino' in strat_results else 'N/A'), + ('Sharpe', f"{strat_results['sharpe']:.2f}" if 'sharpe' in strat_results else 'N/A'), + ('Calmar', f"{strat_results['calmar']:.2f}" if 'calmar' in strat_results else 'N/A'), + ('Profit factor', f'{strat_results["profit_factor"]:.2f}' if 'profit_factor' + in strat_results else 'N/A'), + ('Expectancy', f"{strat_results['expectancy']:.2f}" if 'expectancy' + in strat_results else 'N/A'), + ('Trades per day', strat_results['trades_per_day']), + ('Avg. daily profit %', + f"{(strat_results['profit_total'] / strat_results['backtest_days']):.2%}"), + ('Avg. stake amount', round_coin_value(strat_results['avg_stake_amount'], + strat_results['stake_currency'])), + ('Total trade volume', round_coin_value(strat_results['total_volume'], + strat_results['stake_currency'])), + *short_metrics, + ('', ''), # Empty line to improve readability + ('Best Pair', f"{strat_results['best_pair']['key']} " + f"{strat_results['best_pair']['profit_sum']:.2%}"), + ('Worst Pair', f"{strat_results['worst_pair']['key']} " + f"{strat_results['worst_pair']['profit_sum']:.2%}"), + ('Best trade', f"{best_trade['pair']} {best_trade['profit_ratio']:.2%}"), + ('Worst trade', f"{worst_trade['pair']} " + f"{worst_trade['profit_ratio']:.2%}"), + + ('Best day', round_coin_value(strat_results['backtest_best_day_abs'], + strat_results['stake_currency'])), + ('Worst day', round_coin_value(strat_results['backtest_worst_day_abs'], + strat_results['stake_currency'])), + ('Days win/draw/lose', f"{strat_results['winning_days']} / " + f"{strat_results['draw_days']} / {strat_results['losing_days']}"), + ('Avg. Duration Winners', f"{strat_results['winner_holding_avg']}"), + ('Avg. 
Duration Losers', f"{strat_results['loser_holding_avg']}"), + ('Rejected Entry signals', strat_results.get('rejected_signals', 'N/A')), + ('Entry/Exit Timeouts', + f"{strat_results.get('timedout_entry_orders', 'N/A')} / " + f"{strat_results.get('timedout_exit_orders', 'N/A')}"), + *entry_adjustment_metrics, + ('', ''), # Empty line to improve readability + + ('Min balance', round_coin_value(strat_results['csum_min'], + strat_results['stake_currency'])), + ('Max balance', round_coin_value(strat_results['csum_max'], + strat_results['stake_currency'])), + + *drawdown_metrics, + ('Market change', f"{strat_results['market_change']:.2%}"), + ] + + return tabulate(metrics, headers=["Metric", "Value"], tablefmt="orgtbl") + else: + start_balance = round_coin_value(strat_results['starting_balance'], + strat_results['stake_currency']) + stake_amount = round_coin_value( + strat_results['stake_amount'], strat_results['stake_currency'] + ) if strat_results['stake_amount'] != UNLIMITED_STAKE_AMOUNT else 'unlimited' + + message = ("No trades made. " + f"Your starting balance was {start_balance}, " + f"and your stake was {stake_amount}." + ) + return message + + +def show_backtest_result(strategy: str, results: Dict[str, Any], stake_currency: str, + backtest_breakdown=[]): + """ + Print results for one strategy + """ + # Print results + print(f"Result for strategy {strategy}") + table = text_table_bt_results(results['results_per_pair'], stake_currency=stake_currency) + if isinstance(table, str): + print(' BACKTESTING REPORT '.center(len(table.splitlines()[0]), '=')) + print(table) + + table = text_table_bt_results(results['left_open_trades'], stake_currency=stake_currency) + if isinstance(table, str) and len(table) > 0: + print(' LEFT OPEN TRADES REPORT '.center(len(table.splitlines()[0]), '=')) + print(table) + + if (results.get('results_per_enter_tag') is not None + or results.get('results_per_buy_tag') is not None): + # results_per_buy_tag is deprecated and should be removed 2 versions after the short go-live. 
+ table = text_table_tags( + "enter_tag", + results.get('results_per_enter_tag', results.get('results_per_buy_tag')), + stake_currency=stake_currency) + + if isinstance(table, str) and len(table) > 0: + print(' ENTER TAG STATS '.center(len(table.splitlines()[0]), '=')) + print(table) + + exit_reasons = results.get('exit_reason_summary', results.get('sell_reason_summary')) + table = text_table_exit_reason(exit_reason_stats=exit_reasons, + stake_currency=stake_currency) + if isinstance(table, str) and len(table) > 0: + print(' EXIT REASON STATS '.center(len(table.splitlines()[0]), '=')) + print(table) + + for period in backtest_breakdown: + if period in results.get('periodic_breakdown', {}): + days_breakdown_stats = results['periodic_breakdown'][period] + else: + days_breakdown_stats = generate_periodic_breakdown_stats( + trade_list=results['trades'], period=period) + table = text_table_periodic_breakdown(days_breakdown_stats=days_breakdown_stats, + stake_currency=stake_currency, period=period) + if isinstance(table, str) and len(table) > 0: + print(f' {period.upper()} BREAKDOWN '.center(len(table.splitlines()[0]), '=')) + print(table) + + table = text_table_add_metrics(results) + if isinstance(table, str) and len(table) > 0: + print(' SUMMARY METRICS '.center(len(table.splitlines()[0]), '=')) + print(table) + + if isinstance(table, str) and len(table) > 0: + print('=' * len(table.splitlines()[0])) + + print() + + +def show_backtest_results(config: Config, backtest_stats: Dict): + stake_currency = config['stake_currency'] + + for strategy, results in backtest_stats['strategy'].items(): + show_backtest_result( + strategy, results, stake_currency, + config.get('backtest_breakdown', [])) + + if len(backtest_stats['strategy']) > 0: + # Print Strategy summary table + + table = text_table_strategy(backtest_stats['strategy_comparison'], stake_currency) + print(f"Backtested {results['backtest_start']} -> {results['backtest_end']} |" + f" Max open trades : {results['max_open_trades']}") + print(' STRATEGY SUMMARY '.center(len(table.splitlines()[0]), '=')) + print(table) + print('=' * len(table.splitlines()[0])) + print('\nFor more details, please look at the detail tables above') + + +def show_sorted_pairlist(config: Config, backtest_stats: Dict): + if config.get('backtest_show_pair_list', False): + for strategy, results in backtest_stats['strategy'].items(): + print(f"Pairs for Strategy {strategy}: \n[") + for result in results['results_per_pair']: + if result["key"] != 'TOTAL': + print(f'"{result["key"]}", // {result["profit_mean"]:.2%}') + print("]") + + +def generate_edge_table(results: dict) -> str: + floatfmt = ('s', '.10g', '.2f', '.2f', '.2f', '.2f', 'd', 'd', 'd') + tabular_data = [] + headers = ['Pair', 'Stoploss', 'Win Rate', 'Risk Reward Ratio', + 'Required Risk Reward', 'Expectancy', 'Total Number of Trades', + 'Average Duration (min)'] + + for result in results.items(): + if result[1].nb_trades > 0: + tabular_data.append([ + result[0], + result[1].stoploss, + result[1].winrate, + result[1].risk_reward_ratio, + result[1].required_risk_reward, + result[1].expectancy, + result[1].nb_trades, + round(result[1].avg_trade_duration) + ]) + + # Ignore type as floatfmt does allow tuples but mypy does not know that + return tabulate(tabular_data, headers=headers, + floatfmt=floatfmt, tablefmt="orgtbl", stralign="right") diff --git a/freqtrade/optimize/optimize_reports/bt_storage.py b/freqtrade/optimize/optimize_reports/bt_storage.py new file mode 100644 index 000000000..af97753e3 --- /dev/null +++ 
b/freqtrade/optimize/optimize_reports/bt_storage.py @@ -0,0 +1,71 @@ +import logging +from pathlib import Path +from typing import Dict + +from pandas import DataFrame + +from freqtrade.constants import LAST_BT_RESULT_FN +from freqtrade.misc import file_dump_joblib, file_dump_json +from freqtrade.optimize.backtest_caching import get_backtest_metadata_filename + + +logger = logging.getLogger(__name__) + + +def store_backtest_stats( + recordfilename: Path, stats: Dict[str, DataFrame], dtappendix: str) -> None: + """ + Stores backtest results + :param recordfilename: Path object, which can either be a filename or a directory. + Filenames will be appended with a timestamp right before the suffix + while for directories, /backtest-result-<datetime>.json will be used as filename + :param stats: Dict containing the backtesting statistics + :param dtappendix: Datetime to use for the filename + """ + if recordfilename.is_dir(): + filename = (recordfilename / f'backtest-result-{dtappendix}.json') + else: + filename = Path.joinpath( + recordfilename.parent, f'{recordfilename.stem}-{dtappendix}' + ).with_suffix(recordfilename.suffix) + + # Store metadata separately. + file_dump_json(get_backtest_metadata_filename(filename), stats['metadata']) + del stats['metadata'] + + file_dump_json(filename, stats) + + latest_filename = Path.joinpath(filename.parent, LAST_BT_RESULT_FN) + file_dump_json(latest_filename, {'latest_backtest': str(filename.name)}) + + +def _store_backtest_analysis_data( + recordfilename: Path, data: Dict[str, Dict], + dtappendix: str, name: str) -> Path: + """ + Stores backtest trade candles for analysis + :param recordfilename: Path object, which can either be a filename or a directory. + Filenames will be appended with a timestamp right before the suffix + while for directories, /backtest-result-<datetime>_<name>.pkl will be used + as filename + :param data: Dict containing the backtesting data for analysis + :param dtappendix: Datetime to use for the filename + :param name: Name to use for the file, e.g. 
signals, rejected + """ + if recordfilename.is_dir(): + filename = (recordfilename / f'backtest-result-{dtappendix}_{name}.pkl') + else: + filename = Path.joinpath( + recordfilename.parent, f'{recordfilename.stem}-{dtappendix}_{name}.pkl' + ) + + file_dump_joblib(filename, data) + + return filename + + +def store_backtest_analysis_results( + recordfilename: Path, candles: Dict[str, Dict], trades: Dict[str, Dict], + dtappendix: str) -> None: + _store_backtest_analysis_data(recordfilename, candles, dtappendix, "signals") + _store_backtest_analysis_data(recordfilename, trades, dtappendix, "rejected") diff --git a/freqtrade/optimize/optimize_reports.py b/freqtrade/optimize/optimize_reports/optimize_reports.py similarity index 51% rename from freqtrade/optimize/optimize_reports.py rename to freqtrade/optimize/optimize_reports/optimize_reports.py index e60047a79..015f163e3 100644 --- a/freqtrade/optimize/optimize_reports.py +++ b/freqtrade/optimize/optimize_reports/optimize_reports.py @@ -1,83 +1,20 @@ import logging from copy import deepcopy from datetime import datetime, timedelta, timezone -from pathlib import Path from typing import Any, Dict, List, Union from pandas import DataFrame, concat, to_datetime -from tabulate import tabulate -from freqtrade.constants import (BACKTEST_BREAKDOWNS, DATETIME_PRINT_FORMAT, LAST_BT_RESULT_FN, - UNLIMITED_STAKE_AMOUNT, Config, IntOrInf) +from freqtrade.constants import BACKTEST_BREAKDOWNS, DATETIME_PRINT_FORMAT, IntOrInf from freqtrade.data.metrics import (calculate_cagr, calculate_calmar, calculate_csum, calculate_expectancy, calculate_market_change, calculate_max_drawdown, calculate_sharpe, calculate_sortino) -from freqtrade.misc import decimals_per_coin, file_dump_joblib, file_dump_json, round_coin_value -from freqtrade.optimize.backtest_caching import get_backtest_metadata_filename +from freqtrade.misc import decimals_per_coin, round_coin_value logger = logging.getLogger(__name__) -def store_backtest_stats( - recordfilename: Path, stats: Dict[str, DataFrame], dtappendix: str) -> None: - """ - Stores backtest results - :param recordfilename: Path object, which can either be a filename or a directory. - Filenames will be appended with a timestamp right before the suffix - while for directories, /backtest-result-.json will be used as filename - :param stats: Dataframe containing the backtesting statistics - :param dtappendix: Datetime to use for the filename - """ - if recordfilename.is_dir(): - filename = (recordfilename / f'backtest-result-{dtappendix}.json') - else: - filename = Path.joinpath( - recordfilename.parent, f'{recordfilename.stem}-{dtappendix}' - ).with_suffix(recordfilename.suffix) - - # Store metadata separately. - file_dump_json(get_backtest_metadata_filename(filename), stats['metadata']) - del stats['metadata'] - - file_dump_json(filename, stats) - - latest_filename = Path.joinpath(filename.parent, LAST_BT_RESULT_FN) - file_dump_json(latest_filename, {'latest_backtest': str(filename.name)}) - - -def _store_backtest_analysis_data( - recordfilename: Path, data: Dict[str, Dict], - dtappendix: str, name: str) -> Path: - """ - Stores backtest trade candles for analysis - :param recordfilename: Path object, which can either be a filename or a directory. 
- Filenames will be appended with a timestamp right before the suffix - while for directories, /backtest-result-_.pkl will be used - as filename - :param candles: Dict containing the backtesting data for analysis - :param dtappendix: Datetime to use for the filename - :param name: Name to use for the file, e.g. signals, rejected - """ - if recordfilename.is_dir(): - filename = (recordfilename / f'backtest-result-{dtappendix}_{name}.pkl') - else: - filename = Path.joinpath( - recordfilename.parent, f'{recordfilename.stem}-{dtappendix}_{name}.pkl' - ) - - file_dump_joblib(filename, data) - - return filename - - -def store_backtest_analysis_results( - recordfilename: Path, candles: Dict[str, Dict], trades: Dict[str, Dict], - dtappendix: str) -> None: - _store_backtest_analysis_data(recordfilename, candles, dtappendix, "signals") - _store_backtest_analysis_data(recordfilename, trades, dtappendix, "rejected") - - def generate_trade_signal_candles(preprocessed_df: Dict[str, DataFrame], bt_results: Dict[str, Any]) -> DataFrame: signal_candles_only = {} @@ -120,24 +57,6 @@ def generate_rejected_signals(preprocessed_df: Dict[str, DataFrame], return rejected_candles_only -def _get_line_floatfmt(stake_currency: str) -> List[str]: - """ - Generate floatformat (goes in line with _generate_result_line()) - """ - return ['s', 'd', '.2f', '.2f', f'.{decimals_per_coin(stake_currency)}f', - '.2f', 'd', 's', 's'] - - -def _get_line_header(first_column: str, stake_currency: str, - direction: str = 'Entries') -> List[str]: - """ - Generate header lines (goes in line with _generate_result_line()) - """ - return [first_column, direction, 'Avg Profit %', 'Cum Profit %', - f'Tot Profit {stake_currency}', 'Tot Profit %', 'Avg Duration', - 'Win Draw Loss Win%'] - - def generate_wins_draws_losses(wins, draws, losses): if wins > 0 and losses == 0: wl_ratio = '100' @@ -295,31 +214,6 @@ def generate_strategy_comparison(bt_stats: Dict) -> List[Dict]: return tabular_data -def generate_edge_table(results: dict) -> str: - floatfmt = ('s', '.10g', '.2f', '.2f', '.2f', '.2f', 'd', 'd', 'd') - tabular_data = [] - headers = ['Pair', 'Stoploss', 'Win Rate', 'Risk Reward Ratio', - 'Required Risk Reward', 'Expectancy', 'Total Number of Trades', - 'Average Duration (min)'] - - for result in results.items(): - if result[1].nb_trades > 0: - tabular_data.append([ - result[0], - result[1].stoploss, - result[1].winrate, - result[1].risk_reward_ratio, - result[1].required_risk_reward, - result[1].expectancy, - result[1].nb_trades, - round(result[1].avg_trade_duration) - ]) - - # Ignore type as floatfmt does allow tuples but mypy does not know that - return tabulate(tabular_data, headers=headers, - floatfmt=floatfmt, tablefmt="orgtbl", stralign="right") - - def _get_resample_from_period(period: str) -> str: if period == 'day': return '1d' @@ -652,357 +546,3 @@ def generate_backtest_stats(btdata: Dict[str, DataFrame], result['strategy_comparison'] = strategy_results return result - - -### -# Start output section -### - -def text_table_bt_results(pair_results: List[Dict[str, Any]], stake_currency: str) -> str: - """ - Generates and returns a text table for the given backtest data and the results dataframe - :param pair_results: List of Dictionaries - one entry per pair + final TOTAL row - :param stake_currency: stake-currency - used to correctly name headers - :return: pretty printed table with tabulate as string - """ - - headers = _get_line_header('Pair', stake_currency) - floatfmt = _get_line_floatfmt(stake_currency) - output = [[ - 
t['key'], t['trades'], t['profit_mean_pct'], t['profit_sum_pct'], t['profit_total_abs'], - t['profit_total_pct'], t['duration_avg'], - generate_wins_draws_losses(t['wins'], t['draws'], t['losses']) - ] for t in pair_results] - # Ignore type as floatfmt does allow tuples but mypy does not know that - return tabulate(output, headers=headers, - floatfmt=floatfmt, tablefmt="orgtbl", stralign="right") - - -def text_table_exit_reason(exit_reason_stats: List[Dict[str, Any]], stake_currency: str) -> str: - """ - Generate small table outlining Backtest results - :param sell_reason_stats: Exit reason metrics - :param stake_currency: Stakecurrency used - :return: pretty printed table with tabulate as string - """ - headers = [ - 'Exit Reason', - 'Exits', - 'Win Draws Loss Win%', - 'Avg Profit %', - 'Cum Profit %', - f'Tot Profit {stake_currency}', - 'Tot Profit %', - ] - - output = [[ - t.get('exit_reason', t.get('sell_reason')), t['trades'], - generate_wins_draws_losses(t['wins'], t['draws'], t['losses']), - t['profit_mean_pct'], t['profit_sum_pct'], - round_coin_value(t['profit_total_abs'], stake_currency, False), - t['profit_total_pct'], - ] for t in exit_reason_stats] - return tabulate(output, headers=headers, tablefmt="orgtbl", stralign="right") - - -def text_table_tags(tag_type: str, tag_results: List[Dict[str, Any]], stake_currency: str) -> str: - """ - Generates and returns a text table for the given backtest data and the results dataframe - :param pair_results: List of Dictionaries - one entry per pair + final TOTAL row - :param stake_currency: stake-currency - used to correctly name headers - :return: pretty printed table with tabulate as string - """ - if (tag_type == "enter_tag"): - headers = _get_line_header("TAG", stake_currency) - else: - headers = _get_line_header("TAG", stake_currency, 'Exits') - floatfmt = _get_line_floatfmt(stake_currency) - output = [ - [ - t['key'] if t['key'] is not None and len( - t['key']) > 0 else "OTHER", - t['trades'], - t['profit_mean_pct'], - t['profit_sum_pct'], - t['profit_total_abs'], - t['profit_total_pct'], - t['duration_avg'], - generate_wins_draws_losses( - t['wins'], - t['draws'], - t['losses'])] for t in tag_results] - # Ignore type as floatfmt does allow tuples but mypy does not know that - return tabulate(output, headers=headers, - floatfmt=floatfmt, tablefmt="orgtbl", stralign="right") - - -def text_table_periodic_breakdown(days_breakdown_stats: List[Dict[str, Any]], - stake_currency: str, period: str) -> str: - """ - Generate small table with Backtest results by days - :param days_breakdown_stats: Days breakdown metrics - :param stake_currency: Stakecurrency used - :return: pretty printed table with tabulate as string - """ - headers = [ - period.capitalize(), - f'Tot Profit {stake_currency}', - 'Wins', - 'Draws', - 'Losses', - ] - output = [[ - d['date'], round_coin_value(d['profit_abs'], stake_currency, False), - d['wins'], d['draws'], d['loses'], - ] for d in days_breakdown_stats] - return tabulate(output, headers=headers, tablefmt="orgtbl", stralign="right") - - -def text_table_strategy(strategy_results, stake_currency: str) -> str: - """ - Generate summary table per strategy - :param strategy_results: Dict of containing results for all strategies - :param stake_currency: stake-currency - used to correctly name headers - :return: pretty printed table with tabulate as string - """ - floatfmt = _get_line_floatfmt(stake_currency) - headers = _get_line_header('Strategy', stake_currency) - # _get_line_header() is also used for per-pair 
summary. Per-pair drawdown is mostly useless - # therefore we slip this column in only for strategy summary here. - headers.append('Drawdown') - - # Align drawdown string on the center two space separator. - if 'max_drawdown_account' in strategy_results[0]: - drawdown = [f'{t["max_drawdown_account"] * 100:.2f}' for t in strategy_results] - else: - # Support for prior backtest results - drawdown = [f'{t["max_drawdown_per"]:.2f}' for t in strategy_results] - - dd_pad_abs = max([len(t['max_drawdown_abs']) for t in strategy_results]) - dd_pad_per = max([len(dd) for dd in drawdown]) - drawdown = [f'{t["max_drawdown_abs"]:>{dd_pad_abs}} {stake_currency} {dd:>{dd_pad_per}}%' - for t, dd in zip(strategy_results, drawdown)] - - output = [[ - t['key'], t['trades'], t['profit_mean_pct'], t['profit_sum_pct'], t['profit_total_abs'], - t['profit_total_pct'], t['duration_avg'], - generate_wins_draws_losses(t['wins'], t['draws'], t['losses']), drawdown] - for t, drawdown in zip(strategy_results, drawdown)] - # Ignore type as floatfmt does allow tuples but mypy does not know that - return tabulate(output, headers=headers, - floatfmt=floatfmt, tablefmt="orgtbl", stralign="right") - - -def text_table_add_metrics(strat_results: Dict) -> str: - if len(strat_results['trades']) > 0: - best_trade = max(strat_results['trades'], key=lambda x: x['profit_ratio']) - worst_trade = min(strat_results['trades'], key=lambda x: x['profit_ratio']) - - short_metrics = [ - ('', ''), # Empty line to improve readability - ('Long / Short', - f"{strat_results.get('trade_count_long', 'total_trades')} / " - f"{strat_results.get('trade_count_short', 0)}"), - ('Total profit Long %', f"{strat_results['profit_total_long']:.2%}"), - ('Total profit Short %', f"{strat_results['profit_total_short']:.2%}"), - ('Absolute profit Long', round_coin_value(strat_results['profit_total_long_abs'], - strat_results['stake_currency'])), - ('Absolute profit Short', round_coin_value(strat_results['profit_total_short_abs'], - strat_results['stake_currency'])), - ] if strat_results.get('trade_count_short', 0) > 0 else [] - - drawdown_metrics = [] - if 'max_relative_drawdown' in strat_results: - # Compatibility to show old hyperopt results - drawdown_metrics.append( - ('Max % of account underwater', f"{strat_results['max_relative_drawdown']:.2%}") - ) - drawdown_metrics.extend([ - ('Absolute Drawdown (Account)', f"{strat_results['max_drawdown_account']:.2%}") - if 'max_drawdown_account' in strat_results else ( - 'Drawdown', f"{strat_results['max_drawdown']:.2%}"), - ('Absolute Drawdown', round_coin_value(strat_results['max_drawdown_abs'], - strat_results['stake_currency'])), - ('Drawdown high', round_coin_value(strat_results['max_drawdown_high'], - strat_results['stake_currency'])), - ('Drawdown low', round_coin_value(strat_results['max_drawdown_low'], - strat_results['stake_currency'])), - ('Drawdown Start', strat_results['drawdown_start']), - ('Drawdown End', strat_results['drawdown_end']), - ]) - - entry_adjustment_metrics = [ - ('Canceled Trade Entries', strat_results.get('canceled_trade_entries', 'N/A')), - ('Canceled Entry Orders', strat_results.get('canceled_entry_orders', 'N/A')), - ('Replaced Entry Orders', strat_results.get('replaced_entry_orders', 'N/A')), - ] if strat_results.get('canceled_entry_orders', 0) > 0 else [] - - # Newly added fields should be ignored if they are missing in strat_results. hyperopt-show - # command stores these results and newer version of freqtrade must be able to handle old - # results with missing new fields. 
- metrics = [ - ('Backtesting from', strat_results['backtest_start']), - ('Backtesting to', strat_results['backtest_end']), - ('Max open trades', strat_results['max_open_trades']), - ('', ''), # Empty line to improve readability - ('Total/Daily Avg Trades', - f"{strat_results['total_trades']} / {strat_results['trades_per_day']}"), - - ('Starting balance', round_coin_value(strat_results['starting_balance'], - strat_results['stake_currency'])), - ('Final balance', round_coin_value(strat_results['final_balance'], - strat_results['stake_currency'])), - ('Absolute profit ', round_coin_value(strat_results['profit_total_abs'], - strat_results['stake_currency'])), - ('Total profit %', f"{strat_results['profit_total']:.2%}"), - ('CAGR %', f"{strat_results['cagr']:.2%}" if 'cagr' in strat_results else 'N/A'), - ('Sortino', f"{strat_results['sortino']:.2f}" if 'sortino' in strat_results else 'N/A'), - ('Sharpe', f"{strat_results['sharpe']:.2f}" if 'sharpe' in strat_results else 'N/A'), - ('Calmar', f"{strat_results['calmar']:.2f}" if 'calmar' in strat_results else 'N/A'), - ('Profit factor', f'{strat_results["profit_factor"]:.2f}' if 'profit_factor' - in strat_results else 'N/A'), - ('Expectancy', f"{strat_results['expectancy']:.2f}" if 'expectancy' - in strat_results else 'N/A'), - ('Trades per day', strat_results['trades_per_day']), - ('Avg. daily profit %', - f"{(strat_results['profit_total'] / strat_results['backtest_days']):.2%}"), - ('Avg. stake amount', round_coin_value(strat_results['avg_stake_amount'], - strat_results['stake_currency'])), - ('Total trade volume', round_coin_value(strat_results['total_volume'], - strat_results['stake_currency'])), - *short_metrics, - ('', ''), # Empty line to improve readability - ('Best Pair', f"{strat_results['best_pair']['key']} " - f"{strat_results['best_pair']['profit_sum']:.2%}"), - ('Worst Pair', f"{strat_results['worst_pair']['key']} " - f"{strat_results['worst_pair']['profit_sum']:.2%}"), - ('Best trade', f"{best_trade['pair']} {best_trade['profit_ratio']:.2%}"), - ('Worst trade', f"{worst_trade['pair']} " - f"{worst_trade['profit_ratio']:.2%}"), - - ('Best day', round_coin_value(strat_results['backtest_best_day_abs'], - strat_results['stake_currency'])), - ('Worst day', round_coin_value(strat_results['backtest_worst_day_abs'], - strat_results['stake_currency'])), - ('Days win/draw/lose', f"{strat_results['winning_days']} / " - f"{strat_results['draw_days']} / {strat_results['losing_days']}"), - ('Avg. Duration Winners', f"{strat_results['winner_holding_avg']}"), - ('Avg. 
Duration Loser', f"{strat_results['loser_holding_avg']}"), - ('Rejected Entry signals', strat_results.get('rejected_signals', 'N/A')), - ('Entry/Exit Timeouts', - f"{strat_results.get('timedout_entry_orders', 'N/A')} / " - f"{strat_results.get('timedout_exit_orders', 'N/A')}"), - *entry_adjustment_metrics, - ('', ''), # Empty line to improve readability - - ('Min balance', round_coin_value(strat_results['csum_min'], - strat_results['stake_currency'])), - ('Max balance', round_coin_value(strat_results['csum_max'], - strat_results['stake_currency'])), - - *drawdown_metrics, - ('Market change', f"{strat_results['market_change']:.2%}"), - ] - - return tabulate(metrics, headers=["Metric", "Value"], tablefmt="orgtbl") - else: - start_balance = round_coin_value(strat_results['starting_balance'], - strat_results['stake_currency']) - stake_amount = round_coin_value( - strat_results['stake_amount'], strat_results['stake_currency'] - ) if strat_results['stake_amount'] != UNLIMITED_STAKE_AMOUNT else 'unlimited' - - message = ("No trades made. " - f"Your starting balance was {start_balance}, " - f"and your stake was {stake_amount}." - ) - return message - - -def show_backtest_result(strategy: str, results: Dict[str, Any], stake_currency: str, - backtest_breakdown=[]): - """ - Print results for one strategy - """ - # Print results - print(f"Result for strategy {strategy}") - table = text_table_bt_results(results['results_per_pair'], stake_currency=stake_currency) - if isinstance(table, str): - print(' BACKTESTING REPORT '.center(len(table.splitlines()[0]), '=')) - print(table) - - table = text_table_bt_results(results['left_open_trades'], stake_currency=stake_currency) - if isinstance(table, str) and len(table) > 0: - print(' LEFT OPEN TRADES REPORT '.center(len(table.splitlines()[0]), '=')) - print(table) - - if (results.get('results_per_enter_tag') is not None - or results.get('results_per_buy_tag') is not None): - # results_per_buy_tag is deprecated and should be removed 2 versions after short golive. 
- table = text_table_tags( - "enter_tag", - results.get('results_per_enter_tag', results.get('results_per_buy_tag')), - stake_currency=stake_currency) - - if isinstance(table, str) and len(table) > 0: - print(' ENTER TAG STATS '.center(len(table.splitlines()[0]), '=')) - print(table) - - exit_reasons = results.get('exit_reason_summary', results.get('sell_reason_summary')) - table = text_table_exit_reason(exit_reason_stats=exit_reasons, - stake_currency=stake_currency) - if isinstance(table, str) and len(table) > 0: - print(' EXIT REASON STATS '.center(len(table.splitlines()[0]), '=')) - print(table) - - for period in backtest_breakdown: - if period in results.get('periodic_breakdown', {}): - days_breakdown_stats = results['periodic_breakdown'][period] - else: - days_breakdown_stats = generate_periodic_breakdown_stats( - trade_list=results['trades'], period=period) - table = text_table_periodic_breakdown(days_breakdown_stats=days_breakdown_stats, - stake_currency=stake_currency, period=period) - if isinstance(table, str) and len(table) > 0: - print(f' {period.upper()} BREAKDOWN '.center(len(table.splitlines()[0]), '=')) - print(table) - - table = text_table_add_metrics(results) - if isinstance(table, str) and len(table) > 0: - print(' SUMMARY METRICS '.center(len(table.splitlines()[0]), '=')) - print(table) - - if isinstance(table, str) and len(table) > 0: - print('=' * len(table.splitlines()[0])) - - print() - - -def show_backtest_results(config: Config, backtest_stats: Dict): - stake_currency = config['stake_currency'] - - for strategy, results in backtest_stats['strategy'].items(): - show_backtest_result( - strategy, results, stake_currency, - config.get('backtest_breakdown', [])) - - if len(backtest_stats['strategy']) > 0: - # Print Strategy summary table - - table = text_table_strategy(backtest_stats['strategy_comparison'], stake_currency) - print(f"Backtested {results['backtest_start']} -> {results['backtest_end']} |" - f" Max open trades : {results['max_open_trades']}") - print(' STRATEGY SUMMARY '.center(len(table.splitlines()[0]), '=')) - print(table) - print('=' * len(table.splitlines()[0])) - print('\nFor more details, please look at the detail tables above') - - -def show_sorted_pairlist(config: Config, backtest_stats: Dict): - if config.get('backtest_show_pair_list', False): - for strategy, results in backtest_stats['strategy'].items(): - print(f"Pairs for Strategy {strategy}: \n[") - for result in results['results_per_pair']: - if result["key"] != 'TOTAL': - print(f'"{result["key"]}", // {result["profit_mean"]:.2%}') - print("]") diff --git a/freqtrade/persistence/key_value_store.py b/freqtrade/persistence/key_value_store.py index 110a23d6c..6da7265d6 100644 --- a/freqtrade/persistence/key_value_store.py +++ b/freqtrade/persistence/key_value_store.py @@ -42,7 +42,7 @@ class _KeyValueStoreModel(ModelBase): int_value: Mapped[Optional[int]] -class KeyValueStore(): +class KeyValueStore: """ Generic bot-wide, persistent key-value store Can be used to store generic values, e.g. very first bot startup time. 
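Taken together, the rename and the two new files above split the former `optimize_reports.py` into statistics generation (`optimize_reports/optimize_reports.py`), table rendering (`optimize_reports/bt_output.py`) and result storage (`optimize_reports/bt_storage.py`). As an illustration only, a minimal sketch of how downstream code would address the relocated storage helper under the new layout; the module paths follow the diff headers, while the result directory and timestamp below are placeholder values:

```python
# Sketch only: import paths follow the new module layout from the diff;
# 'user_data/backtest_results' and the timestamp are placeholder values.
from pathlib import Path

from freqtrade.optimize.optimize_reports.bt_storage import store_backtest_stats

# `stats` would be the dict produced by generate_backtest_stats(); it must
# contain a 'metadata' key, which store_backtest_stats() writes to a separate
# metadata file before dumping the remaining statistics.
# store_backtest_stats(Path('user_data/backtest_results'), stats, '2023-07-01_12-00-00')
```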
diff --git a/freqtrade/persistence/pairlock_middleware.py b/freqtrade/persistence/pairlock_middleware.py index 29169a50d..dd6bacf3a 100644 --- a/freqtrade/persistence/pairlock_middleware.py +++ b/freqtrade/persistence/pairlock_middleware.py @@ -11,7 +11,7 @@ from freqtrade.persistence.models import PairLock logger = logging.getLogger(__name__) -class PairLocks(): +class PairLocks: """ Pairlocks middleware class Abstracts the database layer away so it becomes optional - which will be necessary to support diff --git a/freqtrade/persistence/trade_model.py b/freqtrade/persistence/trade_model.py index 5d8aada6b..35a44e3fc 100644 --- a/freqtrade/persistence/trade_model.py +++ b/freqtrade/persistence/trade_model.py @@ -10,6 +10,7 @@ from typing import Any, ClassVar, Dict, List, Optional, Sequence, cast from sqlalchemy import (Enum, Float, ForeignKey, Integer, ScalarResult, Select, String, UniqueConstraint, desc, func, select) from sqlalchemy.orm import Mapped, lazyload, mapped_column, relationship, validates +from typing_extensions import Self from freqtrade.constants import (CUSTOM_TAG_MAX_LENGTH, DATETIME_PRINT_FORMAT, MATH_CLOSE_PREC, NON_OPEN_EXCHANGE_STATES, BuySell, LongShort) @@ -97,7 +98,7 @@ class Order(ModelBase): @property def safe_filled(self) -> float: - return self.filled if self.filled is not None else self.amount or 0.0 + return self.filled if self.filled is not None else 0.0 @property def safe_cost(self) -> float: @@ -246,15 +247,15 @@ class Order(ModelBase): else: logger.warning(f"Did not find order for {order}.") - @staticmethod + @classmethod def parse_from_ccxt_object( - order: Dict[str, Any], pair: str, side: str, - amount: Optional[float] = None, price: Optional[float] = None) -> 'Order': + cls, order: Dict[str, Any], pair: str, side: str, + amount: Optional[float] = None, price: Optional[float] = None) -> Self: """ Parse an order from a ccxt object and return a new order Object. Optional support for overriding amount and price is only used for test simplification. """ - o = Order( + o = cls( order_id=str(order['id']), ft_order_side=side, ft_pair=pair, @@ -282,7 +283,7 @@ class Order(ModelBase): return Order.session.scalars(select(Order).filter(Order.order_id == order_id)).first() -class LocalTrade(): +class LocalTrade: """ Trade database model. Used in backtesting - must be aligned to Trade model! @@ -703,7 +704,7 @@ class LocalTrade(): self.stoploss_order_id = None self.close_rate_requested = self.stop_loss self.exit_reason = ExitType.STOPLOSS_ON_EXCHANGE.value - if self.is_open: + if self.is_open and order.safe_filled > 0: logger.info(f'{order.order_type.upper()} is hit for {self}.') else: raise ValueError(f'Unknown order type: {order.order_type}') @@ -1391,7 +1392,10 @@ class Trade(ModelBase, LocalTrade): e.g. `(trade_filter=Trade.id == trade_id)` :return: unsorted query object """ - return Trade.session.scalars(Trade.get_trades_query(trade_filter, include_orders)) + query = Trade.get_trades_query(trade_filter, include_orders) + # This should remain split. If use_db is False, session is not available and the above will + # raise an exception. + return Trade.session.scalars(query) @staticmethod def get_open_order_trades() -> List['Trade']: @@ -1638,8 +1642,8 @@ class Trade(ModelBase, LocalTrade): )).scalar_one() return trading_volume - @staticmethod - def from_json(json_str: str) -> 'Trade': + @classmethod + def from_json(cls, json_str: str) -> Self: """ Create a Trade instance from a json string. 
@@ -1649,7 +1653,7 @@ class Trade(ModelBase, LocalTrade): """ import rapidjson data = rapidjson.loads(json_str) - trade = Trade( + trade = cls( id=data["trade_id"], pair=data["pair"], base_currency=data["base_currency"], diff --git a/freqtrade/plugins/pairlist/AgeFilter.py b/freqtrade/plugins/pairlist/AgeFilter.py index 2af86592f..bce789446 100644 --- a/freqtrade/plugins/pairlist/AgeFilter.py +++ b/freqtrade/plugins/pairlist/AgeFilter.py @@ -12,7 +12,7 @@ from freqtrade.constants import Config, ListPairsWithTimeframes from freqtrade.exceptions import OperationalException from freqtrade.exchange.types import Tickers from freqtrade.misc import plural -from freqtrade.plugins.pairlist.IPairList import IPairList +from freqtrade.plugins.pairlist.IPairList import IPairList, PairlistParameter from freqtrade.util import PeriodicCache, dt_floor_day, dt_now, dt_ts @@ -68,6 +68,27 @@ class AgeFilter(IPairList): f"{self._max_days_listed} {plural(self._max_days_listed, 'day')}" ) if self._max_days_listed else '') + @staticmethod + def description() -> str: + return "Filter pairs by age (days listed)." + + @staticmethod + def available_parameters() -> Dict[str, PairlistParameter]: + return { + "min_days_listed": { + "type": "number", + "default": 10, + "description": "Minimum Days Listed", + "help": "Minimum number of days a pair must have been listed on the exchange.", + }, + "max_days_listed": { + "type": "number", + "default": None, + "description": "Maximum Days Listed", + "help": "Maximum number of days a pair must have been listed on the exchange.", + }, + } + def filter_pairlist(self, pairlist: List[str], tickers: Tickers) -> List[str]: """ :param pairlist: pairlist to filter or sort diff --git a/freqtrade/plugins/pairlist/IPairList.py b/freqtrade/plugins/pairlist/IPairList.py index d0382c778..d09b447d4 100644 --- a/freqtrade/plugins/pairlist/IPairList.py +++ b/freqtrade/plugins/pairlist/IPairList.py @@ -4,7 +4,7 @@ PairList Handler base class import logging from abc import ABC, abstractmethod, abstractproperty from copy import deepcopy -from typing import Any, Dict, List, Optional +from typing import Any, Dict, List, Literal, Optional, TypedDict, Union from freqtrade.constants import Config from freqtrade.exceptions import OperationalException @@ -16,8 +16,44 @@ from freqtrade.mixins import LoggingMixin logger = logging.getLogger(__name__) +class __PairlistParameterBase(TypedDict): + description: str + help: str + + +class __NumberPairlistParameter(__PairlistParameterBase): + type: Literal["number"] + default: Union[int, float, None] + + +class __StringPairlistParameter(__PairlistParameterBase): + type: Literal["string"] + default: Union[str, None] + + +class __OptionPairlistParameter(__PairlistParameterBase): + type: Literal["option"] + default: Union[str, None] + options: List[str] + + +class __BoolPairlistParameter(__PairlistParameterBase): + type: Literal["boolean"] + default: Union[bool, None] + + +PairlistParameter = Union[ + __NumberPairlistParameter, + __StringPairlistParameter, + __OptionPairlistParameter, + __BoolPairlistParameter + ] + + class IPairList(LoggingMixin, ABC): + is_pairlist_generator = False + def __init__(self, exchange: Exchange, pairlistmanager, config: Config, pairlistconfig: Dict[str, Any], pairlist_pos: int) -> None: @@ -53,6 +89,37 @@ class IPairList(LoggingMixin, ABC): If no Pairlist requires tickers, an empty Dict is passed as tickers argument to filter_pairlist """ + return False + + @staticmethod + @abstractmethod + def description() -> str: + """ + Return 
description of this Pairlist Handler + -> Please overwrite in subclasses + """ + return "" + + @staticmethod + def available_parameters() -> Dict[str, PairlistParameter]: + """ + Return parameters used by this Pairlist Handler, and their type. + The result is a dictionary with the parameter name as key, and a dictionary + describing its type and default value. + -> Please overwrite in subclasses + """ + return {} + + @staticmethod + def refresh_period_parameter() -> Dict[str, PairlistParameter]: + return { + "refresh_period": { + "type": "number", + "default": 1800, + "description": "Refresh period", + "help": "Refresh period in seconds", + } + } @abstractmethod def short_desc(self) -> str: diff --git a/freqtrade/plugins/pairlist/OffsetFilter.py b/freqtrade/plugins/pairlist/OffsetFilter.py index 8f21cdd85..af152c7bc 100644 --- a/freqtrade/plugins/pairlist/OffsetFilter.py +++ b/freqtrade/plugins/pairlist/OffsetFilter.py @@ -7,7 +7,7 @@ from typing import Any, Dict, List from freqtrade.constants import Config from freqtrade.exceptions import OperationalException from freqtrade.exchange.types import Tickers -from freqtrade.plugins.pairlist.IPairList import IPairList +from freqtrade.plugins.pairlist.IPairList import IPairList, PairlistParameter logger = logging.getLogger(__name__) @@ -43,6 +43,27 @@ class OffsetFilter(IPairList): return f"{self.name} - Taking {self._number_pairs} Pairs, starting from {self._offset}." return f"{self.name} - Offsetting pairs by {self._offset}." + @staticmethod + def description() -> str: + return "Offset pair list filter." + + @staticmethod + def available_parameters() -> Dict[str, PairlistParameter]: + return { + "offset": { + "type": "number", + "default": 0, + "description": "Offset", + "help": "Offset of the pairlist.", + }, + "number_assets": { + "type": "number", + "default": 0, + "description": "Number of assets", + "help": "Number of assets to use from the pairlist, starting from offset.", + }, + } + def filter_pairlist(self, pairlist: List[str], tickers: Tickers) -> List[str]: """ Filters and sorts pairlist and returns the whitelist again. diff --git a/freqtrade/plugins/pairlist/PerformanceFilter.py b/freqtrade/plugins/pairlist/PerformanceFilter.py index e7fcac1e4..06c504317 100644 --- a/freqtrade/plugins/pairlist/PerformanceFilter.py +++ b/freqtrade/plugins/pairlist/PerformanceFilter.py @@ -9,7 +9,7 @@ import pandas as pd from freqtrade.constants import Config from freqtrade.exchange.types import Tickers from freqtrade.persistence import Trade -from freqtrade.plugins.pairlist.IPairList import IPairList +from freqtrade.plugins.pairlist.IPairList import IPairList, PairlistParameter logger = logging.getLogger(__name__) @@ -40,6 +40,27 @@ class PerformanceFilter(IPairList): """ return f"{self.name} - Sorting pairs by performance." + @staticmethod + def description() -> str: + return "Filter pairs by performance." + + @staticmethod + def available_parameters() -> Dict[str, PairlistParameter]: + return { + "minutes": { + "type": "number", + "default": 0, + "description": "Minutes", + "help": "Consider trades from the last X minutes. 0 means all trades.", + }, + "min_profit": { + "type": "number", + "default": None, + "description": "Minimum profit", + "help": "Minimum profit in percent. Pairs with less profit are removed.", + }, + } + def filter_pairlist(self, pairlist: List[str], tickers: Tickers) -> List[str]: """ Filters and sorts pairlist and returns the allowlist again. 
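The `description()`/`available_parameters()` scheme added to `IPairList` above lets every handler describe its own configurable parameters as plain data, which the webserver API exposes further down. As an illustration, a minimal sketch of a custom filter following that scheme; the class `ExampleFilter` and its `min_score` parameter are hypothetical and not part of the diff:

```python
# Hypothetical filter sketch following the parameter-metadata scheme above.
# `ExampleFilter` and `min_score` are illustrative names, not part of the diff.
from typing import Dict, List

from freqtrade.exchange.types import Tickers
from freqtrade.plugins.pairlist.IPairList import IPairList, PairlistParameter


class ExampleFilter(IPairList):

    @staticmethod
    def description() -> str:
        return "Example filter demonstrating parameter metadata."

    @staticmethod
    def available_parameters() -> Dict[str, PairlistParameter]:
        return {
            "min_score": {
                "type": "number",
                "default": 0,
                "description": "Minimum score",
                "help": "Hypothetical threshold below which pairs are removed.",
            },
            **IPairList.refresh_period_parameter(),
        }

    def short_desc(self) -> str:
        return f"{self.name} - Example filter."

    def filter_pairlist(self, pairlist: List[str], tickers: Tickers) -> List[str]:
        # No-op for the sketch - a real filter would drop pairs here.
        return pairlist


# The metadata is available without instantiating the handler:
print(ExampleFilter.available_parameters())
```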
diff --git a/freqtrade/plugins/pairlist/PrecisionFilter.py b/freqtrade/plugins/pairlist/PrecisionFilter.py index 2e74aa293..d354eaf63 100644 --- a/freqtrade/plugins/pairlist/PrecisionFilter.py +++ b/freqtrade/plugins/pairlist/PrecisionFilter.py @@ -46,6 +46,10 @@ class PrecisionFilter(IPairList): """ return f"{self.name} - Filtering untradable pairs." + @staticmethod + def description() -> str: + return "Filters low-value coins which would not allow setting stoplosses." + def _validate_pair(self, pair: str, ticker: Optional[Ticker]) -> bool: """ Check if pair has enough room to add a stoploss to avoid "unsellable" buys of very diff --git a/freqtrade/plugins/pairlist/PriceFilter.py b/freqtrade/plugins/pairlist/PriceFilter.py index 4d23de792..4c8781184 100644 --- a/freqtrade/plugins/pairlist/PriceFilter.py +++ b/freqtrade/plugins/pairlist/PriceFilter.py @@ -7,7 +7,7 @@ from typing import Any, Dict, Optional from freqtrade.constants import Config from freqtrade.exceptions import OperationalException from freqtrade.exchange.types import Ticker -from freqtrade.plugins.pairlist.IPairList import IPairList +from freqtrade.plugins.pairlist.IPairList import IPairList, PairlistParameter logger = logging.getLogger(__name__) @@ -65,6 +65,40 @@ class PriceFilter(IPairList): return f"{self.name} - No price filters configured." + @staticmethod + def description() -> str: + return "Filter pairs by price." + + @staticmethod + def available_parameters() -> Dict[str, PairlistParameter]: + return { + "low_price_ratio": { + "type": "number", + "default": 0, + "description": "Low price ratio", + "help": ("Remove pairs where a price move of 1 price unit (pip) " + "is above this ratio."), + }, + "min_price": { + "type": "number", + "default": 0, + "description": "Minimum price", + "help": "Remove pairs with a price below this value.", + }, + "max_price": { + "type": "number", + "default": 0, + "description": "Maximum price", + "help": "Remove pairs with a price above this value.", + }, + "max_value": { + "type": "number", + "default": 0, + "description": "Maximum value", + "help": "Remove pairs with a value (price * amount) above this value.", + }, + } + def _validate_pair(self, pair: str, ticker: Optional[Ticker]) -> bool: """ Check if one price-step (pip) is greater than a certain barrier. diff --git a/freqtrade/plugins/pairlist/ProducerPairList.py b/freqtrade/plugins/pairlist/ProducerPairList.py index 882d49b76..826f05913 100644 --- a/freqtrade/plugins/pairlist/ProducerPairList.py +++ b/freqtrade/plugins/pairlist/ProducerPairList.py @@ -8,7 +8,7 @@ from typing import Any, Dict, List, Optional from freqtrade.exceptions import OperationalException from freqtrade.exchange.types import Tickers -from freqtrade.plugins.pairlist.IPairList import IPairList +from freqtrade.plugins.pairlist.IPairList import IPairList, PairlistParameter logger = logging.getLogger(__name__) @@ -28,6 +28,7 @@ class ProducerPairList(IPairList): } ], """ + is_pairlist_generator = True def __init__(self, exchange, pairlistmanager, config: Dict[str, Any], pairlistconfig: Dict[str, Any], @@ -56,6 +57,28 @@ class ProducerPairList(IPairList): """ return f"{self.name} - {self._producer_name}" + @staticmethod + def description() -> str: + return "Get a pairlist from an upstream bot." 
+ + @staticmethod + def available_parameters() -> Dict[str, PairlistParameter]: + return { + "number_assets": { + "type": "number", + "default": 0, + "description": "Number of assets", + "help": "Number of assets to use from the pairlist", + }, + "producer_name": { + "type": "string", + "default": "default", + "description": "Producer name", + "help": ("Name of the producer to use. Requires additional " + "external_message_consumer configuration.") + }, + } + def _filter_pairlist(self, pairlist: Optional[List[str]]): upstream_pairlist = self._pairlistmanager._dataprovider.get_producer_pairs( self._producer_name) diff --git a/freqtrade/plugins/pairlist/RemotePairList.py b/freqtrade/plugins/pairlist/RemotePairList.py index d077330e0..372f9a593 100644 --- a/freqtrade/plugins/pairlist/RemotePairList.py +++ b/freqtrade/plugins/pairlist/RemotePairList.py @@ -15,7 +15,7 @@ from freqtrade import __version__ from freqtrade.constants import Config from freqtrade.exceptions import OperationalException from freqtrade.exchange.types import Tickers -from freqtrade.plugins.pairlist.IPairList import IPairList +from freqtrade.plugins.pairlist.IPairList import IPairList, PairlistParameter logger = logging.getLogger(__name__) @@ -23,6 +23,8 @@ logger = logging.getLogger(__name__) class RemotePairList(IPairList): + is_pairlist_generator = True + def __init__(self, exchange, pairlistmanager, config: Config, pairlistconfig: Dict[str, Any], pairlist_pos: int) -> None: @@ -63,6 +65,46 @@ class RemotePairList(IPairList): """ return f"{self.name} - {self._pairlistconfig['number_assets']} pairs from RemotePairlist." + @staticmethod + def description() -> str: + return "Retrieve pairs from a remote API." + + @staticmethod + def available_parameters() -> Dict[str, PairlistParameter]: + return { + "number_assets": { + "type": "number", + "default": 0, + "description": "Number of assets", + "help": "Number of assets to use from the pairlist.", + }, + "pairlist_url": { + "type": "string", + "default": "", + "description": "URL to fetch pairlist from", + "help": "URL to fetch pairlist from", + }, + **IPairList.refresh_period_parameter(), + "keep_pairlist_on_failure": { + "type": "boolean", + "default": True, + "description": "Keep last pairlist on failure", + "help": "Keep last pairlist on failure", + }, + "read_timeout": { + "type": "number", + "default": 60, + "description": "Read timeout", + "help": "Request timeout for remote pairlist", + }, + "bearer_token": { + "type": "string", + "default": "", + "description": "Bearer token", + "help": "Bearer token - used for auth against the upstream service.", + }, + } + def process_json(self, jsonparse) -> List[str]: pairlist = jsonparse.get('pairs', []) diff --git a/freqtrade/plugins/pairlist/ShuffleFilter.py b/freqtrade/plugins/pairlist/ShuffleFilter.py index 76d7600d2..ce37dd8b5 100644 --- a/freqtrade/plugins/pairlist/ShuffleFilter.py +++ b/freqtrade/plugins/pairlist/ShuffleFilter.py @@ -9,7 +9,7 @@ from freqtrade.constants import Config from freqtrade.enums import RunMode from freqtrade.exchange import timeframe_to_seconds from freqtrade.exchange.types import Tickers -from freqtrade.plugins.pairlist.IPairList import IPairList +from freqtrade.plugins.pairlist.IPairList import IPairList, PairlistParameter from freqtrade.util.periodic_cache import PeriodicCache @@ -55,6 +55,28 @@ class ShuffleFilter(IPairList): return (f"{self.name} - Shuffling pairs every {self._shuffle_freq}" + (f", seed = {self._seed}." 
if self._seed is not None else ".")) + @staticmethod + def description() -> str: + return "Randomize pairlist order." + + @staticmethod + def available_parameters() -> Dict[str, PairlistParameter]: + return { + "shuffle_frequency": { + "type": "option", + "default": "candle", + "options": ["candle", "iteration"], + "description": "Shuffle frequency", + "help": "Shuffle frequency. Can be either 'candle' or 'iteration'.", + }, + "seed": { + "type": "number", + "default": None, + "description": "Random Seed", + "help": "Seed for random number generator. Not used in live mode.", + }, + } + def filter_pairlist(self, pairlist: List[str], tickers: Tickers) -> List[str]: """ Filters and sorts pairlist and returns the whitelist again. diff --git a/freqtrade/plugins/pairlist/SpreadFilter.py b/freqtrade/plugins/pairlist/SpreadFilter.py index d47b68568..ee41cbe66 100644 --- a/freqtrade/plugins/pairlist/SpreadFilter.py +++ b/freqtrade/plugins/pairlist/SpreadFilter.py @@ -7,7 +7,7 @@ from typing import Any, Dict, Optional from freqtrade.constants import Config from freqtrade.exceptions import OperationalException from freqtrade.exchange.types import Ticker -from freqtrade.plugins.pairlist.IPairList import IPairList +from freqtrade.plugins.pairlist.IPairList import IPairList, PairlistParameter logger = logging.getLogger(__name__) @@ -45,6 +45,21 @@ class SpreadFilter(IPairList): return (f"{self.name} - Filtering pairs with ask/bid diff above " f"{self._max_spread_ratio:.2%}.") + @staticmethod + def description() -> str: + return "Filter by bid/ask difference." + + @staticmethod + def available_parameters() -> Dict[str, PairlistParameter]: + return { + "max_spread_ratio": { + "type": "number", + "default": 0.005, + "description": "Max spread ratio", + "help": "Max spread ratio for a pair to be considered.", + }, + } + def _validate_pair(self, pair: str, ticker: Optional[Ticker]) -> bool: """ Validate spread for the ticker diff --git a/freqtrade/plugins/pairlist/StaticPairList.py b/freqtrade/plugins/pairlist/StaticPairList.py index 4b1961a53..16fb97adb 100644 --- a/freqtrade/plugins/pairlist/StaticPairList.py +++ b/freqtrade/plugins/pairlist/StaticPairList.py @@ -9,7 +9,7 @@ from typing import Any, Dict, List from freqtrade.constants import Config from freqtrade.exchange.types import Tickers -from freqtrade.plugins.pairlist.IPairList import IPairList +from freqtrade.plugins.pairlist.IPairList import IPairList, PairlistParameter logger = logging.getLogger(__name__) @@ -17,6 +17,8 @@ logger = logging.getLogger(__name__) class StaticPairList(IPairList): + is_pairlist_generator = True + def __init__(self, exchange, pairlistmanager, config: Config, pairlistconfig: Dict[str, Any], pairlist_pos: int) -> None: @@ -40,6 +42,21 @@ class StaticPairList(IPairList): """ return f"{self.name}" + @staticmethod + def description() -> str: + return "Use pairlist as configured in config." 
+
+    @staticmethod
+    def available_parameters() -> Dict[str, PairlistParameter]:
+        return {
+            "allow_inactive": {
+                "type": "boolean",
+                "default": False,
+                "description": "Allow inactive pairs",
+                "help": "Allow inactive pairs to be in the whitelist.",
+            },
+        }
+
     def gen_pairlist(self, tickers: Tickers) -> List[str]:
         """
         Generate the pairlist
diff --git a/freqtrade/plugins/pairlist/VolatilityFilter.py b/freqtrade/plugins/pairlist/VolatilityFilter.py
index 61a1dcbf0..800bf3664 100644
--- a/freqtrade/plugins/pairlist/VolatilityFilter.py
+++ b/freqtrade/plugins/pairlist/VolatilityFilter.py
@@ -15,7 +15,7 @@ from freqtrade.constants import Config, ListPairsWithTimeframes
 from freqtrade.exceptions import OperationalException
 from freqtrade.exchange.types import Tickers
 from freqtrade.misc import plural
-from freqtrade.plugins.pairlist.IPairList import IPairList
+from freqtrade.plugins.pairlist.IPairList import IPairList, PairlistParameter
 from freqtrade.util import dt_floor_day, dt_now, dt_ts
@@ -64,6 +64,34 @@ class VolatilityFilter(IPairList):
                 f"{self._min_volatility}-{self._max_volatility} "
                 f" the last {self._days} {plural(self._days, 'day')}.")
 
+    @staticmethod
+    def description() -> str:
+        return "Filter pairs by their recent volatility."
+
+    @staticmethod
+    def available_parameters() -> Dict[str, PairlistParameter]:
+        return {
+            "lookback_days": {
+                "type": "number",
+                "default": 10,
+                "description": "Lookback Days",
+                "help": "Number of days to look back at.",
+            },
+            "min_volatility": {
+                "type": "number",
+                "default": 0,
+                "description": "Minimum Volatility",
+                "help": "Minimum volatility a pair must have to be considered.",
+            },
+            "max_volatility": {
+                "type": "number",
+                "default": None,
+                "description": "Maximum Volatility",
+                "help": "Maximum volatility a pair must have to be considered.",
+            },
+            **IPairList.refresh_period_parameter()
+        }
+
     def filter_pairlist(self, pairlist: List[str], tickers: Tickers) -> List[str]:
         """
         Validate trading range
diff --git a/freqtrade/plugins/pairlist/VolumePairList.py b/freqtrade/plugins/pairlist/VolumePairList.py
index b9c312f87..0d5e33847 100644
--- a/freqtrade/plugins/pairlist/VolumePairList.py
+++ b/freqtrade/plugins/pairlist/VolumePairList.py
@@ -14,7 +14,7 @@ from freqtrade.exceptions import OperationalException
 from freqtrade.exchange import timeframe_to_minutes, timeframe_to_prev_date
 from freqtrade.exchange.types import Tickers
 from freqtrade.misc import format_ms_time
-from freqtrade.plugins.pairlist.IPairList import IPairList
+from freqtrade.plugins.pairlist.IPairList import IPairList, PairlistParameter
 from freqtrade.util import dt_now
@@ -26,6 +26,8 @@ SORT_VALUES = ['quoteVolume']
 
 class VolumePairList(IPairList):
 
+    is_pairlist_generator = True
+
     def __init__(self, exchange, pairlistmanager,
                  config: Config, pairlistconfig: Dict[str, Any],
                  pairlist_pos: int) -> None:
@@ -112,6 +114,53 @@ class VolumePairList(IPairList):
         """
         return f"{self.name} - top {self._pairlistconfig['number_assets']} volume pairs."
 
+    @staticmethod
+    def description() -> str:
+        return "Provides dynamic pair list based on trade volumes."
+
+    @staticmethod
+    def available_parameters() -> Dict[str, PairlistParameter]:
+        return {
+            "number_assets": {
+                "type": "number",
+                "default": 30,
+                "description": "Number of assets",
+                "help": "Number of assets to use from the pairlist.",
+            },
+            "sort_key": {
+                "type": "option",
+                "default": "quoteVolume",
+                "options": SORT_VALUES,
+                "description": "Sort key",
+                "help": "Sort key to use for sorting the pairlist.",
+            },
+            "min_value": {
+                "type": "number",
+                "default": 0,
+                "description": "Minimum value",
+                "help": "Minimum value to use for filtering the pairlist.",
+            },
+            **IPairList.refresh_period_parameter(),
+            "lookback_days": {
+                "type": "number",
+                "default": 0,
+                "description": "Lookback Days",
+                "help": "Number of days to look back at.",
+            },
+            "lookback_timeframe": {
+                "type": "string",
+                "default": "",
+                "description": "Lookback Timeframe",
+                "help": "Timeframe to use for lookback.",
+            },
+            "lookback_period": {
+                "type": "number",
+                "default": 0,
+                "description": "Lookback Period",
+                "help": "Number of periods to look back at.",
+            },
+        }
+
     def gen_pairlist(self, tickers: Tickers) -> List[str]:
         """
         Generate the pairlist
diff --git a/freqtrade/plugins/pairlist/rangestabilityfilter.py b/freqtrade/plugins/pairlist/rangestabilityfilter.py
index 1181b2812..f294b882b 100644
--- a/freqtrade/plugins/pairlist/rangestabilityfilter.py
+++ b/freqtrade/plugins/pairlist/rangestabilityfilter.py
@@ -13,7 +13,7 @@ from freqtrade.constants import Config, ListPairsWithTimeframes
 from freqtrade.exceptions import OperationalException
 from freqtrade.exchange.types import Tickers
 from freqtrade.misc import plural
-from freqtrade.plugins.pairlist.IPairList import IPairList
+from freqtrade.plugins.pairlist.IPairList import IPairList, PairlistParameter
 from freqtrade.util import dt_floor_day, dt_now, dt_ts
@@ -62,6 +62,34 @@ class RangeStabilityFilter(IPairList):
                 f"{self._min_rate_of_change}{max_rate_desc} over the "
                 f"last {plural(self._days, 'day')}.")
 
+    @staticmethod
+    def description() -> str:
+        return "Filters pairs by their rate of change."
+
+    @staticmethod
+    def available_parameters() -> Dict[str, PairlistParameter]:
+        return {
+            "lookback_days": {
+                "type": "number",
+                "default": 10,
+                "description": "Lookback Days",
+                "help": "Number of days to look back at.",
+            },
+            "min_rate_of_change": {
+                "type": "number",
+                "default": 0.01,
+                "description": "Minimum Rate of Change",
+                "help": "Minimum rate of change to filter pairs.",
+            },
+            "max_rate_of_change": {
+                "type": "number",
+                "default": None,
+                "description": "Maximum Rate of Change",
+                "help": "Maximum rate of change to filter pairs.",
+            },
+            **IPairList.refresh_period_parameter()
+        }
+
     def filter_pairlist(self, pairlist: List[str], tickers: Tickers) -> List[str]:
         """
         Validate trading range
diff --git a/freqtrade/plugins/protectionmanager.py b/freqtrade/plugins/protectionmanager.py
index 54432e677..6e55ade11 100644
--- a/freqtrade/plugins/protectionmanager.py
+++ b/freqtrade/plugins/protectionmanager.py
@@ -15,7 +15,7 @@ from freqtrade.resolvers import ProtectionResolver
 logger = logging.getLogger(__name__)
 
 
-class ProtectionManager():
+class ProtectionManager:
 
     def __init__(self, config: Config, protections: List) -> None:
         self._config = config
diff --git a/freqtrade/resolvers/exchange_resolver.py b/freqtrade/resolvers/exchange_resolver.py
index c5c4e1a68..2f912c4ab 100644
--- a/freqtrade/resolvers/exchange_resolver.py
+++ b/freqtrade/resolvers/exchange_resolver.py
@@ -2,7 +2,8 @@
 This module loads custom exchanges
 """
 import logging
-from typing import Optional
+from inspect import isclass
+from typing import Any, Dict, List, Optional
 
 import freqtrade.exchange as exchanges
 from freqtrade.constants import Config, ExchangeConfig
@@ -72,3 +73,26 @@ class ExchangeResolver(IResolver):
                 f"Impossible to load Exchange '{exchange_name}'. This class does not exist "
                 "or contains Python code errors."
             )
+
+    @classmethod
+    def search_all_objects(cls, config: Config, enum_failed: bool,
+                           recursive: bool = False) -> List[Dict[str, Any]]:
+        """
+        Searches for valid objects
+        :param config: Config object
+        :param enum_failed: If True, will return None for modules which fail.
+            Otherwise, failing modules are skipped.
+        :param recursive: Recursively walk directory tree searching for exchanges
+        :return: List of dicts containing 'name', 'class' and 'location' entries
+        """
+        result = []
+        for exchange_name in dir(exchanges):
+            exchange = getattr(exchanges, exchange_name)
+            if isclass(exchange) and issubclass(exchange, Exchange):
+                result.append({
+                    'name': exchange_name,
+                    'class': exchange,
+                    'location': exchange.__module__,
+                    'location_rel': exchange.__module__.replace('freqtrade.', ''),
+                })
+        return result
diff --git a/freqtrade/resolvers/freqaimodel_resolver.py b/freqtrade/resolvers/freqaimodel_resolver.py
index 48c3facac..3696b9e56 100644
--- a/freqtrade/resolvers/freqaimodel_resolver.py
+++ b/freqtrade/resolvers/freqaimodel_resolver.py
@@ -34,7 +34,7 @@ class FreqaiModelResolver(IResolver):
         Load the custom class from config parameter
         :param config: configuration dictionary
         """
-        disallowed_models = ["BaseRegressionModel", "BaseTensorFlowModel"]
+        disallowed_models = ["BaseRegressionModel"]
 
         freqaimodel_name = config.get("freqaimodel")
         if not freqaimodel_name:
diff --git a/freqtrade/resolvers/iresolver.py b/freqtrade/resolvers/iresolver.py
index 2b20560e2..1557f0f35 100644
--- a/freqtrade/resolvers/iresolver.py
+++ b/freqtrade/resolvers/iresolver.py
@@ -41,7 +41,7 @@ class IResolver:
     object_type: Type[Any]
     object_type_str: str
     user_subdir: Optional[str] = None
-    initial_search_path: Optional[Path]
+    initial_search_path: Optional[Path] = None
     # Optional config setting containing a path (strategy_path, freqaimodel_path)
     extra_path: Optional[str] = None
diff --git a/freqtrade/rpc/api_server/api_background_tasks.py b/freqtrade/rpc/api_server/api_background_tasks.py
new file mode 100644
index 000000000..c13fa31e4
--- /dev/null
+++ b/freqtrade/rpc/api_server/api_background_tasks.py
@@ -0,0 +1,145 @@
+import logging
+from copy import deepcopy
+
+from fastapi import APIRouter, BackgroundTasks, Depends
+from fastapi.exceptions import HTTPException
+
+from freqtrade.constants import Config
+from freqtrade.enums import CandleType
+from freqtrade.rpc.api_server.api_schemas import (BackgroundTaskStatus, BgJobStarted,
+                                                  ExchangeModePayloadMixin, PairListsPayload,
+                                                  PairListsResponse, WhitelistEvaluateResponse)
+from freqtrade.rpc.api_server.deps import get_config, get_exchange
+from freqtrade.rpc.api_server.webserver_bgwork import ApiBG
+
+
+logger = logging.getLogger(__name__)
+
+# Private API, protected by authentication and webserver_mode dependency
+router = APIRouter()
+
+
+@router.get('/background/{jobid}', response_model=BackgroundTaskStatus, tags=['webserver'])
+def background_job(jobid: str):
+    if not (job := ApiBG.jobs.get(jobid)):
+        raise HTTPException(status_code=404, detail='Job not found.')
+
+    return {
+        'job_id': jobid,
+        'job_category': job['category'],
+        'status': job['status'],
+        'running': job['is_running'],
+        'progress': job.get('progress'),
+        # 'job_error': job['error'],
+    }
+
+
+@router.get('/pairlists/available',
+            response_model=PairListsResponse, tags=['pairlists', 'webserver'])
+def list_pairlists(config=Depends(get_config)):
+    from freqtrade.resolvers import PairListResolver
+    pairlists = PairListResolver.search_all_objects(
+        config, False)
+    pairlists = sorted(pairlists, key=lambda x: x['name'])
+
+    return {'pairlists': [{
+        "name": x['name'],
+        "is_pairlist_generator": x['class'].is_pairlist_generator,
+        "params": x['class'].available_parameters(),
+        "description": x['class'].description(),
+        } for x in pairlists
+    ]}
+
+
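+# Job lifecycle shared by the endpoints below: pairlists_evaluate() registers
+# an entry in ApiBG.jobs with status 'pending', __run_pairlist() sets
+# 'is_running' while it works and leaves status 'success' or 'failed' behind.
+# Clients poll GET /background/{jobid} (or /pairlists/evaluate/{jobid}) until
+# 'running' is False, then read 'result' or 'error'.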
+def __run_pairlist(job_id: str, config_loc: Config):
+    try:
+        ApiBG.jobs[job_id]['is_running'] = True
+        from freqtrade.plugins.pairlistmanager import PairListManager
+
+        exchange = get_exchange(config_loc)
+        pairlists = PairListManager(exchange, config_loc)
+        pairlists.refresh_pairlist()
+        ApiBG.jobs[job_id]['result'] = {
+            'method': pairlists.name_list,
+            'length': len(pairlists.whitelist),
+            'whitelist': pairlists.whitelist
+        }
+        ApiBG.jobs[job_id]['status'] = 'success'
+    except Exception as e:
+        logger.exception(e)
+        ApiBG.jobs[job_id]['error'] = str(e)
+        ApiBG.jobs[job_id]['status'] = 'failed'
+    finally:
+        ApiBG.jobs[job_id]['is_running'] = False
+        ApiBG.pairlist_running = False
+
+
+@router.post('/pairlists/evaluate', response_model=BgJobStarted, tags=['pairlists', 'webserver'])
+def pairlists_evaluate(payload: PairListsPayload, background_tasks: BackgroundTasks,
+                       config=Depends(get_config)):
+    if ApiBG.pairlist_running:
+        raise HTTPException(status_code=400, detail='Pairlist evaluation is already running.')
+
+    config_loc = deepcopy(config)
+    config_loc['stake_currency'] = payload.stake_currency
+    config_loc['pairlists'] = payload.pairlists
+    handle_exchange_payload(payload, config_loc)
+    # TODO: overwrite blacklist? make it optional and fall back to the one in config?
+    # Outcome depends on the UI approach.
+    config_loc['exchange']['pair_blacklist'] = payload.blacklist
+    # Random job id
+    job_id = ApiBG.get_job_id()
+
+    ApiBG.jobs[job_id] = {
+        'category': 'pairlist',
+        'status': 'pending',
+        'progress': None,
+        'is_running': False,
+        'result': {},
+        'error': None,
+    }
+    background_tasks.add_task(__run_pairlist, job_id, config_loc)
+    ApiBG.pairlist_running = True
+
+    return {
+        'status': 'Pairlist evaluation started in background.',
+        'job_id': job_id,
+    }
+
+
+def handle_exchange_payload(payload: ExchangeModePayloadMixin, config_loc: Config):
+    """
+    Handle exchange and trading mode payload.
+    Updates the configuration with the payload values.
+    """
+    if payload.exchange:
+        config_loc['exchange']['name'] = payload.exchange
+    if payload.trading_mode:
+        config_loc['trading_mode'] = payload.trading_mode
+        config_loc['candle_type_def'] = CandleType.get_default(
+            config_loc.get('trading_mode', 'spot') or 'spot')
+    if payload.margin_mode:
+        config_loc['margin_mode'] = payload.margin_mode
+
+
+@router.get('/pairlists/evaluate/{jobid}', response_model=WhitelistEvaluateResponse,
+            tags=['pairlists', 'webserver'])
+def pairlists_evaluate_get(jobid: str):
+    if not (job := ApiBG.jobs.get(jobid)):
+        raise HTTPException(status_code=404, detail='Job not found.')
+
+    if job['is_running']:
+        raise HTTPException(status_code=400, detail='Job not finished yet.')
+
+    if error := job['error']:
+        return {
+            'status': 'failed',
+            'error': error,
+        }
+
+    return {
+        'status': 'success',
+        'result': job['result'],
+    }
diff --git a/freqtrade/rpc/api_server/api_backtest.py b/freqtrade/rpc/api_server/api_backtest.py
index 8fa1a87b8..411ba4978 100644
--- a/freqtrade/rpc/api_server/api_backtest.py
+++ b/freqtrade/rpc/api_server/api_backtest.py
@@ -16,14 +16,14 @@ from freqtrade.exchange.common import remove_exchange_credentials
 from freqtrade.misc import deep_merge_dicts
 from freqtrade.rpc.api_server.api_schemas import (BacktestHistoryEntry, BacktestRequest,
                                                   BacktestResponse)
-from freqtrade.rpc.api_server.deps import get_config, is_webserver_mode
+from freqtrade.rpc.api_server.deps import get_config
 from freqtrade.rpc.api_server.webserver_bgwork import ApiBG
 from freqtrade.rpc.rpc import RPCException
 
 
 logger = logging.getLogger(__name__)
 
-# Private API, protected by authentication
+# Private API, protected by authentication and webserver_mode dependency
 router = APIRouter()
 
 
@@ -102,7 +102,7 @@ def __run_backtest_bg(btconfig: Config):
 @router.post('/backtest', response_model=BacktestResponse, tags=['webserver', 'backtest'])
 async def api_start_backtest(
         bt_settings: BacktestRequest, background_tasks: BackgroundTasks,
-        config=Depends(get_config), ws_mode=Depends(is_webserver_mode)):
+        config=Depends(get_config)):
     ApiBG.bt['bt_error'] = None
     """Start backtesting if not done so already"""
     if ApiBG.bgtask_running:
@@ -143,7 +143,7 @@ async def api_start_backtest(
 
 
 @router.get('/backtest', response_model=BacktestResponse, tags=['webserver', 'backtest'])
-def api_get_backtest(ws_mode=Depends(is_webserver_mode)):
+def api_get_backtest():
     """
     Get backtesting result.
     Returns Result after backtesting has been ran.
@@ -188,7 +188,7 @@ def api_get_backtest(ws_mode=Depends(is_webserver_mode)):
 
 
 @router.delete('/backtest', response_model=BacktestResponse, tags=['webserver', 'backtest'])
-def api_delete_backtest(ws_mode=Depends(is_webserver_mode)):
+def api_delete_backtest():
     """Reset backtesting"""
     if ApiBG.bgtask_running:
         return {
@@ -215,7 +215,7 @@ def api_delete_backtest(ws_mode=Depends(is_webserver_mode)):
 
 
 @router.get('/backtest/abort', response_model=BacktestResponse, tags=['webserver', 'backtest'])
-def api_backtest_abort(ws_mode=Depends(is_webserver_mode)):
+def api_backtest_abort():
     if not ApiBG.bgtask_running:
         return {
             "status": "not_running",
@@ -236,15 +236,14 @@ def api_backtest_abort(ws_mode=Depends(is_webserver_mode)):
 
 @router.get('/backtest/history', response_model=List[BacktestHistoryEntry],
             tags=['webserver', 'backtest'])
-def api_backtest_history(config=Depends(get_config), ws_mode=Depends(is_webserver_mode)):
+def api_backtest_history(config=Depends(get_config)):
     # Get backtest result history, read from metadata files
     return get_backtest_resultlist(config['user_data_dir'] / 'backtest_results')
 
 
 @router.get('/backtest/history/result', response_model=BacktestResponse,
             tags=['webserver', 'backtest'])
-def api_backtest_history_result(filename: str, strategy: str, config=Depends(get_config),
-                                ws_mode=Depends(is_webserver_mode)):
+def api_backtest_history_result(filename: str, strategy: str, config=Depends(get_config)):
     # Get backtest result history, read from metadata files
     fn = config['user_data_dir'] / 'backtest_results' / filename
     results: Dict[str, Any] = {
diff --git a/freqtrade/rpc/api_server/api_schemas.py b/freqtrade/rpc/api_server/api_schemas.py
index a081f9fe9..3f4dd99e1 100644
--- a/freqtrade/rpc/api_server/api_schemas.py
+++ b/freqtrade/rpc/api_server/api_schemas.py
@@ -4,7 +4,14 @@ from typing import Any, Dict, List, Optional, Union
 from pydantic import BaseModel
 
 from freqtrade.constants import DATETIME_PRINT_FORMAT, IntOrInf
-from freqtrade.enums import OrderTypeValues, SignalDirection, TradingMode
+from freqtrade.enums import MarginMode, OrderTypeValues, SignalDirection, TradingMode
+from freqtrade.types import ValidExchangesType
+
+
+class ExchangeModePayloadMixin(BaseModel):
+    trading_mode: Optional[TradingMode]
+    margin_mode: Optional[MarginMode]
+    exchange: Optional[str]
 
 
 class Ping(BaseModel):
@@ -27,6 +34,23 @@ class StatusMsg(BaseModel):
     status: str
 
 
+class BgJobStarted(StatusMsg):
+    job_id: str
+
+
+class BackgroundTaskStatus(BaseModel):
+    job_id: str
+    job_category: str
+    status: str
+    running: bool
+    progress: Optional[float]
+
+
+class BackgroundTaskResult(BaseModel):
+    error: Optional[str]
+    status: str
+
+
 class ResultMsg(BaseModel):
     result: str
 
@@ -376,6 +400,10 @@ class WhitelistResponse(BaseModel):
     method: List[str]
 
 
+class WhitelistEvaluateResponse(BackgroundTaskResult):
+    result: Optional[WhitelistResponse]
+
+
 class DeleteTrade(BaseModel):
     cancel_order_count: int
     result: str
@@ -396,6 +424,27 @@ class StrategyListResponse(BaseModel):
     strategies: List[str]
 
 
+class ExchangeListResponse(BaseModel):
+    exchanges: List[ValidExchangesType]
+
+
+class PairListResponse(BaseModel):
+    name: str
+    description: str
+    is_pairlist_generator: bool
+    params: Dict[str, Any]
+
+
+class PairListsResponse(BaseModel):
+    pairlists: List[PairListResponse]
+
+
+class PairListsPayload(ExchangeModePayloadMixin, BaseModel):
+    pairlists: List[Dict[str, Any]]
+    blacklist: List[str]
+    stake_currency: str
+
+
 class FreqAIModelListResponse(BaseModel):
     freqaimodels: List[str]
 
diff --git a/freqtrade/rpc/api_server/api_v1.py b/freqtrade/rpc/api_server/api_v1.py
index 2354c4bf8..143f110f0 100644
--- a/freqtrade/rpc/api_server/api_v1.py
+++ b/freqtrade/rpc/api_server/api_v1.py
@@ -12,7 +12,8 @@ from freqtrade.exceptions import OperationalException
 from freqtrade.rpc import RPC
 from freqtrade.rpc.api_server.api_schemas import (AvailablePairs, Balances, BlacklistPayload,
                                                   BlacklistResponse, Count, Daily,
-                                                  DeleteLockRequest, DeleteTrade, ForceEnterPayload,
+                                                  DeleteLockRequest, DeleteTrade,
+                                                  ExchangeListResponse, ForceEnterPayload,
                                                   ForceEnterResponse, ForceExitPayload,
                                                   FreqAIModelListResponse, Health, Locks, Logs,
                                                   OpenTradeSchema, PairHistory, PerformanceEntry,
@@ -46,7 +47,9 @@ logger = logging.getLogger(__name__)
 # 2.26: increase /balance output
 # 2.27: Add /trades/<id>/reload endpoint
 # 2.28: Switch reload endpoint to Post
-API_VERSION = 2.28
+# 2.29: Add /exchanges endpoint
+# 2.30: new /pairlists endpoint
+API_VERSION = 2.30
 
 # Public API, requires no auth.
 router_public = APIRouter()
@@ -312,6 +315,15 @@ def get_strategy(strategy: str, config=Depends(get_config)):
     }
 
 
+@router.get('/exchanges', response_model=ExchangeListResponse, tags=[])
+def list_exchanges(config=Depends(get_config)):
+    from freqtrade.exchange import list_available_exchanges
+    exchanges = list_available_exchanges(config)
+    return {
+        'exchanges': exchanges,
+    }
+
+
 @router.get('/freqaimodels', response_model=FreqAIModelListResponse, tags=['freqai'])
 def list_freqaimodels(config=Depends(get_config)):
     from freqtrade.resolvers.freqaimodel_resolver import FreqaiModelResolver
diff --git a/freqtrade/rpc/api_server/api_ws.py b/freqtrade/rpc/api_server/api_ws.py
index b253d66c2..40a5a75fd 100644
--- a/freqtrade/rpc/api_server/api_ws.py
+++ b/freqtrade/rpc/api_server/api_ws.py
@@ -7,12 +7,14 @@ from fastapi.websockets import WebSocket
 from pydantic import ValidationError
 
 from freqtrade.enums import RPCMessageType, RPCRequestType
+from freqtrade.exceptions import FreqtradeException
 from freqtrade.rpc.api_server.api_auth import validate_ws_token
 from freqtrade.rpc.api_server.deps import get_message_stream, get_rpc
 from freqtrade.rpc.api_server.ws.channel import WebSocketChannel, create_channel
 from freqtrade.rpc.api_server.ws.message_stream import MessageStream
-from freqtrade.rpc.api_server.ws_schemas import (WSAnalyzedDFMessage, WSMessageSchema,
-                                                 WSRequestSchema, WSWhitelistMessage)
+from freqtrade.rpc.api_server.ws_schemas import (WSAnalyzedDFMessage, WSErrorMessage,
+                                                 WSMessageSchema, WSRequestSchema,
+                                                 WSWhitelistMessage)
 from freqtrade.rpc.rpc import RPC
 
 
@@ -27,7 +29,13 @@ async def channel_reader(channel: WebSocketChannel, rpc: RPC):
     Iterate over the messages from the channel and process the request
     """
     async for message in channel:
-        await _process_consumer_request(message, channel, rpc)
+        try:
+            await _process_consumer_request(message, channel, rpc)
+        except FreqtradeException:
+            logger.exception(f"Error processing request from {channel}")
+            response = WSErrorMessage(data='Error processing request')
+
+            await channel.send(response.dict(exclude_none=True))
 
 
 async def channel_broadcaster(channel: WebSocketChannel, message_stream: MessageStream):
@@ -62,13 +70,13 @@ async def _process_consumer_request(
         logger.error(f"Invalid request from {channel}: {e}")
         return
 
-    type, data = websocket_request.type, websocket_request.data
+    type_, data = websocket_request.type, websocket_request.data
     response: WSMessageSchema
 
-    logger.debug(f"Request of type {type} from {channel}")
+    logger.debug(f"Request of type {type_} from {channel}")
 
     # If we have a request of type SUBSCRIBE, set the topics in this channel
-    if type == RPCRequestType.SUBSCRIBE:
+    if type_ == RPCRequestType.SUBSCRIBE:
         # If the request is empty, do nothing
         if not data:
             return
@@ -80,7 +88,7 @@ async def _process_consumer_request(
         # We don't send a response for subscriptions
         return
 
-    elif type == RPCRequestType.WHITELIST:
+    elif type_ == RPCRequestType.WHITELIST:
         # Get whitelist
         whitelist = rpc._ws_request_whitelist()
 
@@ -88,7 +96,7 @@ async def _process_consumer_request(
         response = WSWhitelistMessage(data=whitelist)
         await channel.send(response.dict(exclude_none=True))
 
-    elif type == RPCRequestType.ANALYZED_DF:
+    elif type_ == RPCRequestType.ANALYZED_DF:
         # Limit the amount of candles per dataframe to 'limit' or 1500
         limit = int(min(data.get('limit', 1500), 1500)) if data else None
         pair = data.get('pair', None) if data else None
diff --git a/freqtrade/rpc/api_server/deps.py b/freqtrade/rpc/api_server/deps.py
index 8fd105d3e..bface89bd 100644
--- a/freqtrade/rpc/api_server/deps.py
+++ b/freqtrade/rpc/api_server/deps.py
@@ -1,8 +1,9 @@
 from typing import Any, AsyncIterator, Dict, Optional
 from uuid import uuid4
 
-from fastapi import Depends
+from fastapi import Depends, HTTPException
 
+from freqtrade.constants import Config
 from freqtrade.enums import RunMode
 from freqtrade.persistence import Trade
 from freqtrade.persistence.models import _request_id_ctx_var
@@ -43,12 +44,21 @@ def get_api_config() -> Dict[str, Any]:
     return ApiServer._config['api_server']
 
 
+def _generate_exchange_key(config: Config) -> str:
+    """
+    Exchange key - used for caching the exchange object.
+    """
+    return f"{config['exchange']['name']}_{config.get('trading_mode', 'spot')}"
+
+
 def get_exchange(config=Depends(get_config)):
-    if not ApiBG.exchange:
+    exchange_key = _generate_exchange_key(config)
+    if not (exchange := ApiBG.exchanges.get(exchange_key)):
         from freqtrade.resolvers import ExchangeResolver
-        ApiBG.exchange = ExchangeResolver.load_exchange(
+        exchange = ExchangeResolver.load_exchange(
             config, load_leverage_tiers=False)
-    return ApiBG.exchange
+        ApiBG.exchanges[exchange_key] = exchange
+    return exchange
 
 
 def get_message_stream():
@@ -57,5 +67,6 @@ def get_message_stream():
 
 def is_webserver_mode(config=Depends(get_config)):
     if config['runmode'] != RunMode.WEBSERVER:
-        raise RPCException('Bot is not in the correct state')
+        raise HTTPException(status_code=503,
+                            detail='Bot is not in the correct state.')
     return None
diff --git a/freqtrade/rpc/api_server/webserver.py b/freqtrade/rpc/api_server/webserver.py
index 165849a7f..4d934eee3 100644
--- a/freqtrade/rpc/api_server/webserver.py
+++ b/freqtrade/rpc/api_server/webserver.py
@@ -114,10 +114,12 @@ class ApiServer(RPCHandler):
 
     def configure_app(self, app: FastAPI, config):
         from freqtrade.rpc.api_server.api_auth import http_basic_or_jwt_token, router_login
+        from freqtrade.rpc.api_server.api_background_tasks import router as api_bg_tasks
        from freqtrade.rpc.api_server.api_backtest import router as api_backtest
         from freqtrade.rpc.api_server.api_v1 import router as api_v1
         from freqtrade.rpc.api_server.api_v1 import router_public as api_v1_public
         from freqtrade.rpc.api_server.api_ws import router as ws_router
+        from freqtrade.rpc.api_server.deps import is_webserver_mode
         from freqtrade.rpc.api_server.web_ui import router_ui
 
         app.include_router(api_v1_public, prefix="/api/v1")
@@ -126,7 +128,12 @@ class ApiServer(RPCHandler):
                            dependencies=[Depends(http_basic_or_jwt_token)],
                            )
         app.include_router(api_backtest, prefix="/api/v1",
-                           dependencies=[Depends(http_basic_or_jwt_token)],
+                           dependencies=[Depends(http_basic_or_jwt_token),
+                                         Depends(is_webserver_mode)],
+                           )
+        app.include_router(api_bg_tasks, prefix="/api/v1",
+                           dependencies=[Depends(http_basic_or_jwt_token),
+                                         Depends(is_webserver_mode)],
                            )
         app.include_router(ws_router, prefix="/api/v1")
         app.include_router(router_login, prefix="/api/v1", tags=["auth"])
diff --git a/freqtrade/rpc/api_server/webserver_bgwork.py b/freqtrade/rpc/api_server/webserver_bgwork.py
index 925f34de3..13f45227e 100644
--- a/freqtrade/rpc/api_server/webserver_bgwork.py
+++ b/freqtrade/rpc/api_server/webserver_bgwork.py
@@ -1,8 +1,20 @@
-from typing import Any, Dict
+from typing import Any, Dict, Literal, Optional, TypedDict
+from uuid import uuid4
+
+from freqtrade.exchange.exchange import Exchange
 
 
-class ApiBG():
+class JobsContainer(TypedDict):
+    category: Literal['pairlist']
+    is_running: bool
+    status: str
+    progress: Optional[float]
+    result: Any
+    error: Optional[str]
+
+
+class ApiBG:
     # Backtesting type: Backtesting
     bt: Dict[str, Any] = {
         'bt': None,
@@ -13,4 +25,15 @@ class ApiBG():
     }
     bgtask_running: bool = False
     # Exchange - only available in webserver mode.
-    exchange = None
+    exchanges: Dict[str, Exchange] = {}
+
+    # Generic background jobs
+
+    # TODO: Change this to TTLCache
+    jobs: Dict[str, JobsContainer] = {}
+    # Pairlist evaluate things
+    pairlist_running: bool = False
+
+    @staticmethod
+    def get_job_id() -> str:
+        return str(uuid4())
diff --git a/freqtrade/rpc/api_server/ws_schemas.py b/freqtrade/rpc/api_server/ws_schemas.py
index 292672b60..af98bd532 100644
--- a/freqtrade/rpc/api_server/ws_schemas.py
+++ b/freqtrade/rpc/api_server/ws_schemas.py
@@ -66,4 +66,9 @@ class WSAnalyzedDFMessage(WSMessageSchema):
     type: RPCMessageType = RPCMessageType.ANALYZED_DF
     data: AnalyzedDFData
 
+
+class WSErrorMessage(WSMessageSchema):
+    type: RPCMessageType = RPCMessageType.EXCEPTION
+    data: str
+
 # --------------------------------------------------------------------------
diff --git a/freqtrade/rpc/rpc.py b/freqtrade/rpc/rpc.py
index dedb35503..c3759e03a 100644
--- a/freqtrade/rpc/rpc.py
+++ b/freqtrade/rpc/rpc.py
@@ -755,7 +755,7 @@ class RPC:
         return {'status': 'Reloaded from orders from exchange'}
 
     def __exec_force_exit(self, trade: Trade, ordertype: Optional[str],
-                          amount: Optional[float] = None) -> None:
+                          amount: Optional[float] = None) -> bool:
         # Check if there is there is an open order
         fully_canceled = False
         if trade.open_order_id:
@@ -770,6 +770,9 @@ class RPC:
             self._freqtrade.handle_cancel_exit(trade, order, CANCEL_REASON['FORCE_EXIT'])
 
         if not fully_canceled:
+            if trade.open_order_id is not None:
+                # Order cancellation failed, so we can't exit.
+                return False
             # Get current rate and execute sell
             current_rate = self._freqtrade.exchange.get_rate(
                 trade.pair, side='exit', is_short=trade.is_short, refresh=True)
@@ -790,6 +793,9 @@ class RPC:
                 trade, current_rate, exit_check, ordertype=order_type,
                 sub_trade_amt=sub_amount)
 
+            return True
+        return False
+
     def _rpc_force_exit(self, trade_id: str, ordertype: Optional[str] = None, *,
                         amount: Optional[float] = None) -> Dict[str, str]:
         """
@@ -802,12 +808,12 @@ class RPC:
 
         with self._freqtrade._exit_lock:
             if trade_id == 'all':
-                # Execute sell for all open orders
+                # Execute exit for all open trades
                 for trade in Trade.get_open_trades():
                     self.__exec_force_exit(trade, ordertype)
                 Trade.commit()
                 self._freqtrade.wallets.update()
-                return {'result': 'Created sell orders for all open trades.'}
+                return {'result': 'Created exit orders for all open trades.'}
 
             # Query for trade
             trade = Trade.get_trades(
@@ -817,10 +823,12 @@ class RPC:
                 logger.warning('force_exit: Invalid argument received')
                 raise RPCException('invalid argument')
 
-            self.__exec_force_exit(trade, ordertype, amount)
+            result = self.__exec_force_exit(trade, ordertype, amount)
             Trade.commit()
             self._freqtrade.wallets.update()
-            return {'result': f'Created sell order for trade {trade_id}.'}
+            if not result:
+                raise RPCException('Failed to exit trade.')
+            return {'result': f'Created exit order for trade {trade_id}.'}
 
     def _force_entry_validations(self, pair: str, order_side: SignalDirection):
         if not self._freqtrade.config.get('force_entry_enable', False):
diff --git a/freqtrade/rpc/telegram.py b/freqtrade/rpc/telegram.py
index d082299cb..aad7fd8c4 100644
--- a/freqtrade/rpc/telegram.py
+++ b/freqtrade/rpc/telegram.py
@@ -534,10 +534,10 @@ class Telegram(RPCHandler):
             if order_nr == 1:
                 lines.append(f"*{wording} #{order_nr}:*")
                 lines.append(
-                    f"*Amount:* {cur_entry_amount} "
+                    f"*Amount:* {cur_entry_amount:.8g} "
                     f"({round_coin_value(order['cost'], quote_currency)})"
                 )
-                lines.append(f"*Average Price:* {cur_entry_average}")
+                lines.append(f"*Average Price:* {cur_entry_average:.8g}")
             else:
                 sum_stake = 0
                 sum_amount = 0
@@ -560,9 +560,9 @@ class Telegram(RPCHandler):
                 if is_open:
                     lines.append("({})".format(dt_humanize(order["order_filled_date"],
                                                            granularity=["day", "hour", "minute"])))
-                lines.append(f"*Amount:* {cur_entry_amount} "
+                lines.append(f"*Amount:* {cur_entry_amount:.8g} "
                              f"({round_coin_value(order['cost'], quote_currency)})")
-                lines.append(f"*Average {wording} Price:* {cur_entry_average} "
+                lines.append(f"*Average {wording} Price:* {cur_entry_average:.8g} "
                              f"({price_to_1st_entry:.2%} from 1st entry Rate)")
                 lines.append(f"*Order filled:* {order['order_filled_date']}")
 
@@ -633,11 +633,11 @@ class Telegram(RPCHandler):
             ])
 
         lines.extend([
-            "*Open Rate:* `{open_rate:.8f}`",
-            "*Close Rate:* `{close_rate:.8f}`" if r['close_rate'] else "",
+            "*Open Rate:* `{open_rate:.8g}`",
+            "*Close Rate:* `{close_rate:.8g}`" if r['close_rate'] else "",
             "*Open Date:* `{open_date}`",
             "*Close Date:* `{close_date}`" if r['close_date'] else "",
-            " \n*Current Rate:* `{current_rate:.8f}`" if r['is_open'] else "",
+            " \n*Current Rate:* `{current_rate:.8g}`" if r['is_open'] else "",
             ("*Unrealized Profit:* " if r['is_open'] else "*Close Profit: *")
             + "`{profit_ratio:.2%}` `({profit_abs_r})`",
         ])
@@ -658,9 +658,9 @@ class Telegram(RPCHandler):
                             "`({initial_stop_loss_ratio:.2%})`")
 
             # Adding stoploss and stoploss percentage only if it is not None
-            lines.append("*Stoploss:* `{stop_loss_abs:.8f}` " +
+            lines.append("*Stoploss:* `{stop_loss_abs:.8g}` " +
                          ("`({stop_loss_ratio:.2%})`" if r['stop_loss_ratio'] else ""))
-            lines.append("*Stoploss distance:* `{stoploss_current_dist:.8f}` "
+            lines.append("*Stoploss distance:* `{stoploss_current_dist:.8g}` "
                          "`({stoploss_current_dist_ratio:.2%})`")
             if r['open_order']:
                 lines.append(
@@ -1114,7 +1114,9 @@ class Telegram(RPCHandler):
     async def _force_exit_action(self, trade_id):
         if trade_id != 'cancel':
             try:
-                self._rpc._rpc_force_exit(trade_id)
+                loop = asyncio.get_running_loop()
+                # Workaround to avoid nested loops
+                await loop.run_in_executor(None, self._rpc._rpc_force_exit, trade_id)
             except RPCException as e:
                 await self._send_msg(str(e))
 
@@ -1140,7 +1142,11 @@ class Telegram(RPCHandler):
     async def _force_enter_action(self, pair, price: Optional[float], order_side: SignalDirection):
         if pair != 'cancel':
             try:
-                self._rpc._rpc_force_entry(pair, price, order_side=order_side)
+                def _force_enter():
+                    self._rpc._rpc_force_entry(pair, price, order_side=order_side)
+                loop = asyncio.get_running_loop()
+                # Workaround to avoid nested loops
+                await loop.run_in_executor(None, _force_enter)
             except RPCException as e:
                 logger.exception("Forcebuy error!")
                 await self._send_msg(str(e), ParseMode.HTML)
diff --git a/freqtrade/strategy/interface.py b/freqtrade/strategy/interface.py
index 382f38c9a..1e9ebe1ae 100644
--- a/freqtrade/strategy/interface.py
+++ b/freqtrade/strategy/interface.py
@@ -48,7 +48,7 @@ class IStrategy(ABC, HyperStrategyMixin):
     _ft_params_from_file: Dict
 
     # associated minimal roi
-    minimal_roi: Dict = {"0": 10.0}
+    minimal_roi: Dict = {}
 
     # associated stoploss
     stoploss: float
@@ -168,7 +168,7 @@ class IStrategy(ABC, HyperStrategyMixin):
                 download_all_data_for_training(self.dp, self.config)
         else:
             # Gracious failures if freqAI is disabled but "start" is called.
-            class DummyClass():
+            class DummyClass:
                 def start(self, *args, **kwargs):
                     raise OperationalException(
                         'freqAI is not enabled. '
@@ -1085,6 +1085,11 @@ class IStrategy(ABC, HyperStrategyMixin):
         exits: List[ExitCheckTuple] = []
         current_rate = rate
         current_profit = trade.calc_profit_ratio(current_rate)
+        current_profit_best = current_profit
+        if low is not None or high is not None:
+            # Set current rate to high for backtesting ROI exits
+            current_rate_best = (low if trade.is_short else high) or rate
+            current_profit_best = trade.calc_profit_ratio(current_rate_best)
 
         trade.adjust_min_max_rates(high or current_rate, low or current_rate)
 
@@ -1093,20 +1098,13 @@ class IStrategy(ABC, HyperStrategyMixin):
                                                  current_profit=current_profit,
                                                  force_stoploss=force_stoploss, low=low, high=high)
 
-        # Set current rate to high for backtesting exits
-        current_rate = (low if trade.is_short else high) or rate
-        current_profit = trade.calc_profit_ratio(current_rate)
-
         # if enter signal and ignore_roi is set, we don't need to evaluate min_roi.
         roi_reached = (not (enter and self.ignore_roi_if_entry_signal)
-                       and self.min_roi_reached(trade=trade, current_profit=current_profit,
+                       and self.min_roi_reached(trade=trade, current_profit=current_profit_best,
                                                 current_time=current_time))
 
         exit_signal = ExitType.NONE
         custom_reason = ''
 
-        # use provided rate in backtesting, not high/low.
-        current_rate = rate
-        current_profit = trade.calc_profit_ratio(current_rate)
-
         if self.use_exit_signal:
             if exit_ and not enter:
@@ -1265,7 +1263,7 @@ class IStrategy(ABC, HyperStrategyMixin):
         :return: minimal ROI entry value or None if none proper ROI entry was found.
         """
         # Get highest entry in ROI dict where key <= trade-duration
-        roi_list = list(filter(lambda x: x <= trade_dur, self.minimal_roi.keys()))
+        roi_list = [x for x in self.minimal_roi.keys() if x <= trade_dur]
         if not roi_list:
             return None, None
         roi_entry = max(roi_list)
@@ -1302,7 +1300,7 @@ class IStrategy(ABC, HyperStrategyMixin):
         timedout = (order.status == 'open' and order.order_date_utc < timeout_threshold)
         if timedout:
             return True
-        time_method = (self.check_exit_timeout if order.side == trade.exit_side
+        time_method = (self.check_exit_timeout if order.ft_order_side == trade.exit_side
                        else self.check_entry_timeout)
 
         return strategy_safe_wrapper(time_method,
diff --git a/freqtrade/templates/FreqaiExampleStrategy.py b/freqtrade/templates/FreqaiExampleStrategy.py
index 347efdda0..084cf2e89 100644
--- a/freqtrade/templates/FreqaiExampleStrategy.py
+++ b/freqtrade/templates/FreqaiExampleStrategy.py
@@ -232,7 +232,7 @@ class FreqaiExampleStrategy(IStrategy):
 
         # All indicators must be populated by feature_engineering_*() functions
 
-        # the model will return all labels created by user in `feature_engineering_*`
+        # the model will return all labels created by user in `set_freqai_targets()`
         # (& appended targets), an indication of whether or not the prediction should be accepted,
         # the target mean/std values for each of the labels created by user in
         # `set_freqai_targets()` for each training period.
diff --git a/freqtrade/types/__init__.py b/freqtrade/types/__init__.py
new file mode 100644
index 000000000..11fe6354b
--- /dev/null
+++ b/freqtrade/types/__init__.py
@@ -0,0 +1 @@
+from freqtrade.types.valid_exchanges_type import ValidExchangesType  # noqa: F401
diff --git a/freqtrade/types/valid_exchanges_type.py b/freqtrade/types/valid_exchanges_type.py
new file mode 100644
index 000000000..c01149455
--- /dev/null
+++ b/freqtrade/types/valid_exchanges_type.py
@@ -0,0 +1,17 @@
+# Used for list-exchanges
+from typing import List
+
+from typing_extensions import TypedDict
+
+
+class TradeModeType(TypedDict):
+    trading_mode: str
+    margin_mode: str
+
+
+class ValidExchangesType(TypedDict):
+    name: str
+    valid: bool
+    supported: bool
+    comment: str
+    trade_modes: List[TradeModeType]
diff --git a/freqtrade/util/binance_mig.py b/freqtrade/util/binance_mig.py
index 37a2d2ef1..9b0f8521f 100644
--- a/freqtrade/util/binance_mig.py
+++ b/freqtrade/util/binance_mig.py
@@ -3,7 +3,7 @@ import logging
 from packaging import version
 from sqlalchemy import select
 
-from freqtrade.constants import Config
+from freqtrade.constants import DOCS_LINK, Config
 from freqtrade.enums.tradingmode import TradingMode
 from freqtrade.exceptions import OperationalException
 from freqtrade.persistence.pairlock import PairLock
@@ -25,7 +25,7 @@ def migrate_binance_futures_names(config: Config):
     if version.parse("2.6.26") > version.parse(ccxt.__version__):
         raise OperationalException(
             "Please follow the update instructions in the docs "
-            "(https://www.freqtrade.io/en/latest/updating/) to install a compatible ccxt version.")
+            f"({DOCS_LINK}/updating/) to install a compatible ccxt version.")
     _migrate_binance_futures_db(config)
     migrate_binance_futures_data(config)
diff --git a/mkdocs.yml b/mkdocs.yml
index 3f9e8a880..815a10419 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -22,6 +22,7 @@ nav:
     - Web Hook: webhook-config.md
     - Data Downloading: data-download.md
    - Backtesting: backtesting.md
+    - Lookahead analysis: lookahead-analysis.md
     - Hyperopt: hyperopt.md
     - FreqAI:
         - Introduction: freqai.md
diff --git a/requirements-dev.txt b/requirements-dev.txt
index cc3463174..282d35101 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -7,24 +7,24 @@
 -r docs/requirements-docs.txt
 
 coveralls==3.3.1
-ruff==0.0.269
-mypy==1.3.0
-pre-commit==3.3.2
-pytest==7.3.1
+ruff==0.0.275
+mypy==1.4.1
+pre-commit==3.3.3
+pytest==7.4.0
 pytest-asyncio==0.21.0
-pytest-cov==4.0.0
-pytest-mock==3.10.0
+pytest-cov==4.1.0
+pytest-mock==3.11.1
 pytest-random-order==1.1.0
 isort==5.12.0
 # For datetime mocking
-time-machine==2.9.0
+time-machine==2.10.0
 # Convert jupyter notebooks to markdown documents
-nbconvert==7.4.0
+nbconvert==7.6.0
 
 # mypy types
 types-cachetools==5.3.0.5
 types-filelock==3.2.7
-types-requests==2.30.0.0
+types-requests==2.31.0.1
 types-tabulate==0.9.0.2
 types-python-dateutil==2.8.19.13
diff --git a/requirements-freqai-rl.txt b/requirements-freqai-rl.txt
index de48a1da4..2672f9c38 100644
--- a/requirements-freqai-rl.txt
+++ b/requirements-freqai-rl.txt
@@ -5,7 +5,7 @@ torch==2.0.1
 #until these branches will be released we can use this
 gymnasium==0.28.1
-stable_baselines3==2.0.0a10
+stable_baselines3==2.0.0a13
 sb3_contrib>=2.0.0a9
 # Progress bar for stable-baselines3 and sb3-contrib
 tqdm==4.65.0
diff --git a/requirements-freqai.txt b/requirements-freqai.txt
index ad069ade2..e8e3b9334 100644
--- a/requirements-freqai.txt
+++ b/requirements-freqai.txt
@@ -5,8 +5,8 @@
 # Required for freqai
 scikit-learn==1.1.3
 joblib==1.2.0
-catboost==1.1.1; sys_platform == 'darwin' and python_version < '3.9'
-catboost==1.2; 'arm' not in platform_machine and (sys_platform != 'darwin' or python_version >= '3.9')
+catboost==1.2; 'arm' not in platform_machine
 lightgbm==3.3.5
-xgboost==1.7.5
+xgboost==1.7.6
 tensorboard==2.13.0
+datasieve==0.1.6
diff --git a/requirements-hyperopt.txt b/requirements-hyperopt.txt
index 87b1fd3c8..163fee75f 100644
--- a/requirements-hyperopt.txt
+++ b/requirements-hyperopt.txt
@@ -5,4 +5,4 @@
 scipy==1.10.1
 scikit-learn==1.1.3
 scikit-optimize==0.9.0
-filelock==3.12.0
+filelock==3.12.2
diff --git a/requirements-plot.txt b/requirements-plot.txt
index 8b9ad5bc4..72303efcb 100644
--- a/requirements-plot.txt
+++ b/requirements-plot.txt
@@ -1,4 +1,4 @@
 # Include all requirements to run the bot.
 -r requirements.txt
 
-plotly==5.14.1
+plotly==5.15.0
diff --git a/requirements.txt b/requirements.txt
index cff54ca1c..ebb1c9a16 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,19 +1,19 @@
 numpy==1.24.3
-pandas==2.0.1
+pandas==2.0.2
 pandas-ta==0.3.14b
 
-ccxt==3.1.5
-cryptography==40.0.2; platform_machine != 'armv7l'
+ccxt==3.1.44
+cryptography==41.0.1; platform_machine != 'armv7l'
 cryptography==40.0.1; platform_machine == 'armv7l'
 aiohttp==3.8.4
-SQLAlchemy==2.0.15
+SQLAlchemy==2.0.17
 python-telegram-bot==20.3
 # can't be hard-pinned due to telegram-bot pinning httpx with ~
-httpx>=0.23.3
+httpx>=0.24.1
 arrow==1.2.3
-cachetools==5.3.0
+cachetools==5.3.1
 requests==2.31.0
-urllib3==2.0.2
+urllib3==2.0.3
 jsonschema==4.17.3
 TA-Lib==0.4.26
 technical==1.4.0
@@ -23,7 +23,7 @@ jinja2==3.1.2
 tables==3.8.0
 blosc==1.11.1
 joblib==1.2.0
-rich==13.3.5
+rich==13.4.2
 pyarrow==12.0.0; platform_machine != 'armv7l'
 
 # find first, C search in arrays
@@ -32,14 +32,14 @@ py_find_1st==1.1.5
 # Load ticker files 30% faster
 python-rapidjson==1.10
 # Properly format api responses
-orjson==3.8.12
+orjson==3.9.1
 
 # Notify systemd
 sdnotify==0.3.2
 
 # API Server
-fastapi==0.95.2
-pydantic==1.10.7
+fastapi==0.99.1
+pydantic==1.10.9
 uvicorn==0.22.0
 pyjwt==2.7.0
 aiofiles==23.1.0
@@ -60,5 +60,5 @@ schedule==1.2.0
 websockets==11.0.3
 janus==1.0.0
 
-ast-comments==1.0.1
+ast-comments==1.1.0
 packaging==23.1
diff --git a/scripts/rest_client.py b/scripts/rest_client.py
index 0772af269..2b4690287 100755
--- a/scripts/rest_client.py
+++ b/scripts/rest_client.py
@@ -29,7 +29,7 @@ logging.basicConfig(
 logger = logging.getLogger("ft_rest_client")
 
 
-class FtRestClient():
+class FtRestClient:
 
     def __init__(self, serverurl, username=None, password=None):
 
@@ -314,6 +314,13 @@ class FtRestClient():
         """
         return self._get(f"strategy/{strategy}")
 
+    def pairlists_available(self):
+        """Lists available pairlist providers
+
+        :return: json object
+        """
+        return self._get("pairlists/available")
+
     def plot_config(self):
         """Return plot configuration if the strategy defines one.
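The endpoints introduced above can be exercised end to end through this client. A minimal sketch, assuming a bot running in webserver mode with placeholder credentials and the client's generic `_post` helper for the calls that have no dedicated wrapper yet (only `pairlists_available()` is added by this patch):

    import time

    from rest_client import FtRestClient  # scripts/rest_client.py

    client = FtRestClient("http://127.0.0.1:8080", "freqtrader", "password")
    print(client.pairlists_available())  # handlers plus their parameter schemas

    # Kick off a background evaluation; payload fields follow PairListsPayload.
    job = client._post("pairlists/evaluate", data={
        "pairlists": [{"method": "VolumePairList", "number_assets": 10}],
        "blacklist": [],
        "stake_currency": "USDT",
    })

    # Poll the generic job endpoint until the task has finished ...
    while client._get(f"background/{job['job_id']}")["running"]:
        time.sleep(1)

    # ... then fetch the evaluated whitelist.
    print(client._get(f"pairlists/evaluate/{job['job_id']}"))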
diff --git a/setup.py b/setup.py
index f8b8b515c..4b73ae653 100644
--- a/setup.py
+++ b/setup.py
@@ -5,7 +5,7 @@ from setuptools import setup
 plot = ['plotly>=4.0']
 hyperopt = [
     'scipy',
-    'scikit-learn',
+    'scikit-learn<=1.1.3',
     'scikit-optimize>=0.7.0',
     'filelock',
 ]
@@ -16,7 +16,8 @@ freqai = [
     'catboost; platform_machine != "aarch64"',
     'lightgbm',
     'xgboost',
-    'tensorboard'
+    'tensorboard',
+    'datasieve>=0.1.5'
 ]
 
 freqai_rl = [
@@ -107,7 +108,7 @@ setup(
         'ast-comments',
         'aiohttp',
         'cryptography',
-        'httpx',
+        'httpx>=0.24.1',
         'python-dateutil',
         'packaging',
     ],
diff --git a/tests/commands/test_commands.py b/tests/commands/test_commands.py
index fe847e94b..fccfd8ebb 100644
--- a/tests/commands/test_commands.py
+++ b/tests/commands/test_commands.py
@@ -641,7 +641,7 @@ def test_get_ui_download_url_direct(mocker):
 
 
 def test_download_data_keyboardInterrupt(mocker, markets):
-    dl_mock = mocker.patch('freqtrade.commands.data_commands.refresh_backtest_ohlcv_data',
+    dl_mock = mocker.patch('freqtrade.commands.data_commands.download_data_main',
                            MagicMock(side_effect=KeyboardInterrupt))
     patch_exchange(mocker)
     mocker.patch(f'{EXMS}.markets', PropertyMock(return_value=markets))
@@ -660,7 +660,7 @@ def test_download_data_keyboardInterrupt(mocker, markets):
 
 
 def test_download_data_timerange(mocker, markets):
-    dl_mock = mocker.patch('freqtrade.commands.data_commands.refresh_backtest_ohlcv_data',
+    dl_mock = mocker.patch('freqtrade.data.history.history_utils.refresh_backtest_ohlcv_data',
                            MagicMock(return_value=["ETH/BTC", "XRP/BTC"]))
     patch_exchange(mocker)
     mocker.patch(f'{EXMS}.markets', PropertyMock(return_value=markets))
@@ -708,10 +708,10 @@ def test_download_data_timerange(mocker, markets):
 
 
 def test_download_data_no_markets(mocker, caplog):
-    dl_mock = mocker.patch('freqtrade.commands.data_commands.refresh_backtest_ohlcv_data',
+    dl_mock = mocker.patch('freqtrade.data.history.history_utils.refresh_backtest_ohlcv_data',
                            MagicMock(return_value=["ETH/BTC", "XRP/BTC"]))
     patch_exchange(mocker, id='binance')
-    mocker.patch(f'{EXMS}.markets', PropertyMock(return_value={}))
+    mocker.patch(f'{EXMS}.get_markets', return_value={})
     args = [
         "download-data",
         "--exchange", "binance",
@@ -723,11 +723,11 @@ def test_download_data_no_markets(mocker, caplog):
     assert log_has("Pairs [ETH/BTC,XRP/BTC] not available on exchange Binance.", caplog)
 
 
-def test_download_data_no_exchange(mocker, caplog):
-    mocker.patch('freqtrade.commands.data_commands.refresh_backtest_ohlcv_data',
+def test_download_data_no_exchange(mocker):
+    mocker.patch('freqtrade.data.history.history_utils.refresh_backtest_ohlcv_data',
                  MagicMock(return_value=["ETH/BTC", "XRP/BTC"]))
     patch_exchange(mocker)
-    mocker.patch(f'{EXMS}.markets', PropertyMock(return_value={}))
+    mocker.patch(f'{EXMS}.get_markets', return_value={})
     args = [
         "download-data",
     ]
@@ -740,7 +740,7 @@ def test_download_data_no_exchange(mocker, caplog):
 
 
 def test_download_data_no_pairs(mocker):
-    mocker.patch('freqtrade.commands.data_commands.refresh_backtest_ohlcv_data',
+    mocker.patch('freqtrade.data.history.history_utils.refresh_backtest_ohlcv_data',
                  MagicMock(return_value=["ETH/BTC", "XRP/BTC"]))
     patch_exchange(mocker)
     mocker.patch(f'{EXMS}.markets', PropertyMock(return_value={}))
@@ -758,7 +758,7 @@ def test_download_data_no_pairs(mocker):
 
 
 def test_download_data_all_pairs(mocker, markets):
-    dl_mock = mocker.patch('freqtrade.commands.data_commands.refresh_backtest_ohlcv_data',
+    dl_mock = mocker.patch('freqtrade.data.history.history_utils.refresh_backtest_ohlcv_data',
                            MagicMock(return_value=["ETH/BTC", "XRP/BTC"]))
     patch_exchange(mocker)
     mocker.patch(f'{EXMS}.markets', PropertyMock(return_value=markets))
@@ -792,13 +792,13 @@ def test_download_data_all_pairs(mocker, markets):
     assert set(dl_mock.call_args_list[0][1]['pairs']) == expected
 
 
-def test_download_data_trades(mocker, caplog):
-    dl_mock = mocker.patch('freqtrade.commands.data_commands.refresh_backtest_trades_data',
+def test_download_data_trades(mocker):
+    dl_mock = mocker.patch('freqtrade.data.history.history_utils.refresh_backtest_trades_data',
                            MagicMock(return_value=[]))
-    convert_mock = mocker.patch('freqtrade.commands.data_commands.convert_trades_to_ohlcv',
+    convert_mock = mocker.patch('freqtrade.data.history.history_utils.convert_trades_to_ohlcv',
                                 MagicMock(return_value=[]))
     patch_exchange(mocker)
-    mocker.patch(f'{EXMS}.markets', PropertyMock(return_value={}))
+    mocker.patch(f'{EXMS}.get_markets', return_value={})
     args = [
         "download-data",
         "--exchange", "kraken",
@@ -829,7 +829,7 @@ def test_download_data_trades(mocker, caplog):
 
 def test_download_data_data_invalid(mocker):
     patch_exchange(mocker, id="kraken")
-    mocker.patch(f'{EXMS}.markets', PropertyMock(return_value={}))
+    mocker.patch(f'{EXMS}.get_markets', return_value={})
     args = [
         "download-data",
         "--exchange", "kraken",
diff --git a/tests/data/test_download_data.py b/tests/data/test_download_data.py
new file mode 100644
index 000000000..191dbb7d3
--- /dev/null
+++ b/tests/data/test_download_data.py
@@ -0,0 +1,96 @@
+from unittest.mock import MagicMock, PropertyMock
+
+import pytest
+
+from freqtrade.configuration.config_setup import setup_utils_configuration
+from freqtrade.data.history.history_utils import download_data_main
+from freqtrade.enums import RunMode
+from freqtrade.exceptions import OperationalException
+from tests.conftest import EXMS, log_has, patch_exchange
+
+
+def test_download_data_main_no_markets(mocker, caplog):
+    dl_mock = mocker.patch('freqtrade.data.history.history_utils.refresh_backtest_ohlcv_data',
+                           MagicMock(return_value=["ETH/BTC", "XRP/BTC"]))
+    patch_exchange(mocker, id='binance')
+    mocker.patch(f'{EXMS}.get_markets', return_value={})
+    config = setup_utils_configuration({"exchange": "binance"}, RunMode.UTIL_EXCHANGE)
+    config.update({
+        "days": 20,
+        "pairs": ["ETH/BTC", "XRP/BTC"],
+        "timeframes": ["5m", "1h"]
+    })
+    download_data_main(config)
+    assert dl_mock.call_args[1]['timerange'].starttype == "date"
+    assert log_has("Pairs [ETH/BTC,XRP/BTC] not available on exchange Binance.", caplog)
+
+
+def test_download_data_main_all_pairs(mocker, markets):
+
+    dl_mock = mocker.patch('freqtrade.data.history.history_utils.refresh_backtest_ohlcv_data',
+                           MagicMock(return_value=["ETH/BTC", "XRP/BTC"]))
+    patch_exchange(mocker)
+    mocker.patch(f'{EXMS}.markets', PropertyMock(return_value=markets))
+
+    config = setup_utils_configuration({"exchange": "binance"}, RunMode.UTIL_EXCHANGE)
+    config.update({
+        "pairs": [".*/USDT"],
+        "timeframes": ["5m", "1h"]
+    })
+    download_data_main(config)
+    expected = set(['ETH/USDT', 'XRP/USDT', 'NEO/USDT', 'TKN/USDT'])
+    assert set(dl_mock.call_args_list[0][1]['pairs']) == expected
+    assert dl_mock.call_count == 1
+
+    dl_mock.reset_mock()
+
+    config.update({
+        "pairs": [".*/USDT"],
+        "timeframes": ["5m", "1h"],
+        "include_inactive": True
+    })
+    download_data_main(config)
+    expected = set(['ETH/USDT', 'LTC/USDT', 'XRP/USDT', 'NEO/USDT', 'TKN/USDT'])
+    assert set(dl_mock.call_args_list[0][1]['pairs']) == expected
+
+
+def test_download_data_main_trades(mocker):
+    dl_mock = mocker.patch('freqtrade.data.history.history_utils.refresh_backtest_trades_data',
+                           MagicMock(return_value=[]))
+    convert_mock = mocker.patch('freqtrade.data.history.history_utils.convert_trades_to_ohlcv',
+                                MagicMock(return_value=[]))
+    patch_exchange(mocker)
+    mocker.patch(f'{EXMS}.get_markets', return_value={})
+    config = setup_utils_configuration({"exchange": "binance"}, RunMode.UTIL_EXCHANGE)
+    config.update({
+        "days": 20,
+        "pairs": ["ETH/BTC", "XRP/BTC"],
+        "timeframes": ["5m", "1h"],
+        "download_trades": True,
+    })
+    download_data_main(config)
+
+    assert dl_mock.call_args[1]['timerange'].starttype == "date"
+    assert dl_mock.call_count == 1
+    assert convert_mock.call_count == 1
+    config.update({
+        "download_trades": True,
+        "trading_mode": "futures",
+    })
+
+    with pytest.raises(OperationalException,
+                       match="Trade download not supported for futures."):
+        download_data_main(config)
+
+
+def test_download_data_main_data_invalid(mocker):
+    patch_exchange(mocker, id="kraken")
+    mocker.patch(f'{EXMS}.get_markets', return_value={})
+    config = setup_utils_configuration({"exchange": "kraken"}, RunMode.UTIL_EXCHANGE)
+    config.update({
+        "days": 20,
+        "pairs": ["ETH/BTC", "XRP/BTC"],
+        "timeframes": ["5m", "1h"],
+    })
+    with pytest.raises(OperationalException, match=r"Historic klines not available for .*"):
+        download_data_main(config)
diff --git a/tests/data/test_history.py b/tests/data/test_history.py
index e397c97c1..ab2238b08 100644
--- a/tests/data/test_history.py
+++ b/tests/data/test_history.py
@@ -1,6 +1,7 @@
 # pragma pylint: disable=missing-docstring, protected-access, C0103
 
 import json
+import logging
 import uuid
 from pathlib import Path
 from shutil import copyfile
@@ -503,9 +504,10 @@ def test_validate_backtest_data(default_conf, mocker, caplog, testdatadir) -> No
 ])
 def test_refresh_backtest_ohlcv_data(
         mocker, default_conf, markets, caplog, testdatadir, trademode, callcount):
-    dl_mock = mocker.patch('freqtrade.data.history.history_utils._download_pair_history',
-                           MagicMock())
+    caplog.set_level(logging.DEBUG)
+    dl_mock = mocker.patch('freqtrade.data.history.history_utils._download_pair_history')
     mocker.patch(f'{EXMS}.markets', PropertyMock(return_value=markets))
+
     mocker.patch.object(Path, "exists", MagicMock(return_value=True))
     mocker.patch.object(Path, "unlink", MagicMock())
 
@@ -520,7 +522,7 @@ def test_refresh_backtest_ohlcv_data(
 
     assert dl_mock.call_count == callcount
     assert dl_mock.call_args[1]['timerange'].starttype == 'date'
-    assert log_has("Downloading pair ETH/BTC, interval 1m.", caplog)
+    assert log_has_re(r"Downloading pair ETH/BTC, .* interval 1m\.", caplog)
 
 
 def test_download_data_no_markets(mocker, default_conf, caplog, testdatadir):
diff --git a/tests/exchange/test_ccxt_compat.py b/tests/exchange/test_ccxt_compat.py
index 404b51d10..b1a6f1c65 100644
--- a/tests/exchange/test_ccxt_compat.py
+++ b/tests/exchange/test_ccxt_compat.py
@@ -342,7 +342,7 @@ def exchange_futures(request, exchange_conf, class_mocker):
 
 
 @pytest.mark.longrun
-class TestCCXTExchange():
+class TestCCXTExchange:
 
     def test_load_markets(self, exchange: EXCHANGE_FIXTURE_TYPE):
         exch, exchangename = exchange
diff --git a/tests/exchange/test_exchange.py b/tests/exchange/test_exchange.py
index f022a0905..5fa2755d2 100644
--- a/tests/exchange/test_exchange.py
+++ b/tests/exchange/test_exchange.py
@@ -3489,6 +3489,27 @@ def test_stoploss_order_unsupported_exchange(default_conf, mocker):
         exchange.stoploss_adjust(1, {}, side="sell")
 
 
+@pytest.mark.parametrize('side,ratio,expected', [
+    ('sell', 0.99, 99.0),  # Default
+    ('sell', 0.999, 99.9),
+    ('sell', 1, 100),
+    ('sell', 1.1, InvalidOrderException),
+    ('buy', 0.99, 101.0),  # Default
+    ('buy', 0.999, 100.1),
+    ('buy', 1, 100),
+    ('buy', 1.1, InvalidOrderException),
+])
+def test__get_stop_limit_rate(default_conf_usdt, mocker, side, ratio, expected):
+    exchange = get_patched_exchange(mocker, default_conf_usdt, id='binance')
+
+    order_types = {'stoploss_on_exchange_limit_ratio': ratio}
+    if isinstance(expected, type) and issubclass(expected, Exception):
+        with pytest.raises(expected):
+            exchange._get_stop_limit_rate(100, order_types, side)
+    else:
+        assert exchange._get_stop_limit_rate(100, order_types, side) == expected
+
+
 def test_merge_ft_has_dict(default_conf, mocker):
     mocker.patch.multiple(EXMS,
                           _init_ccxt=MagicMock(return_value=MagicMock()),
diff --git a/tests/exchange/test_okx.py b/tests/exchange/test_okx.py
index 3824eddb7..e8f059118 100644
--- a/tests/exchange/test_okx.py
+++ b/tests/exchange/test_okx.py
@@ -499,7 +499,11 @@ def test__set_leverage_okx(mocker, default_conf):
     assert api_mock.set_leverage.call_args_list[0][1]['params'] == {
         'mgnMode': 'isolated',
         'posSide': 'net'}
 
+    api_mock.set_leverage = MagicMock(side_effect=ccxt.NetworkError())
+    exchange._lev_prep('BTC/USDT:USDT', 3.2, 'buy')
+    assert api_mock.fetch_leverage.call_count == 1
+
+    api_mock.fetch_leverage = MagicMock(side_effect=ccxt.NetworkError())
     ccxt_exceptionhandlers(
         mocker,
         default_conf,
@@ -592,3 +596,25 @@ def test_stoploss_adjust_okx(mocker, default_conf, sl1, sl2, sl3, side):
     }
     assert exchange.stoploss_adjust(sl1, order, side=side)
     assert not exchange.stoploss_adjust(sl2, order, side=side)
+
+
+def test_stoploss_cancel_okx(mocker, default_conf):
+    exchange = get_patched_exchange(mocker, default_conf, id='okx')
+
+    exchange.cancel_order = MagicMock()
+
+    exchange.cancel_stoploss_order('1234', 'ETH/USDT')
+    assert exchange.cancel_order.call_count == 1
+    assert exchange.cancel_order.call_args_list[0][1]['order_id'] == '1234'
+    assert exchange.cancel_order.call_args_list[0][1]['pair'] == 'ETH/USDT'
+    assert exchange.cancel_order.call_args_list[0][1]['params'] == {'stop': True}
+
+
+def test__get_stop_params_okx(mocker, default_conf):
+    default_conf['trading_mode'] = 'futures'
+    default_conf['margin_mode'] = 'isolated'
+    exchange = get_patched_exchange(mocker, default_conf, id='okx')
+    params = exchange._get_stop_params('ETH/USDT:USDT', 1500, 'sell')
+
+    assert params['tdMode'] == 'isolated'
+    assert params['posSide'] == 'net'
diff --git a/tests/freqai/test_freqai_datakitchen.py b/tests/freqai/test_freqai_datakitchen.py
index 13dc6b4b0..8d09cfc58 100644
--- a/tests/freqai/test_freqai_datakitchen.py
+++ b/tests/freqai/test_freqai_datakitchen.py
@@ -9,9 +9,9 @@ from freqtrade.configuration import TimeRange
 from freqtrade.data.dataprovider import DataProvider
 from freqtrade.exceptions import OperationalException
 from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
-from tests.conftest import get_patched_exchange, log_has_re
+from tests.conftest import get_patched_exchange
 from tests.freqai.conftest import (get_patched_data_kitchen, get_patched_freqai_strategy,
-                                   make_data_dictionary, make_unfiltered_dataframe)
+                                   make_unfiltered_dataframe)
 from tests.freqai.test_freqai_interface import is_mac
 
 
@@ -72,68 +72,6 @@ def test_check_if_model_expired(mocker, freqai_conf):
     shutil.rmtree(Path(dk.full_path))
 
 
-def test_use_DBSCAN_to_remove_outliers(mocker, freqai_conf, caplog):
-    freqai = make_data_dictionary(mocker, freqai_conf)
-    # freqai_conf['freqai']['feature_parameters'].update({"outlier_protection_percentage": 1})
-    freqai.dk.use_DBSCAN_to_remove_outliers(predict=False)
-    assert log_has_re(r"DBSCAN found eps of 1\.7\d\.", caplog)
-
-
-def test_compute_distances(mocker, freqai_conf):
-    freqai = make_data_dictionary(mocker, freqai_conf)
-    freqai_conf['freqai']['feature_parameters'].update({"DI_threshold": 1})
-    avg_mean_dist = freqai.dk.compute_distances()
-    assert round(avg_mean_dist, 2) == 1.98
-
-
-def test_use_SVM_to_remove_outliers_and_outlier_protection(mocker, freqai_conf, caplog):
-    freqai = make_data_dictionary(mocker, freqai_conf)
-    freqai_conf['freqai']['feature_parameters'].update({"outlier_protection_percentage": 0.1})
-    freqai.dk.use_SVM_to_remove_outliers(predict=False)
-    assert log_has_re(
-        "SVM detected 7.83%",
-        caplog,
-    )
-
-
-def test_compute_inlier_metric(mocker, freqai_conf, caplog):
-    freqai = make_data_dictionary(mocker, freqai_conf)
-    freqai_conf['freqai']['feature_parameters'].update({"inlier_metric_window": 10})
-    freqai.dk.compute_inlier_metric(set_='train')
-    assert log_has_re(
-        "Inlier metric computed and added to features.",
-        caplog,
-    )
-
-
-def test_add_noise_to_training_features(mocker, freqai_conf):
-    freqai = make_data_dictionary(mocker, freqai_conf)
-    freqai_conf['freqai']['feature_parameters'].update({"noise_standard_deviation": 0.1})
-    freqai.dk.add_noise_to_training_features()
-
-
-def test_remove_beginning_points_from_data_dict(mocker, freqai_conf):
-    freqai = make_data_dictionary(mocker, freqai_conf)
-    freqai.dk.remove_beginning_points_from_data_dict(set_='train')
-
-
-def test_principal_component_analysis(mocker, freqai_conf, caplog):
-    freqai = make_data_dictionary(mocker, freqai_conf)
-    freqai.dk.principal_component_analysis()
-    assert log_has_re(
-        "reduced feature dimension by",
-        caplog,
-    )
-
-
-def test_normalize_data(mocker, freqai_conf):
-    freqai = make_data_dictionary(mocker, freqai_conf)
-    data_dict = freqai.dk.data_dictionary
-    freqai.dk.normalize_data(data_dict)
-    assert any('_max' in entry for entry in freqai.dk.data.keys())
-    assert any('_min' in entry for entry in freqai.dk.data.keys())
-
-
 def test_filter_features(mocker, freqai_conf):
     freqai, unfiltered_dataframe = make_unfiltered_dataframe(mocker, freqai_conf)
     freqai.dk.find_features(unfiltered_dataframe)
diff --git a/tests/freqai/test_freqai_interface.py b/tests/freqai/test_freqai_interface.py
index 61a7b7346..55338f611 100644
--- a/tests/freqai/test_freqai_interface.py
+++ b/tests/freqai/test_freqai_interface.py
@@ -1,3 +1,4 @@
+import logging
 import platform
 import shutil
 import sys
@@ -37,21 +38,22 @@ def can_run_model(model: str) -> None:
         pytest.skip("Reinforcement learning / PyTorch module not available on intel based Mac OS.")
 
 
-@pytest.mark.parametrize('model, pca, dbscan, float32, can_short, shuffle, buffer', [
-    ('LightGBMRegressor', True, False, True, True, False, 0),
-    ('XGBoostRegressor', False, True, False, True, False, 10),
-    ('XGBoostRFRegressor', False, False, False, True, False, 0),
-    ('CatboostRegressor', False, False, False, True, True, 0),
-    ('PyTorchMLPRegressor', False, False, False, False, False, 0),
-    ('PyTorchTransformerRegressor', False, False, False, False, False, 0),
-    ('ReinforcementLearner', False, True, False, True, False, 0),
-    ('ReinforcementLearner_multiproc', False, False, False, True, False, 0),
-    ('ReinforcementLearner_test_3ac', False, False, False, False, False, 0),
-    ('ReinforcementLearner_test_3ac', False, False, False, True, False, 0),
-    ('ReinforcementLearner_test_4ac', False, False, False, True, False, 0),
+@pytest.mark.parametrize('model, pca, dbscan, float32, can_short, shuffle, buffer, noise', [
+    ('LightGBMRegressor', True, False, True, True, False, 0, 0),
+    ('XGBoostRegressor', False, True, False, True, False, 10, 0.05),
+    ('XGBoostRFRegressor', False, False, False, True, False, 0, 0),
+    ('CatboostRegressor', False, False, False, True, True, 0, 0),
+    ('PyTorchMLPRegressor', False, False, False, False, False, 0, 0),
+    ('PyTorchTransformerRegressor', False, False, False, False, False, 0, 0),
+    ('ReinforcementLearner', False, True, False, True, False, 0, 0),
+    ('ReinforcementLearner_multiproc', False, False, False, True, False, 0, 0),
+    ('ReinforcementLearner_test_3ac', False, False, False, False, False, 0, 0),
+    ('ReinforcementLearner_test_3ac', False, False, False, True, False, 0, 0),
+    ('ReinforcementLearner_test_4ac', False, False, False, True, False, 0, 0),
 ])
 def test_extract_data_and_train_model_Standard(mocker, freqai_conf, model, pca,
-                                               dbscan, float32, can_short, shuffle, buffer):
+                                               dbscan, float32, can_short, shuffle,
+                                               buffer, noise):
 
     can_run_model(model)
 
@@ -68,12 +70,14 @@ def test_extract_data_and_train_model_Standard(mocker, freqai_conf, model, pca,
     freqai_conf.update({"reduce_df_footprint": float32})
     freqai_conf['freqai']['feature_parameters'].update({"shuffle_after_split": shuffle})
     freqai_conf['freqai']['feature_parameters'].update({"buffer_train_data_candles": buffer})
+    freqai_conf['freqai']['feature_parameters'].update({"noise_standard_deviation": noise})
 
     if 'ReinforcementLearner' in model:
         model_save_ext = 'zip'
         freqai_conf = make_rl_config(freqai_conf)
         # test the RL guardrails
         freqai_conf['freqai']['feature_parameters'].update({"use_SVM_to_remove_outliers": True})
+        freqai_conf['freqai']['feature_parameters'].update({"DI_threshold": 2})
         freqai_conf['freqai']['data_split_parameters'].update({'shuffle': True})
 
     if 'test_3ac' in model or 'test_4ac' in model:
@@ -162,7 +166,6 @@ def test_extract_data_and_train_model_MultiTargets(mocker, freqai_conf, model, s
     assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_model.joblib").is_file()
     assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_metadata.json").is_file()
     assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_trained_df.pkl").is_file()
-    assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_svm_model.joblib").is_file()
     assert len(freqai.dk.data['training_features_list']) == 14
 
     shutil.rmtree(Path(freqai.dk.full_path))
@@ -218,7 +221,6 @@ def test_extract_data_and_train_model_Classifiers(mocker, freqai_conf, model):
             f"{freqai.dk.model_filename}_model{model_file_extension}").exists()
     assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_metadata.json").exists()
     assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_trained_df.pkl").exists()
-    assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_svm_model.joblib").exists()
 
     shutil.rmtree(Path(freqai.dk.full_path))
 
@@ -283,9 +285,6 @@ def test_start_backtesting(mocker, freqai_conf, model, num_files, strat, caplog)
     _, base_df = freqai.dd.get_base_and_corr_dataframes(sub_timerange, "LTC/BTC", freqai.dk)
     df = base_df[freqai_conf["timeframe"]]
 
-    for i in range(5):
-        df[f'%-constant_{i}'] = i
-
     metadata = {"pair": "LTC/BTC"}
     freqai.dk.set_paths('LTC/BTC', None)
     freqai.start_backtesting(df, metadata, freqai.dk, strategy)
@@ -293,14 +292,6 @@ def test_start_backtesting(mocker, freqai_conf, model, num_files, strat, caplog)
     assert len(model_folders) == num_files
     Trade.use_db = True
-    assert log_has_re(
-        "Removed features ",
-        caplog,
-    )
-    assert log_has_re(
-        "Removed 5 features from prediction features, ",
-        caplog,
-    )
     Backtesting.cleanup()
     shutil.rmtree(Path(freqai.dk.full_path))
 
@@ -425,36 +416,6 @@ def test_backtesting_fit_live_predictions(mocker, freqai_conf, caplog):
     shutil.rmtree(Path(freqai.dk.full_path))
 
 
-def test_principal_component_analysis(mocker, freqai_conf):
-    freqai_conf.update({"timerange": "20180110-20180130"})
-    freqai_conf.get("freqai", {}).get("feature_parameters", {}).update(
-        {"princpial_component_analysis": "true"})
-
-    strategy = get_patched_freqai_strategy(mocker, freqai_conf)
-    exchange = get_patched_exchange(mocker, freqai_conf)
-    strategy.dp = DataProvider(freqai_conf, exchange)
-    strategy.freqai_info = freqai_conf.get("freqai", {})
-    freqai = strategy.freqai
-    freqai.live = True
-    freqai.dk = FreqaiDataKitchen(freqai_conf)
-    freqai.dk.live = True
-    timerange = TimeRange.parse_timerange("20180110-20180130")
-    freqai.dd.load_all_pair_histories(timerange, freqai.dk)
-
-    freqai.dd.pair_dict = MagicMock()
-
-    data_load_timerange = TimeRange.parse_timerange("20180110-20180130")
-    new_timerange = TimeRange.parse_timerange("20180120-20180130")
-    freqai.dk.set_paths('ADA/BTC', None)
-
-    freqai.extract_data_and_train_model(
-        new_timerange, "ADA/BTC", strategy, freqai.dk, data_load_timerange)
-
-    assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_pca_object.pkl")
-
-    shutil.rmtree(Path(freqai.dk.full_path))
-
-
 def test_plot_feature_importance(mocker, freqai_conf):
 
     from freqtrade.freqai.utils import plot_feature_importance
@@ -540,6 +501,7 @@ def test_get_required_data_timerange(mocker, freqai_conf):
 
 
 def test_download_all_data_for_training(mocker, freqai_conf, caplog, tmpdir):
+    caplog.set_level(logging.DEBUG)
     strategy = get_patched_freqai_strategy(mocker, freqai_conf)
     exchange = get_patched_exchange(mocker, freqai_conf)
     pairlist = PairListManager(exchange, freqai_conf)
diff --git a/tests/optimize/test_backtest_detail.py b/tests/optimize/test_backtest_detail.py
index 158dd04dc..82c036e07 100644
--- a/tests/optimize/test_backtest_detail.py
+++ b/tests/optimize/test_backtest_detail.py
@@ -820,7 +820,7 @@ tc52 = BTContainer(data=[
     [2, 4900, 5250, 4500, 5100, 6172, 0, 0],  # Order readjust
     [3, 5100, 5100, 4650, 4750, 6172, 0, 0],  # stoploss hit?
     [4, 4750, 4950, 4350, 4750, 6172, 0, 0]],
-    stop_loss=-0.03, roi={"0": 0.10}, profit_perc=-0.03,
+    stop_loss=-0.03, roi={}, profit_perc=-0.03,
     use_exit_signal=True, timeout=1000,
     custom_entry_price=4200, adjust_entry_price=5200,
     trades=[BTrade(exit_reason=ExitType.STOP_LOSS, open_tick=1, close_tick=2, is_short=False)]
diff --git a/tests/optimize/test_backtesting.py b/tests/optimize/test_backtesting.py
index bef942b43..a333cda9d 100644
--- a/tests/optimize/test_backtesting.py
+++ b/tests/optimize/test_backtesting.py
@@ -1437,9 +1437,11 @@ def test_backtest_start_multi_strat(default_conf, mocker, caplog, testdatadir):
     strattable_mock = MagicMock()
     strat_summary = MagicMock()
 
-    mocker.patch.multiple('freqtrade.optimize.optimize_reports',
+    mocker.patch.multiple('freqtrade.optimize.optimize_reports.bt_output',
                           text_table_bt_results=text_table_mock,
                          text_table_strategy=strattable_mock,
+                          )
+    mocker.patch.multiple('freqtrade.optimize.optimize_reports.optimize_reports',
                           generate_pair_metrics=MagicMock(),
                           generate_exit_reason_stats=sell_reason_mock,
                           generate_strategy_comparison=strat_summary,
diff --git a/tests/optimize/test_lookahead_analysis.py b/tests/optimize/test_lookahead_analysis.py
new file mode 100644
index 000000000..3c6a5ad6d
--- /dev/null
+++ b/tests/optimize/test_lookahead_analysis.py
@@ -0,0 +1,366 @@
+# pragma pylint: disable=missing-docstring, W0212, line-too-long, C0103, unused-argument
+from copy import deepcopy
+from pathlib import Path
+from unittest.mock import MagicMock, PropertyMock
+
+import pytest
+
+from freqtrade.commands.optimize_commands import start_lookahead_analysis
+from freqtrade.data.history import get_timerange
+from freqtrade.exceptions import OperationalException
+from freqtrade.optimize.lookahead_analysis import Analysis, LookaheadAnalysis
+from freqtrade.optimize.lookahead_analysis_helpers import LookaheadAnalysisSubFunctions
+from tests.conftest import EXMS, get_args, log_has_re, patch_exchange
+
+
+@pytest.fixture
+def lookahead_conf(default_conf_usdt):
+    default_conf_usdt['minimum_trade_amount'] = 10
+    default_conf_usdt['targeted_trade_amount'] = 20
+    default_conf_usdt['strategy_path'] = str(
+        Path(__file__).parent.parent / "strategy/strats/lookahead_bias")
+    default_conf_usdt['strategy'] = 'strategy_test_v3_with_lookahead_bias'
+    default_conf_usdt['max_open_trades'] = 1
+    default_conf_usdt['dry_run_wallet'] = 1000000000
+    default_conf_usdt['pairs'] = ['UNITTEST/USDT']
+    return default_conf_usdt
+
+
+def test_start_lookahead_analysis(mocker):
+    single_mock = MagicMock()
+    text_table_mock = MagicMock()
+    mocker.patch.multiple(
+        'freqtrade.optimize.lookahead_analysis_helpers.LookaheadAnalysisSubFunctions',
+        initialize_single_lookahead_analysis=single_mock,
+        text_table_lookahead_analysis_instances=text_table_mock,
+    )
+    args = [
+        "lookahead-analysis",
+        "--strategy",
+        "strategy_test_v3_with_lookahead_bias",
+        "--strategy-path",
+        str(Path(__file__).parent.parent / "strategy/strats/lookahead_bias"),
+        "--pairs",
+        "UNITTEST/BTC",
+        "--max-open-trades",
+        "1"
+    ]
+    pargs = get_args(args)
+    pargs['config'] = None
+
+    start_lookahead_analysis(pargs)
+    assert single_mock.call_count == 1
+    assert text_table_mock.call_count == 1
+
+    single_mock.reset_mock()
+
+    # Test invalid config
+    args = [
+        "lookahead-analysis",
+        "--strategy",
+        "strategy_test_v3_with_lookahead_bias",
+        "--strategy-path",
+        str(Path(__file__).parent.parent / "strategy/strats/lookahead_bias"),
+        "--targeted-trade-amount",
+        "10",
+        "--minimum-trade-amount",
+        "20",
+    ]
+    pargs = 
get_args(args) + pargs['config'] = None + with pytest.raises(OperationalException, + match=r"Targeted trade amount can't be smaller than minimum trade amount.*"): + start_lookahead_analysis(pargs) + + +def test_lookahead_helper_invalid_config(lookahead_conf) -> None: + conf = deepcopy(lookahead_conf) + conf['targeted_trade_amount'] = 10 + conf['minimum_trade_amount'] = 40 + with pytest.raises(OperationalException, + match=r"Targeted trade amount can't be smaller than minimum trade amount.*"): + LookaheadAnalysisSubFunctions.start(conf) + + +def test_lookahead_helper_no_strategy_defined(lookahead_conf): + conf = deepcopy(lookahead_conf) + conf['pairs'] = ['UNITTEST/USDT'] + del conf['strategy'] + with pytest.raises(OperationalException, + match=r"No Strategy specified"): + LookaheadAnalysisSubFunctions.start(conf) + + +def test_lookahead_helper_start(lookahead_conf, mocker) -> None: + single_mock = MagicMock() + text_table_mock = MagicMock() + mocker.patch.multiple( + 'freqtrade.optimize.lookahead_analysis_helpers.LookaheadAnalysisSubFunctions', + initialize_single_lookahead_analysis=single_mock, + text_table_lookahead_analysis_instances=text_table_mock, + ) + LookaheadAnalysisSubFunctions.start(lookahead_conf) + assert single_mock.call_count == 1 + assert text_table_mock.call_count == 1 + + single_mock.reset_mock() + text_table_mock.reset_mock() + + +def test_lookahead_helper_text_table_lookahead_analysis_instances(lookahead_conf): + analysis = Analysis() + analysis.has_bias = True + analysis.total_signals = 5 + analysis.false_entry_signals = 4 + analysis.false_exit_signals = 3 + + strategy_obj = { + 'name': "strategy_test_v3_with_lookahead_bias", + 'location': Path(lookahead_conf['strategy_path'], f"{lookahead_conf['strategy']}.py") + } + + instance = LookaheadAnalysis(lookahead_conf, strategy_obj) + instance.current_analysis = analysis + table, headers, data = (LookaheadAnalysisSubFunctions. + text_table_lookahead_analysis_instances(lookahead_conf, [instance])) + + # check row contents for a run that has too few signals + assert data[0][0] == 'strategy_test_v3_with_lookahead_bias.py' + assert data[0][1] == 'strategy_test_v3_with_lookahead_bias' + assert 'too few trades' in data[0][2] + assert len(data[0]) == 3 + + # now check for an error which occurred after enough trades + analysis.total_signals = 12 + analysis.false_entry_signals = 11 + analysis.false_exit_signals = 10 + instance = LookaheadAnalysis(lookahead_conf, strategy_obj) + instance.current_analysis = analysis + table, headers, data = (LookaheadAnalysisSubFunctions. + text_table_lookahead_analysis_instances(lookahead_conf, [instance])) + assert "error" in data[0][2] + + # clear the failed bias check so no error is shown + instance.failed_bias_check = False + table, headers, data = (LookaheadAnalysisSubFunctions. + text_table_lookahead_analysis_instances(lookahead_conf, [instance])) + assert data[0][0] == 'strategy_test_v3_with_lookahead_bias.py' + assert data[0][1] == 'strategy_test_v3_with_lookahead_bias' + assert data[0][2] # True + assert data[0][3] == 12 + assert data[0][4] == 11 + assert data[0][5] == 10 + assert data[0][6] == '' + + analysis.false_indicators.append('falseIndicator1') + analysis.false_indicators.append('falseIndicator2') + table, headers, data = (LookaheadAnalysisSubFunctions.
+ text_table_lookahead_analysis_instances(lookahead_conf, [instance])) + + assert data[0][6] == 'falseIndicator1, falseIndicator2' + + # check the number of returned rows + assert len(data) == 1 + + # check the row count with multiple instances + table, headers, data = (LookaheadAnalysisSubFunctions.text_table_lookahead_analysis_instances( + lookahead_conf, [instance, instance, instance])) + assert len(data) == 3 + + +def test_lookahead_helper_export_to_csv(lookahead_conf): + import pandas as pd + lookahead_conf['lookahead_analysis_exportfilename'] = "temp_csv_lookahead_analysis.csv" + + # just to be sure the test won't fail: remove the file if it already exists + # (repeat this at the end once again to clean up) + if Path(lookahead_conf['lookahead_analysis_exportfilename']).exists(): + Path(lookahead_conf['lookahead_analysis_exportfilename']).unlink() + + # 1st check: create a new file and verify its contents + analysis1 = Analysis() + analysis1.has_bias = True + analysis1.total_signals = 12 + analysis1.false_entry_signals = 11 + analysis1.false_exit_signals = 10 + analysis1.false_indicators.append('falseIndicator1') + analysis1.false_indicators.append('falseIndicator2') + lookahead_conf['lookahead_analysis_exportfilename'] = "temp_csv_lookahead_analysis.csv" + + strategy_obj1 = { + 'name': "strat1", + 'location': Path("file1.py"), + } + + instance1 = LookaheadAnalysis(lookahead_conf, strategy_obj1) + instance1.failed_bias_check = False + instance1.current_analysis = analysis1 + + LookaheadAnalysisSubFunctions.export_to_csv(lookahead_conf, [instance1]) + saved_data1 = pd.read_csv(lookahead_conf['lookahead_analysis_exportfilename']) + + expected_values1 = [ + [ + 'file1.py', 'strat1', True, + 12, 11, 10, + "falseIndicator1,falseIndicator2" + ], + ] + expected_columns = ['filename', 'strategy', 'has_bias', + 'total_signals', 'biased_entry_signals', 'biased_exit_signals', + 'biased_indicators'] + expected_data1 = pd.DataFrame(expected_values1, columns=expected_columns) + + assert Path(lookahead_conf['lookahead_analysis_exportfilename']).exists() + assert expected_data1.equals(saved_data1) + + # 2nd check: update the same strategy (which internally changed or is being retested) + expected_values2 = [ + [ + 'file1.py', 'strat1', False, + 22, 21, 20, + "falseIndicator3,falseIndicator4" + ], + ] + expected_data2 = pd.DataFrame(expected_values2, columns=expected_columns) + + analysis2 = Analysis() + analysis2.has_bias = False + analysis2.total_signals = 22 + analysis2.false_entry_signals = 21 + analysis2.false_exit_signals = 20 + analysis2.false_indicators.append('falseIndicator3') + analysis2.false_indicators.append('falseIndicator4') + + strategy_obj2 = { + 'name': "strat1", + 'location': Path("file1.py"), + } + + instance2 = LookaheadAnalysis(lookahead_conf, strategy_obj2) + instance2.failed_bias_check = False + instance2.current_analysis = analysis2 + + LookaheadAnalysisSubFunctions.export_to_csv(lookahead_conf, [instance2]) + saved_data2 = pd.read_csv(lookahead_conf['lookahead_analysis_exportfilename']) + + assert expected_data2.equals(saved_data2) + + # 3rd check: now we add a new row to an already existing file + expected_values3 = [ + [ + 'file1.py', 'strat1', False, + 22, 21, 20, + "falseIndicator3,falseIndicator4" + ], + [ + 'file3.py', 'strat3', True, + 32, 31, 30, "falseIndicator5,falseIndicator6" + ], + ] + + expected_data3 = pd.DataFrame(expected_values3, columns=expected_columns) + + analysis3 = Analysis() + analysis3.has_bias = True +
analysis3.total_signals = 32 + analysis3.false_entry_signals = 31 + analysis3.false_exit_signals = 30 + analysis3.false_indicators.append('falseIndicator5') + analysis3.false_indicators.append('falseIndicator6') + lookahead_conf['lookahead_analysis_exportfilename'] = "temp_csv_lookahead_analysis.csv" + + strategy_obj3 = { + 'name': "strat3", + 'location': Path("file3.py"), + } + + instance3 = LookaheadAnalysis(lookahead_conf, strategy_obj3) + instance3.failed_bias_check = False + instance3.current_analysis = analysis3 + + LookaheadAnalysisSubFunctions.export_to_csv(lookahead_conf, [instance3]) + saved_data3 = pd.read_csv(lookahead_conf['lookahead_analysis_exportfilename']) + assert expected_data3.equals(saved_data3) + + # remove the csv file after the test is done + if Path(lookahead_conf['lookahead_analysis_exportfilename']).exists(): + Path(lookahead_conf['lookahead_analysis_exportfilename']).unlink() + + +def test_initialize_single_lookahead_analysis(lookahead_conf, mocker, caplog): + mocker.patch('freqtrade.data.history.get_timerange', get_timerange) + mocker.patch(f'{EXMS}.get_fee', return_value=0.0) + mocker.patch(f'{EXMS}.get_min_pair_stake_amount', return_value=0.00001) + mocker.patch(f'{EXMS}.get_max_pair_stake_amount', return_value=float('inf')) + patch_exchange(mocker) + mocker.patch('freqtrade.plugins.pairlistmanager.PairListManager.whitelist', + PropertyMock(return_value=['UNITTEST/BTC'])) + lookahead_conf['pairs'] = ['UNITTEST/USDT'] + + lookahead_conf['timeframe'] = '5m' + lookahead_conf['timerange'] = '20180119-20180122' + start_mock = mocker.patch('freqtrade.optimize.lookahead_analysis.LookaheadAnalysis.start') + strategy_obj = { + 'name': "strategy_test_v3_with_lookahead_bias", + 'location': Path(lookahead_conf['strategy_path'], f"{lookahead_conf['strategy']}.py") + } + + instance = LookaheadAnalysisSubFunctions.initialize_single_lookahead_analysis( + lookahead_conf, strategy_obj) + assert log_has_re(r"Bias test of .* started\.", caplog) + assert start_mock.call_count == 1 + + assert instance.strategy_obj['name'] == "strategy_test_v3_with_lookahead_bias" + + +@pytest.mark.parametrize('scenario', [ + 'no_bias', 'bias1' +]) +def test_biased_strategy(lookahead_conf, mocker, caplog, scenario) -> None: + mocker.patch('freqtrade.data.history.get_timerange', get_timerange) + mocker.patch(f'{EXMS}.get_fee', return_value=0.0) + mocker.patch(f'{EXMS}.get_min_pair_stake_amount', return_value=0.00001) + mocker.patch(f'{EXMS}.get_max_pair_stake_amount', return_value=float('inf')) + patch_exchange(mocker) + mocker.patch('freqtrade.plugins.pairlistmanager.PairListManager.whitelist', + PropertyMock(return_value=['UNITTEST/BTC'])) + lookahead_conf['pairs'] = ['UNITTEST/USDT'] + + lookahead_conf['timeframe'] = '5m' + lookahead_conf['timerange'] = '20180119-20180122' + + # Patch the scenario parameter to allow for easy selection + mocker.patch('freqtrade.strategy.hyper.HyperStrategyMixin.load_params_from_file', + return_value={ + 'params': { + "buy": { + "scenario": scenario + } + } + }) + + strategy_obj = {'name': "strategy_test_v3_with_lookahead_bias"} + instance = LookaheadAnalysis(lookahead_conf, strategy_obj) + instance.start() + # Assert correct initialization + assert log_has_re(f"Strategy Parameter: scenario = {scenario}", caplog) + + # check non-biased strategy + if scenario == "no_bias": + assert not instance.current_analysis.has_bias + # check biased strategy + elif scenario == "bias1": + assert instance.current_analysis.has_bias + + +def test_config_overrides(lookahead_conf): +
lookahead_conf['max_open_trades'] = 0 + lookahead_conf['dry_run_wallet'] = 1 + lookahead_conf['pairs'] = ['BTC/USDT', 'ETH/USDT', 'SOL/USDT'] + lookahead_conf = LookaheadAnalysisSubFunctions.calculate_config_overrides(lookahead_conf) + + assert lookahead_conf['dry_run_wallet'] == 1000000000 + assert lookahead_conf['max_open_trades'] == 3 diff --git a/tests/optimize/test_optimize_reports.py b/tests/optimize/test_optimize_reports.py index 82e8a46fb..7b85e7978 100644 --- a/tests/optimize/test_optimize_reports.py +++ b/tests/optimize/test_optimize_reports.py @@ -14,15 +14,16 @@ from freqtrade.data.btanalysis import (get_latest_backtest_filename, load_backte load_backtest_stats) from freqtrade.edge import PairInfo from freqtrade.enums import ExitType -from freqtrade.optimize.optimize_reports import (_get_resample_from_period, generate_backtest_stats, - generate_daily_stats, generate_edge_table, - generate_exit_reason_stats, generate_pair_metrics, +from freqtrade.optimize.optimize_reports import (generate_backtest_stats, generate_daily_stats, + generate_edge_table, generate_exit_reason_stats, + generate_pair_metrics, generate_periodic_breakdown_stats, generate_strategy_comparison, generate_trading_stats, show_sorted_pairlist, store_backtest_analysis_results, store_backtest_stats, text_table_bt_results, text_table_exit_reason, text_table_strategy) +from freqtrade.optimize.optimize_reports.optimize_reports import _get_resample_from_period from freqtrade.resolvers.strategy_resolver import StrategyResolver from freqtrade.util import dt_ts from freqtrade.util.datetime_helpers import dt_from_ts, dt_utc @@ -209,7 +210,7 @@ def test_generate_backtest_stats(default_conf, testdatadir, tmpdir): def test_store_backtest_stats(testdatadir, mocker): - dump_mock = mocker.patch('freqtrade.optimize.optimize_reports.file_dump_json') + dump_mock = mocker.patch('freqtrade.optimize.optimize_reports.bt_storage.file_dump_json') store_backtest_stats(testdatadir, {'metadata': {}}, '2022_01_01_15_05_13') @@ -228,7 +229,8 @@ def test_store_backtest_stats(testdatadir, mocker): def test_store_backtest_candles(testdatadir, mocker): - dump_mock = mocker.patch('freqtrade.optimize.optimize_reports.file_dump_joblib') + dump_mock = mocker.patch( + 'freqtrade.optimize.optimize_reports.bt_storage.file_dump_joblib') candle_dict = {'DefStrat': {'UNITTEST/BTC': pd.DataFrame()}} diff --git a/tests/rpc/test_rpc.py b/tests/rpc/test_rpc.py index 405727d8c..31d7ae37b 100644 --- a/tests/rpc/test_rpc.py +++ b/tests/rpc/test_rpc.py @@ -703,15 +703,15 @@ def test_rpc_force_exit(default_conf, ticker, fee, mocker) -> None: rpc._rpc_force_exit(None) msg = rpc._rpc_force_exit('all') - assert msg == {'result': 'Created sell orders for all open trades.'} + assert msg == {'result': 'Created exit orders for all open trades.'} freqtradebot.enter_positions() msg = rpc._rpc_force_exit('all') - assert msg == {'result': 'Created sell orders for all open trades.'} + assert msg == {'result': 'Created exit orders for all open trades.'} freqtradebot.enter_positions() msg = rpc._rpc_force_exit('2') - assert msg == {'result': 'Created sell order for trade 2.'} + assert msg == {'result': 'Created exit order for trade 2.'} freqtradebot.state = State.STOPPED with pytest.raises(RPCException, match=r'.*trader is not running*'): @@ -761,27 +761,11 @@ def test_rpc_force_exit(default_conf, ticker, fee, mocker) -> None: freqtradebot.config['max_open_trades'] = 3 freqtradebot.enter_positions() - trade = Trade.session.scalars(select(Trade).filter(Trade.id == '2')).first() - 
amount = trade.amount - # make an limit-buy open trade, if there is no 'filled', don't sell it - mocker.patch( - f'{EXMS}.fetch_order', - return_value={ - 'status': 'open', - 'type': 'limit', - 'side': 'buy', - 'filled': None - } - ) - # check that the trade is called, which is done by ensuring exchange.cancel_order is called - msg = rpc._rpc_force_exit('4') - assert msg == {'result': 'Created sell order for trade 4.'} - assert cancel_order_mock.call_count == 2 - assert trade.amount == amount + cancel_order_mock.reset_mock() trade = Trade.session.scalars(select(Trade).filter(Trade.id == '3')).first() - - # make an limit-sell open trade + amount = trade.amount + # make a trade with an open limit-sell order mocker.patch( f'{EXMS}.fetch_order', return_value={ @@ -794,10 +778,54 @@ def test_rpc_force_exit(default_conf, ticker, fee, mocker) -> None: 'id': trade.orders[0].order_id, } ) + cancel_order_3 = mocker.patch( + f'{EXMS}.cancel_order_with_result', + return_value={ + 'status': 'canceled', + 'type': 'limit', + 'side': 'sell', + 'amount': amount, + 'remaining': amount, + 'filled': 0.0, + 'id': trade.orders[0].order_id, + } + ) msg = rpc._rpc_force_exit('3') - assert msg == {'result': 'Created sell order for trade 3.'} + assert msg == {'result': 'Created exit order for trade 3.'} # status quo, no exchange calls - assert cancel_order_mock.call_count == 3 + assert cancel_order_3.call_count == 1 + assert cancel_order_mock.call_count == 0 + + trade = Trade.session.scalars(select(Trade).filter(Trade.id == '2')).first() + amount = trade.amount + # make a trade with an open limit-buy order; if there is no 'filled', don't sell it + mocker.patch( + f'{EXMS}.fetch_order', + return_value={ + 'status': 'open', + 'type': 'limit', + 'side': 'buy', + 'filled': None + } + ) + cancel_order_4 = mocker.patch( + f'{EXMS}.cancel_order_with_result', + return_value={ + 'status': 'canceled', + 'type': 'limit', + 'side': 'sell', + 'amount': amount, + 'remaining': 0.0, + 'filled': amount, + 'id': trade.orders[0].order_id, + } + ) + # check that the open order is canceled by ensuring exchange.cancel_order_with_result is called + msg = rpc._rpc_force_exit('4') + assert msg == {'result': 'Created exit order for trade 4.'} + assert cancel_order_4.call_count == 1 + assert cancel_order_mock.call_count == 0 + assert trade.amount == amount def test_performance_handle(default_conf_usdt, ticker, fee, mocker) -> None: diff --git a/tests/rpc/test_rpc_apiserver.py b/tests/rpc/test_rpc_apiserver.py index 842981ad0..f793b1f9c 100644 --- a/tests/rpc/test_rpc_apiserver.py +++ b/tests/rpc/test_rpc_apiserver.py @@ -1333,7 +1333,7 @@ def test_api_forceexit(botclient, mocker, ticker, fee, markets): rc = client_post(client, f"{BASE_URI}/forceexit", data={"tradeid": "5", "ordertype": "market", "amount": 23}) assert_response(rc) - assert rc.json() == {'result': 'Created sell order for trade 5.'} + assert rc.json() == {'result': 'Created exit order for trade 5.'} Trade.rollback() trade = Trade.get_trades([Trade.id == 5]).first() @@ -1343,7 +1343,7 @@ def test_api_forceexit(botclient, mocker, ticker, fee, markets): rc = client_post(client, f"{BASE_URI}/forceexit", data={"tradeid": "5"}) assert_response(rc) - assert rc.json() == {'result': 'Created sell order for trade 5.'} + assert rc.json() == {'result': 'Created exit order for trade 5.'} Trade.rollback() trade = Trade.get_trades([Trade.id == 5]).first() @@ -1578,6 +1578,47 @@ def test_api_strategy(botclient): assert_response(rc, 500) + +def test_api_exchanges(botclient): + ftbot, client = botclient + + rc =
client_get(client, f"{BASE_URI}/exchanges") + assert_response(rc) + response = rc.json() + assert isinstance(response['exchanges'], list) + assert len(response['exchanges']) > 20 + okx = [x for x in response['exchanges'] if x['name'] == 'okx'][0] + assert okx == { + "name": "okx", + "valid": True, + "supported": True, + "comment": "", + "trade_modes": [ + { + "trading_mode": "spot", + "margin_mode": "" + }, + { + "trading_mode": "futures", + "margin_mode": "isolated" + } + ] + } + + mexc = [x for x in response['exchanges'] if x['name'] == 'mexc'][0] + assert mexc == { + "name": "mexc", + "valid": True, + "supported": False, + "comment": "", + "trade_modes": [ + { + "trading_mode": "spot", + "margin_mode": "" + } + ] + } + + def test_api_freqaimodels(botclient, tmpdir, mocker): ftbot, client = botclient ftbot.config['user_data_dir'] = Path(tmpdir) @@ -1616,6 +1657,122 @@ ]} + +def test_api_pairlists_available(botclient, tmpdir): + ftbot, client = botclient + ftbot.config['user_data_dir'] = Path(tmpdir) + + rc = client_get(client, f"{BASE_URI}/pairlists/available") + + assert_response(rc, 503) + assert rc.json()['detail'] == 'Bot is not in the correct state.' + + ftbot.config['runmode'] = RunMode.WEBSERVER + + rc = client_get(client, f"{BASE_URI}/pairlists/available") + assert_response(rc) + response = rc.json() + assert isinstance(response['pairlists'], list) + assert len(response['pairlists']) > 0 + + assert len([r for r in response['pairlists'] if r['name'] == 'AgeFilter']) == 1 + assert len([r for r in response['pairlists'] if r['name'] == 'VolumePairList']) == 1 + assert len([r for r in response['pairlists'] if r['name'] == 'StaticPairList']) == 1 + + volumepl = [r for r in response['pairlists'] if r['name'] == 'VolumePairList'][0] + assert volumepl['is_pairlist_generator'] is True + assert len(volumepl['params']) > 1 + age_pl = [r for r in response['pairlists'] if r['name'] == 'AgeFilter'][0] + assert age_pl['is_pairlist_generator'] is False + assert len(age_pl['params']) >= 2 + + +def test_api_pairlists_evaluate(botclient, tmpdir, mocker): + ftbot, client = botclient + ftbot.config['user_data_dir'] = Path(tmpdir) + + rc = client_get(client, f"{BASE_URI}/pairlists/evaluate/randomJob") + + assert_response(rc, 503) + assert rc.json()['detail'] == 'Bot is not in the correct state.' + + ftbot.config['runmode'] = RunMode.WEBSERVER + + rc = client_get(client, f"{BASE_URI}/pairlists/evaluate/randomJob") + assert_response(rc, 404) + assert rc.json()['detail'] == 'Job not found.' + + body = { + "pairlists": [ + {"method": "StaticPairList", }, + ], + "blacklist": [ + ], + "stake_currency": "BTC" + } + # Fail, already running + ApiBG.pairlist_running = True + rc = client_post(client, f"{BASE_URI}/pairlists/evaluate", body) + assert_response(rc, 400) + assert rc.json()['detail'] == 'Pairlist evaluation is already running.' + + # should start the run + ApiBG.pairlist_running = False + rc = client_post(client, f"{BASE_URI}/pairlists/evaluate", body) + assert_response(rc) + assert rc.json()['status'] == 'Pairlist evaluation started in background.' + job_id = rc.json()['job_id'] + + rc = client_get(client, f"{BASE_URI}/background/RandomJob") + assert_response(rc, 404) + assert rc.json()['detail'] == 'Job not found.'
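The pairlist-evaluation endpoints exercised above and below follow a submit-then-poll pattern: `POST /pairlists/evaluate` kicks off a background job and returns a `job_id`, `GET /background/{job_id}` reports on that job, and `GET /pairlists/evaluate/{job_id}` delivers the resulting whitelist. A minimal client sketch of that flow, assuming a locally running freqtrade API server in webserver mode; the URL, the credentials, and the `running` field are illustrative assumptions, while the endpoint paths and the `job_id`/`result.whitelist` fields mirror what the test asserts:

```python
import time

import requests

# Illustrative values - adjust URL and credentials to your local API server.
BASE_URI = "http://127.0.0.1:8080/api/v1"
AUTH = ("freqtrader", "changeme")

body = {
    "pairlists": [{"method": "StaticPairList"}],
    "blacklist": [],
    "stake_currency": "BTC",
}

# Submit the evaluation; the server answers immediately with a job id.
job = requests.post(f"{BASE_URI}/pairlists/evaluate", json=body, auth=AUTH).json()
job_id = job["job_id"]

# Poll the generic background endpoint until the job is no longer running.
# The 'running' field is an assumption - the test only asserts 'job_id'
# and 'job_category' on this response.
while requests.get(f"{BASE_URI}/background/{job_id}", auth=AUTH).json().get("running"):
    time.sleep(1)

# Fetch the final whitelist from the job-specific result endpoint.
result = requests.get(f"{BASE_URI}/pairlists/evaluate/{job_id}", auth=AUTH).json()
print(result["result"]["whitelist"])
```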
+ + rc = client_get(client, f"{BASE_URI}/background/{job_id}") + assert_response(rc) + response = rc.json() + assert response['job_id'] == job_id + assert response['job_category'] == 'pairlist' + + rc = client_get(client, f"{BASE_URI}/pairlists/evaluate/{job_id}") + assert_response(rc) + response = rc.json() + assert response['result']['whitelist'] == ['ETH/BTC', 'LTC/BTC', 'XRP/BTC', 'NEO/BTC',] + assert response['result']['length'] == 4 + + # Restart with additional filter, reducing the list to 2 + body['pairlists'].append({"method": "OffsetFilter", "number_assets": 2}) + rc = client_post(client, f"{BASE_URI}/pairlists/evaluate", body) + assert_response(rc) + assert rc.json()['status'] == 'Pairlist evaluation started in background.' + job_id = rc.json()['job_id'] + + rc = client_get(client, f"{BASE_URI}/pairlists/evaluate/{job_id}") + assert_response(rc) + response = rc.json() + assert response['result']['whitelist'] == ['ETH/BTC', 'LTC/BTC', ] + assert response['result']['length'] == 2 + # Patch __run_pairlist + plm = mocker.patch('freqtrade.rpc.api_server.api_background_tasks.__run_pairlist', + return_value=None) + body = { + "pairlists": [ + {"method": "StaticPairList", }, + ], + "blacklist": [ + ], + "stake_currency": "BTC", + "exchange": "randomExchange", + "trading_mode": "futures", + "margin_mode": "isolated", + } + rc = client_post(client, f"{BASE_URI}/pairlists/evaluate", body) + assert_response(rc) + assert plm.call_count == 1 + call_config = plm.call_args_list[0][0][1] + assert call_config['exchange']['name'] == 'randomExchange' + assert call_config['trading_mode'] == 'futures' + assert call_config['margin_mode'] == 'isolated' + + def test_list_available_pairs(botclient): ftbot, client = botclient @@ -1673,7 +1830,8 @@ def test_api_backtesting(botclient, mocker, fee, caplog, tmpdir): rc = client_get(client, f"{BASE_URI}/backtest") # Backtest prevented in default mode - assert_response(rc, 502) + assert_response(rc, 503) + assert rc.json()['detail'] == 'Bot is not in the correct state.' ftbot.config['runmode'] = RunMode.WEBSERVER # Backtesting not started yet @@ -1812,7 +1970,9 @@ def test_api_backtest_history(botclient, mocker, testdatadir): rc = client_get(client, f"{BASE_URI}/backtest/history") - assert_response(rc, 502) + assert_response(rc, 503) + assert rc.json()['detail'] == 'Bot is not in the correct state.'
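These hunks also change the guard on webserver-only endpoints from 502 to 503 and assert on its detail message. A simplified sketch of how such a run-mode guard can be expressed as a FastAPI dependency; the names here are hypothetical and this is not freqtrade's actual implementation:

```python
from fastapi import Depends, FastAPI, HTTPException

app = FastAPI()

# Hypothetical stand-in for the bot's current run mode.
BOT_RUNMODE = "dry_run"


def webserver_mode_only() -> None:
    """Reject requests unless the bot is running in webserver mode."""
    if BOT_RUNMODE != "webserver":
        # 503 (service unavailable) fits a temporary, state-dependent refusal
        # better than the 502 (bad gateway) returned previously.
        raise HTTPException(status_code=503, detail="Bot is not in the correct state.")


@app.get("/api/v1/backtest/history", dependencies=[Depends(webserver_mode_only)])
def backtest_history() -> list:
    return []
```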
+ ftbot.config['user_data_dir'] = testdatadir ftbot.config['runmode'] = RunMode.WEBSERVER @@ -1930,3 +2090,4 @@ def test_api_ws_send_msg(default_conf, mocker, caplog): finally: ApiServer.shutdown() + ApiServer.shutdown() diff --git a/tests/strategy/strats/lookahead_bias/strategy_test_v3_with_lookahead_bias.py b/tests/strategy/strats/lookahead_bias/strategy_test_v3_with_lookahead_bias.py new file mode 100644 index 000000000..e50d5d17b --- /dev/null +++ b/tests/strategy/strats/lookahead_bias/strategy_test_v3_with_lookahead_bias.py @@ -0,0 +1,58 @@ +# pragma pylint: disable=missing-docstring, invalid-name, pointless-string-statement +from pandas import DataFrame +from technical.indicators import ichimoku + +from freqtrade.strategy import IStrategy +from freqtrade.strategy.parameters import CategoricalParameter + + +class strategy_test_v3_with_lookahead_bias(IStrategy): + INTERFACE_VERSION = 3 + + # Minimal ROI designed for the strategy + minimal_roi = { + "40": 0.0, + "30": 0.01, + "20": 0.02, + "0": 0.04 + } + + # Optimal stoploss designed for the strategy + stoploss = -0.10 + + # Optimal timeframe for the strategy + timeframe = '5m' + scenario = CategoricalParameter(['no_bias', 'bias1'], default='bias1', space="buy") + + # Number of candles the strategy requires before producing valid signals + startup_candle_count: int = 20 + + def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame: + # bias is introduced here + if self.scenario.value != 'no_bias': + ichi = ichimoku(dataframe, + conversion_line_period=20, + base_line_periods=60, + laggin_span=120, + displacement=30) + dataframe['chikou_span'] = ichi['chikou_span'] + + return dataframe + + def populate_entry_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame: + if self.scenario.value == 'no_bias': + dataframe.loc[dataframe['close'].shift(10) < dataframe['close'], 'enter_long'] = 1 + else: + dataframe.loc[dataframe['close'].shift(-10) > dataframe['close'], 'enter_long'] = 1 + + return dataframe + + def populate_exit_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame: + if self.scenario.value == 'no_bias': + dataframe.loc[ + dataframe['close'].shift(10) < dataframe['close'], 'exit'] = 1 + else: + dataframe.loc[ + dataframe['close'].shift(-10) > dataframe['close'], 'exit'] = 1 + + return dataframe diff --git a/tests/test_configuration.py b/tests/test_configuration.py index 5b09abbd3..7808fb5c8 100644 --- a/tests/test_configuration.py +++ b/tests/test_configuration.py @@ -1,7 +1,5 @@ # pragma pylint: disable=missing-docstring, protected-access, invalid-name import json -import logging -import sys import warnings from copy import deepcopy from pathlib import Path @@ -23,8 +21,6 @@ from freqtrade.configuration.load_config import (load_config_file, load_file, lo from freqtrade.constants import DEFAULT_DB_DRYRUN_URL, DEFAULT_DB_PROD_URL, ENV_VAR_PREFIX from freqtrade.enums import RunMode from freqtrade.exceptions import OperationalException -from freqtrade.loggers import (FTBufferingHandler, FTStdErrStreamHandler, _set_loggers, - setup_logging, setup_logging_pre) from tests.conftest import (CURRENT_TEST_STRATEGY, log_has, log_has_re, patched_configuration_load_config_file) @@ -594,7 +590,7 @@ def test_cli_verbose_with_params(default_conf, mocker, caplog) -> None: patched_configuration_load_config_file(mocker, default_conf) # Prevent setting loggers - mocker.patch('freqtrade.loggers._set_loggers', MagicMock) + mocker.patch('freqtrade.loggers.set_loggers', MagicMock) arglist = ['trade', '-vvv'] args = 
Arguments(arglist).get_parsed_arg() @@ -605,127 +601,6 @@ def test_cli_verbose_with_params(default_conf, mocker, caplog) -> None: assert log_has('Verbosity set to 3', caplog) -def test_set_loggers() -> None: - # Reset Logging to Debug, otherwise this fails randomly as it's set globally - logging.getLogger('requests').setLevel(logging.DEBUG) - logging.getLogger("urllib3").setLevel(logging.DEBUG) - logging.getLogger('ccxt.base.exchange').setLevel(logging.DEBUG) - logging.getLogger('telegram').setLevel(logging.DEBUG) - - previous_value1 = logging.getLogger('requests').level - previous_value2 = logging.getLogger('ccxt.base.exchange').level - previous_value3 = logging.getLogger('telegram').level - - _set_loggers() - - value1 = logging.getLogger('requests').level - assert previous_value1 is not value1 - assert value1 is logging.INFO - - value2 = logging.getLogger('ccxt.base.exchange').level - assert previous_value2 is not value2 - assert value2 is logging.INFO - - value3 = logging.getLogger('telegram').level - assert previous_value3 is not value3 - assert value3 is logging.INFO - - _set_loggers(verbosity=2) - - assert logging.getLogger('requests').level is logging.DEBUG - assert logging.getLogger('ccxt.base.exchange').level is logging.INFO - assert logging.getLogger('telegram').level is logging.INFO - assert logging.getLogger('werkzeug').level is logging.INFO - - _set_loggers(verbosity=3, api_verbosity='error') - - assert logging.getLogger('requests').level is logging.DEBUG - assert logging.getLogger('ccxt.base.exchange').level is logging.DEBUG - assert logging.getLogger('telegram').level is logging.INFO - assert logging.getLogger('werkzeug').level is logging.ERROR - - -@pytest.mark.skipif(sys.platform == "win32", reason="does not run on windows") -def test_set_loggers_syslog(): - logger = logging.getLogger() - orig_handlers = logger.handlers - logger.handlers = [] - - config = {'verbosity': 2, - 'logfile': 'syslog:/dev/log', - } - - setup_logging_pre() - setup_logging(config) - assert len(logger.handlers) == 3 - assert [x for x in logger.handlers if type(x) == logging.handlers.SysLogHandler] - assert [x for x in logger.handlers if type(x) == FTStdErrStreamHandler] - assert [x for x in logger.handlers if type(x) == FTBufferingHandler] - # setting up logging again should NOT cause the loggers to be added a second time. - setup_logging(config) - assert len(logger.handlers) == 3 - # reset handlers to not break pytest - logger.handlers = orig_handlers - - -@pytest.mark.skipif(sys.platform == "win32", reason="does not run on windows") -def test_set_loggers_Filehandler(tmpdir): - logger = logging.getLogger() - orig_handlers = logger.handlers - logger.handlers = [] - logfile = Path(tmpdir) / 'ft_logfile.log' - config = {'verbosity': 2, - 'logfile': str(logfile), - } - - setup_logging_pre() - setup_logging(config) - assert len(logger.handlers) == 3 - assert [x for x in logger.handlers if type(x) == logging.handlers.RotatingFileHandler] - assert [x for x in logger.handlers if type(x) == FTStdErrStreamHandler] - assert [x for x in logger.handlers if type(x) == FTBufferingHandler] - # setting up logging again should NOT cause the loggers to be added a second time. 
- setup_logging(config) - assert len(logger.handlers) == 3 - # reset handlers to not break pytest - if logfile.exists: - logfile.unlink() - logger.handlers = orig_handlers - - -@pytest.mark.skip(reason="systemd is not installed on every system, so we're not testing this.") -def test_set_loggers_journald(mocker): - logger = logging.getLogger() - orig_handlers = logger.handlers - logger.handlers = [] - - config = {'verbosity': 2, - 'logfile': 'journald', - } - - setup_logging_pre() - setup_logging(config) - assert len(logger.handlers) == 3 - assert [x for x in logger.handlers if type(x).__name__ == "JournaldLogHandler"] - assert [x for x in logger.handlers if type(x) == FTStdErrStreamHandler] - # reset handlers to not break pytest - logger.handlers = orig_handlers - - -def test_set_loggers_journald_importerror(import_fails): - logger = logging.getLogger() - orig_handlers = logger.handlers - logger.handlers = [] - - config = {'verbosity': 2, - 'logfile': 'journald', - } - with pytest.raises(OperationalException, - match=r'You need the cysystemd python package.*'): - setup_logging(config) - logger.handlers = orig_handlers - - def test_set_logfile(default_conf, mocker, tmpdir): patched_configuration_load_config_file(mocker, default_conf) f = Path(tmpdir / "test_file.log") diff --git a/tests/test_freqtradebot.py b/tests/test_freqtradebot.py index 71f494372..3bfa5a127 100644 --- a/tests/test_freqtradebot.py +++ b/tests/test_freqtradebot.py @@ -1241,6 +1241,8 @@ def test_handle_stoploss_on_exchange(mocker, default_conf_usdt, fee, caplog, is_ 'type': 'stop_loss_limit', 'price': 3, 'average': 2, + 'filled': enter_order['amount'], + 'remaining': 0, 'amount': enter_order['amount'], }) mocker.patch(f'{EXMS}.fetch_stoploss_order', stoploss_order_hit) @@ -1603,13 +1605,13 @@ def test_create_stoploss_order_insufficient_funds( assert mock_insuf.call_count == 1 -@pytest.mark.parametrize("is_short,bid,ask,stop_price,amt,hang_price", [ - (False, [4.38, 4.16], [4.4, 4.17], ['2.0805', 4.4 * 0.95], 27.39726027, 3), - (True, [1.09, 1.21], [1.1, 1.22], ['2.321', 1.09 * 1.05], 27.27272727, 1.5), +@pytest.mark.parametrize("is_short,bid,ask,stop_price,hang_price", [ + (False, [4.38, 4.16], [4.4, 4.17], ['2.0805', 4.4 * 0.95], 3), + (True, [1.09, 1.21], [1.1, 1.22], ['2.321', 1.09 * 1.05], 1.5), ]) @pytest.mark.usefixtures("init_persistence") def test_handle_stoploss_on_exchange_trailing( - mocker, default_conf_usdt, fee, is_short, bid, ask, limit_order, stop_price, amt, hang_price + mocker, default_conf_usdt, fee, is_short, bid, ask, limit_order, stop_price, hang_price ) -> None: # When trailing stoploss is set enter_order = limit_order[entry_side(is_short)] @@ -1624,8 +1626,8 @@ def test_handle_stoploss_on_exchange_trailing( 'last': 2.19, }), create_order=MagicMock(side_effect=[ - {'id': enter_order['id']}, - {'id': exit_order['id']}, + enter_order, + exit_order, ]), get_fee=fee, ) @@ -1721,7 +1723,7 @@ def test_handle_stoploss_on_exchange_trailing( cancel_order_mock.assert_called_once_with('100', 'ETH/USDT') stoploss_order_mock.assert_called_once_with( - amount=pytest.approx(amt), + amount=30, pair='ETH/USDT', order_types=freqtrade.strategy.order_types, stop_price=stop_price[1], @@ -1990,7 +1992,7 @@ def test_tsl_on_exchange_compatible_with_edge(mocker, edge_conf, fee, limit_orde enter_order = limit_order['buy'] exit_order = limit_order['sell'] - + enter_order['average'] = 2.19 # When trailing stoploss is set stoploss = MagicMock(return_value={'id': '13434334', 'status': 'open'}) patch_RPCManager(mocker) @@ -2007,8 
+2009,8 @@ def test_tsl_on_exchange_compatible_with_edge(mocker, edge_conf, fee, limit_orde 'last': 2.19 }), create_order=MagicMock(side_effect=[ - {'id': enter_order['id']}, - {'id': exit_order['id']}, + enter_order, + exit_order, ]), get_fee=fee, create_stoploss=stoploss, @@ -2104,7 +2106,7 @@ def test_tsl_on_exchange_compatible_with_edge(mocker, edge_conf, fee, limit_orde assert trade.stop_loss == 4.4 * 0.99 cancel_order_mock.assert_called_once_with('100', 'NEO/BTC') stoploss_order_mock.assert_called_once_with( - amount=pytest.approx(11.41438356), + amount=30, pair='NEO/BTC', order_types=freqtrade.strategy.order_types, stop_price=4.4 * 0.99, @@ -2974,11 +2976,12 @@ def test_manage_open_orders_exit_usercustom( ) -> None: default_conf_usdt["unfilledtimeout"] = {"entry": 1440, "exit": 1440, "exit_timeout_count": 1} open_trade_usdt.open_order_id = limit_sell_order_old['id'] - order = Order.parse_from_ccxt_object(limit_sell_order_old, 'mocked', 'sell') - open_trade_usdt.orders[0] = order if is_short: limit_sell_order_old['side'] = 'buy' open_trade_usdt.is_short = is_short + open_exit_order = Order.parse_from_ccxt_object(limit_sell_order_old, 'mocked', + 'buy' if is_short else 'sell') + open_trade_usdt.orders[-1] = open_exit_order rpc_mock = patch_RPCManager(mocker) cancel_order_mock = MagicMock() diff --git a/tests/test_log_setup.py b/tests/test_log_setup.py new file mode 100644 index 000000000..bd3399615 --- /dev/null +++ b/tests/test_log_setup.py @@ -0,0 +1,150 @@ +import logging +import sys +from pathlib import Path + +import pytest + +from freqtrade.exceptions import OperationalException +from freqtrade.loggers import (FTBufferingHandler, FTStdErrStreamHandler, set_loggers, + setup_logging, setup_logging_pre) +from freqtrade.loggers.set_log_levels import (reduce_verbosity_for_bias_tester, + restore_verbosity_for_bias_tester) + + +def test_set_loggers() -> None: + # Reset Logging to Debug, otherwise this fails randomly as it's set globally + logging.getLogger('requests').setLevel(logging.DEBUG) + logging.getLogger("urllib3").setLevel(logging.DEBUG) + logging.getLogger('ccxt.base.exchange').setLevel(logging.DEBUG) + logging.getLogger('telegram').setLevel(logging.DEBUG) + + previous_value1 = logging.getLogger('requests').level + previous_value2 = logging.getLogger('ccxt.base.exchange').level + previous_value3 = logging.getLogger('telegram').level + + set_loggers() + + value1 = logging.getLogger('requests').level + assert previous_value1 is not value1 + assert value1 is logging.INFO + + value2 = logging.getLogger('ccxt.base.exchange').level + assert previous_value2 is not value2 + assert value2 is logging.INFO + + value3 = logging.getLogger('telegram').level + assert previous_value3 is not value3 + assert value3 is logging.INFO + + set_loggers(verbosity=2) + + assert logging.getLogger('requests').level is logging.DEBUG + assert logging.getLogger('ccxt.base.exchange').level is logging.INFO + assert logging.getLogger('telegram').level is logging.INFO + assert logging.getLogger('werkzeug').level is logging.INFO + + set_loggers(verbosity=3, api_verbosity='error') + + assert logging.getLogger('requests').level is logging.DEBUG + assert logging.getLogger('ccxt.base.exchange').level is logging.DEBUG + assert logging.getLogger('telegram').level is logging.INFO + assert logging.getLogger('werkzeug').level is logging.ERROR + + +@pytest.mark.skipif(sys.platform == "win32", reason="does not run on windows") +def test_set_loggers_syslog(): + logger = logging.getLogger() + orig_handlers = logger.handlers 
+ logger.handlers = [] + + config = {'verbosity': 2, + 'logfile': 'syslog:/dev/log', + } + + setup_logging_pre() + setup_logging(config) + assert len(logger.handlers) == 3 + assert [x for x in logger.handlers if type(x) == logging.handlers.SysLogHandler] + assert [x for x in logger.handlers if type(x) == FTStdErrStreamHandler] + assert [x for x in logger.handlers if type(x) == FTBufferingHandler] + # setting up logging again should NOT cause the loggers to be added a second time. + setup_logging(config) + assert len(logger.handlers) == 3 + # reset handlers to not break pytest + logger.handlers = orig_handlers + + +@pytest.mark.skipif(sys.platform == "win32", reason="does not run on windows") +def test_set_loggers_Filehandler(tmpdir): + logger = logging.getLogger() + orig_handlers = logger.handlers + logger.handlers = [] + logfile = Path(tmpdir) / 'ft_logfile.log' + config = {'verbosity': 2, + 'logfile': str(logfile), + } + + setup_logging_pre() + setup_logging(config) + assert len(logger.handlers) == 3 + assert [x for x in logger.handlers if type(x) == logging.handlers.RotatingFileHandler] + assert [x for x in logger.handlers if type(x) == FTStdErrStreamHandler] + assert [x for x in logger.handlers if type(x) == FTBufferingHandler] + # setting up logging again should NOT cause the loggers to be added a second time. + setup_logging(config) + assert len(logger.handlers) == 3 + # reset handlers to not break pytest + if logfile.exists(): + logfile.unlink() + logger.handlers = orig_handlers + + +@pytest.mark.skip(reason="systemd is not installed on every system, so we're not testing this.") +def test_set_loggers_journald(mocker): + logger = logging.getLogger() + orig_handlers = logger.handlers + logger.handlers = [] + + config = {'verbosity': 2, + 'logfile': 'journald', + } + + setup_logging_pre() + setup_logging(config) + assert len(logger.handlers) == 3 + assert [x for x in logger.handlers if type(x).__name__ == "JournaldLogHandler"] + assert [x for x in logger.handlers if type(x) == FTStdErrStreamHandler] + # reset handlers to not break pytest + logger.handlers = orig_handlers + + +def test_set_loggers_journald_importerror(import_fails): + logger = logging.getLogger() + orig_handlers = logger.handlers + logger.handlers = [] + + config = {'verbosity': 2, + 'logfile': 'journald', + } + with pytest.raises(OperationalException, + match=r'You need the cysystemd python package.*'): + setup_logging(config) + logger.handlers = orig_handlers + + +def test_reduce_verbosity(): + setup_logging_pre() + reduce_verbosity_for_bias_tester() + prior_level = logging.getLogger('freqtrade').getEffectiveLevel() + + assert logging.getLogger('freqtrade.resolvers').getEffectiveLevel() == logging.WARNING + assert logging.getLogger('freqtrade.strategy.hyper').getEffectiveLevel() == logging.WARNING + # base level wasn't changed + assert logging.getLogger('freqtrade').getEffectiveLevel() == prior_level + + restore_verbosity_for_bias_tester() + + assert logging.getLogger('freqtrade.resolvers').getEffectiveLevel() == prior_level + assert logging.getLogger('freqtrade.strategy.hyper').getEffectiveLevel() == prior_level + assert logging.getLogger('freqtrade').getEffectiveLevel() == prior_level + # base level wasn't changed diff --git a/tests/test_main.py b/tests/test_main.py index 59a5bb0f7..bdb3c2bba 100644 --- a/tests/test_main.py +++ b/tests/test_main.py @@ -118,7 +118,7 @@ def test_main_operational_exception(mocker, default_conf, caplog) -> None: def test_main_operational_exception1(mocker, default_conf, caplog) -> None:
patch_exchange(mocker) mocker.patch( - 'freqtrade.commands.list_commands.validate_exchanges', + 'freqtrade.commands.list_commands.list_available_exchanges', MagicMock(side_effect=ValueError('Oh snap!')) ) patched_configuration_load_config_file(mocker, default_conf) @@ -132,7 +132,7 @@ def test_main_operational_exception1(mocker, default_conf, caplog) -> None: assert log_has('Fatal exception!', caplog) assert not log_has_re(r'SIGINT.*', caplog) mocker.patch( - 'freqtrade.commands.list_commands.validate_exchanges', + 'freqtrade.commands.list_commands.list_available_exchanges', MagicMock(side_effect=KeyboardInterrupt) ) with pytest.raises(SystemExit):
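As background for the `strategy_test_v3_with_lookahead_bias` strategy added above: `shift(-10)` compares each candle with a close ten candles in the future, information a live bot can never have, which is exactly the kind of signal the new lookahead analysis flags. A self-contained pandas illustration on synthetic data (not freqtrade code):

```python
import numpy as np
import pandas as pd

# Synthetic close prices standing in for OHLCV candle data.
rng = np.random.default_rng(42)
df = pd.DataFrame({"close": 100 + rng.normal(0, 1, 200).cumsum()})

# Biased: shift(-10) peeks 10 candles into the future - information that is
# unavailable live, so backtest results look unrealistically good.
df["enter_biased"] = (df["close"].shift(-10) > df["close"]).astype(int)

# Unbiased: shift(10) only looks at candles that have already closed.
df["enter_ok"] = (df["close"].shift(10) < df["close"]).astype(int)

# The last 10 rows of the biased signal depend on candles that do not exist
# yet; the NaN comparison silently evaluates to False and hides the problem.
print(df[["enter_biased", "enter_ok"]].tail(12))
```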