Merge in develop changes

froggleston
2023-07-15 16:16:08 +01:00
218 changed files with 10245 additions and 4896 deletions

View File

@@ -1,11 +1,12 @@
FROM freqtradeorg/freqtrade:develop
FROM freqtradeorg/freqtrade:develop_freqairl
USER root
# Install dependencies
COPY requirements-dev.txt /freqtrade/
RUN apt-get update \
&& apt-get -y install git mercurial sudo vim build-essential \
&& apt-get -y install --no-install-recommends apt-utils dialog \
&& apt-get -y install --no-install-recommends git sudo vim build-essential \
&& apt-get clean \
&& mkdir -p /home/ftuser/.vscode-server /home/ftuser/.vscode-server-insiders /home/ftuser/commandhistory \
&& echo "export PROMPT_COMMAND='history -a'" >> /home/ftuser/.bashrc \

View File

@@ -19,23 +19,24 @@
"postCreateCommand": "freqtrade create-userdir --userdir user_data/",
"workspaceFolder": "/workspaces/freqtrade",
"settings": {
"terminal.integrated.shell.linux": "/bin/bash",
"editor.insertSpaces": true,
"files.trimTrailingWhitespace": true,
"[markdown]": {
"files.trimTrailingWhitespace": false,
"customizations": {
"settings": {
"terminal.integrated.shell.linux": "/bin/bash",
"editor.insertSpaces": true,
"files.trimTrailingWhitespace": true,
"[markdown]": {
"files.trimTrailingWhitespace": false,
},
"python.pythonPath": "/usr/local/bin/python",
},
"python.pythonPath": "/usr/local/bin/python",
},
// Add the IDs of extensions you want installed when the container is created.
"extensions": [
"ms-python.python",
"ms-python.vscode-pylance",
"davidanson.vscode-markdownlint",
"ms-azuretools.vscode-docker",
"vscode-icons-team.vscode-icons",
],
// Add the IDs of extensions you want installed when the container is created.
"extensions": [
"ms-python.python",
"ms-python.vscode-pylance",
"davidanson.vscode-markdownlint",
"ms-azuretools.vscode-docker",
"vscode-icons-team.vscode-icons",
],
}
}

View File

@@ -14,7 +14,7 @@ on:
- cron: '0 5 * * 4'
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
group: "${{ github.workflow }}-${{ github.ref }}-${{ github.event_name }}"
cancel-in-progress: true
permissions:
repository-projects: read
@@ -57,7 +57,7 @@ jobs:
- name: Installation - *nix
if: runner.os == 'Linux'
run: |
python -m pip install --upgrade pip==23.0.1 wheel
python -m pip install --upgrade pip wheel
export LD_LIBRARY_PATH=${HOME}/dependencies/lib:$LD_LIBRARY_PATH
export TA_LIBRARY_PATH=${HOME}/dependencies/lib
export TA_INCLUDE_PATH=${HOME}/dependencies/include
@@ -77,6 +77,17 @@ jobs:
# Allow failure for coveralls
coveralls || true
- name: Check for repository changes
run: |
if [ -n "$(git status --porcelain)" ]; then
echo "Repository is dirty, changes detected:"
git status
git diff
exit 1
else
echo "Repository is clean, no changes detected."
fi
- name: Backtesting (multi)
run: |
cp config_examples/config_bittrex.example.json config.json
@@ -125,6 +136,7 @@ jobs:
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
check-latest: true
- name: Cache_dependencies
uses: actions/cache@v3
@@ -148,7 +160,8 @@ jobs:
- name: Installation - macOS
if: runner.os == 'macOS'
run: |
brew update
# brew update
# TODO: Should be the brew upgrade
# homebrew fails to update python due to unlinking failures
# https://github.com/actions/runner-images/issues/6817
rm /usr/local/bin/2to3 || true
@@ -163,7 +176,7 @@ jobs:
rm /usr/local/bin/python3.11-config || true
brew install hdf5 c-blosc
python -m pip install --upgrade pip==23.0.1 wheel
python -m pip install --upgrade pip wheel
export LD_LIBRARY_PATH=${HOME}/dependencies/lib:$LD_LIBRARY_PATH
export TA_LIBRARY_PATH=${HOME}/dependencies/lib
export TA_INCLUDE_PATH=${HOME}/dependencies/include
@@ -174,6 +187,17 @@ jobs:
run: |
pytest --random-order
- name: Check for repository changes
run: |
if [ -n "$(git status --porcelain)" ]; then
echo "Repository is dirty, changes detected:"
git status
git diff
exit 1
else
echo "Repository is clean, no changes detected."
fi
- name: Backtesting
run: |
cp config_examples/config_bittrex.example.json config.json
@@ -237,6 +261,18 @@ jobs:
run: |
pytest --random-order
- name: Check for repository changes
run: |
if (git status --porcelain) {
Write-Host "Repository is dirty, changes detected:"
git status
git diff
exit 1
}
else {
Write-Host "Repository is clean, no changes detected."
}
- name: Backtesting
run: |
cp config_examples/config_bittrex.example.json config.json
@@ -302,7 +338,7 @@ jobs:
- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: "3.10"
python-version: "3.11"
- name: Documentation build
run: |
@@ -352,7 +388,7 @@ jobs:
- name: Installation - *nix
if: runner.os == 'Linux'
run: |
python -m pip install --upgrade pip==23.0.1 wheel
python -m pip install --upgrade pip wheel
export LD_LIBRARY_PATH=${HOME}/dependencies/lib:$LD_LIBRARY_PATH
export TA_LIBRARY_PATH=${HOME}/dependencies/lib
export TA_INCLUDE_PATH=${HOME}/dependencies/include
@@ -425,7 +461,7 @@ jobs:
python setup.py sdist bdist_wheel
- name: Publish to PyPI (Test)
uses: pypa/gh-action-pypi-publish@v1.8.5
uses: pypa/gh-action-pypi-publish@v1.8.7
if: (github.event_name == 'release')
with:
user: __token__
@@ -433,7 +469,7 @@ jobs:
repository_url: https://test.pypi.org/legacy/
- name: Publish to PyPI
uses: pypa/gh-action-pypi-publish@v1.8.5
uses: pypa/gh-action-pypi-publish@v1.8.7
if: (github.event_name == 'release')
with:
user: __token__

View File

@@ -8,17 +8,17 @@ repos:
# stages: [push]
- repo: https://github.com/pre-commit/mirrors-mypy
rev: "v1.0.1"
rev: "v1.3.0"
hooks:
- id: mypy
exclude: build_helpers
additional_dependencies:
- types-cachetools==5.3.0.5
- types-filelock==3.2.7
- types-requests==2.28.11.17
- types-requests==2.31.0.1
- types-tabulate==0.9.0.2
- types-python-dateutil==2.8.19.12
- SQLAlchemy==2.0.9
- types-python-dateutil==2.8.19.13
- SQLAlchemy==2.0.18
# stages: [push]
- repo: https://github.com/pycqa/isort
@@ -30,7 +30,7 @@ repos:
- repo: https://github.com/charliermarsh/ruff-pre-commit
# Ruff version.
rev: 'v0.0.255'
rev: 'v0.0.270'
hooks:
- id: ruff

View File

@@ -1,4 +1,4 @@
FROM python:3.10.11-slim-bullseye as base
FROM python:3.11.4-slim-bullseye as base
# Setup env
ENV LANG C.UTF-8
@@ -25,7 +25,7 @@ FROM base as python-deps
RUN apt-get update \
&& apt-get -y install build-essential libssl-dev git libffi-dev libgfortran5 pkg-config cmake gcc \
&& apt-get clean \
&& pip install --upgrade pip==23.0.1
&& pip install --upgrade pip wheel
# Install TA-lib
COPY build_helpers/* /tmp/

View File

@@ -1,7 +1,7 @@
# Downloads don't work automatically, since the URL is regenerated via javascript.
# Downloaded from https://www.lfd.uci.edu/~gohlke/pythonlibs/#ta-lib
python -m pip install --upgrade pip==23.0.1 wheel
python -m pip install --upgrade pip wheel
$pyv = python -c "import sys; print(f'{sys.version_info.major}.{sys.version_info.minor}')"

View File

@@ -6,6 +6,15 @@ services:
# image: freqtradeorg/freqtrade:develop
# Use plotting image
# image: freqtradeorg/freqtrade:develop_plot
# # Enable GPU Image and GPU Resources (only relevant for freqAI)
# # Make sure to uncomment the whole deploy section
# deploy:
# resources:
# reservations:
# devices:
# - driver: nvidia
# count: 1
# capabilities: [gpu]
# Build step - only needed when additional dependencies are needed
# build:
# context: .
@@ -16,7 +25,7 @@ services:
- "./user_data:/freqtrade/user_data"
# Expose api on port 8080 (localhost only)
# Please read the https://www.freqtrade.io/en/stable/rest-api/ documentation
# before enabling this.
# for more information.
ports:
- "127.0.0.1:8080:8080"
# Default command used when running `docker compose up`

View File

@@ -0,0 +1,36 @@
---
version: '3'
services:
freqtrade:
image: freqtradeorg/freqtrade:stable_freqaitorch
# # Enable GPU Image and GPU Resources
# # Make sure to uncomment the whole deploy section
# deploy:
# resources:
# reservations:
# devices:
# - driver: nvidia
# count: 1
# capabilities: [gpu]
# Build step - only needed when additional dependencies are needed
# build:
# context: .
# dockerfile: "./docker/Dockerfile.custom"
restart: unless-stopped
container_name: freqtrade
volumes:
- "./user_data:/freqtrade/user_data"
# Expose api on port 8080 (localhost only)
# Please read the https://www.freqtrade.io/en/stable/rest-api/ documentation
# for more information.
ports:
- "127.0.0.1:8080:8080"
# Default command used when running `docker compose up`
command: >
trade
--logfile /freqtrade/user_data/logs/freqtrade.log
--db-url sqlite:////freqtrade/user_data/tradesv3.sqlite
--config /freqtrade/user_data/config.json
--freqai-model XGBoostClassifier
--strategy SampleStrategy

View File

@@ -29,7 +29,7 @@ If all goes well, you should now see a `backtest-result-{timestamp}_signals.pkl`
`user_data/backtest_results` folder.
To analyze the entry/exit tags, we now need to use the `freqtrade backtesting-analysis` command
with `--analysis-groups` option provided with space-separated arguments (default `0 1 2`):
with `--analysis-groups` option provided with space-separated arguments:
``` bash
freqtrade backtesting-analysis -c <config.json> --analysis-groups 0 1 2 3 4 5
@@ -39,6 +39,7 @@ This command will read from the last backtesting results. The `--analysis-groups
used to specify the various tabular outputs showing the profit for each group or trade,
ranging from the simplest (0) to the most detailed per pair, per buy and per sell tag (4):
* 0: overall winrate and profit summary by enter_tag
* 1: profit summaries grouped by enter_tag
* 2: profit summaries grouped by enter_tag and exit_tag
* 3: profit summaries grouped by pair and enter_tag
@@ -115,3 +116,38 @@ For example, if your backtest timerange was `20220101-20221231` but you only wan
```bash
freqtrade backtesting-analysis -c <config.json> --timerange 20220101-20220201
```
### Printing out rejected signals
Use the `--rejected-signals` option to print out rejected signals.
```bash
freqtrade backtesting-analysis -c <config.json> --rejected-signals
```
### Writing tables to CSV
Some of the tabular outputs can become large, so printing them to the terminal can be impractical.
Use the `--analysis-to-csv` option to suppress printing tables to standard out and write them to CSV files instead.
```bash
freqtrade backtesting-analysis -c <config.json> --analysis-to-csv
```
By default, this will write one file per output table specified in the `backtesting-analysis` command, e.g.
```bash
freqtrade backtesting-analysis -c <config.json> --analysis-to-csv --rejected-signals --analysis-groups 0 1
```
This will write to `user_data/backtest_results`:
* rejected_signals.csv
* group_0.csv
* group_1.csv
To override where the files will be written, also specify the `--analysis-csv-path` option.
```bash
freqtrade backtesting-analysis -c <config.json> --analysis-to-csv --analysis-csv-path another/data/path/
```

View File

@@ -136,7 +136,7 @@ class MyAwesomeStrategy(IStrategy):
### Dynamic parameters
Parameters can also be defined dynamically, but must be available to the instance once the * [`bot_start()` callback](strategy-callbacks.md#bot-start) has been called.
Parameters can also be defined dynamically, but must be available to the instance once the [`bot_start()` callback](strategy-callbacks.md#bot-start) has been called.
``` python

View File

@@ -138,7 +138,7 @@ Mandatory parameters are marked as **Required**, which means that they are requi
| `stake_currency` | **Required.** Crypto-currency used for trading. <br> **Datatype:** String
| `stake_amount` | **Required.** Amount of crypto-currency your bot will use for each trade. Set it to `"unlimited"` to allow the bot to use all available balance. [More information below](#configuring-amount-per-trade). <br> **Datatype:** Positive float or `"unlimited"`.
| `tradable_balance_ratio` | Ratio of the total account balance the bot is allowed to trade. [More information below](#configuring-amount-per-trade). <br>*Defaults to `0.99` (99%).*<br> **Datatype:** Positive float between `0.1` and `1.0`.
| `available_capital` | Available starting capital for the bot. Useful when running multiple bots on the same exchange account.[More information below](#configuring-amount-per-trade). <br> **Datatype:** Positive float.
| `available_capital` | Available starting capital for the bot. Useful when running multiple bots on the same exchange account. [More information below](#configuring-amount-per-trade). <br> **Datatype:** Positive float.
| `amend_last_stake_amount` | Use reduced last stake amount if necessary. [More information below](#configuring-amount-per-trade). <br>*Defaults to `false`.* <br> **Datatype:** Boolean
| `last_stake_amount_min_ratio` | Defines minimum stake amount that has to be left and executed. Applies only to the last stake amount when it's amended to a reduced value (i.e. if `amend_last_stake_amount` is set to `true`). [More information below](#configuring-amount-per-trade). <br>*Defaults to `0.5`.* <br> **Datatype:** Float (as ratio)
| `amount_reserve_percent` | Reserve some amount in min pair stake amount. The bot will reserve `amount_reserve_percent` + stoploss value when calculating min pair stake amount in order to avoid possible trade refusals. <br>*Defaults to `0.05` (5%).* <br> **Datatype:** Positive Float as ratio.
@@ -155,25 +155,25 @@ Mandatory parameters are marked as **Required**, which means that they are requi
| `trailing_stop_positive_offset` | Offset on when to apply `trailing_stop_positive`. Percentage value which should be positive. More details in the [stoploss documentation](stoploss.md#trailing-stop-loss-only-once-the-trade-has-reached-a-certain-offset). [Strategy Override](#parameters-in-the-strategy). <br>*Defaults to `0.0` (no offset).* <br> **Datatype:** Float
| `trailing_only_offset_is_reached` | Only apply trailing stoploss when the offset is reached. [stoploss documentation](stoploss.md). [Strategy Override](#parameters-in-the-strategy). <br>*Defaults to `false`.* <br> **Datatype:** Boolean
| `fee` | Fee used during backtesting / dry-runs. Should normally not be configured, which has freqtrade fall back to the exchange default fee. Set as ratio (e.g. 0.001 = 0.1%). Fee is applied twice for each trade, once when buying, once when selling. <br> **Datatype:** Float (as ratio)
| `futures_funding_rate` | User-specified funding rate to be used when historical funding rates are not available from the exchange. This does not overwrite real historical rates. It is recommended that this be set to 0 unless you are testing a specific coin and you understand how the funding rate will affect freqtrade's profit calculations. [More information here](leverage.md#unavailable-funding-rates) <br>*Defaults to None.*<br> **Datatype:** Float
| `futures_funding_rate` | User-specified funding rate to be used when historical funding rates are not available from the exchange. This does not overwrite real historical rates. It is recommended that this be set to 0 unless you are testing a specific coin and you understand how the funding rate will affect freqtrade's profit calculations. [More information here](leverage.md#unavailable-funding-rates) <br>*Defaults to `None`.*<br> **Datatype:** Float
| `trading_mode` | Specifies if you want to trade regularly, trade with leverage, or trade contracts whose prices are derived from matching cryptocurrency prices. [leverage documentation](leverage.md). <br>*Defaults to `"spot"`.* <br> **Datatype:** String
| `margin_mode` | When trading with leverage, this determines if the collateral owned by the trader will be shared or isolated to each trading pair [leverage documentation](leverage.md). <br> **Datatype:** String
| `liquidation_buffer` | A ratio specifying how large of a safety net to place between the liquidation price and the stoploss to prevent a position from reaching the liquidation price [leverage documentation](leverage.md). <br>*Defaults to `0.05`.* <br> **Datatype:** Float
| | **Unfilled timeout**
| `unfilledtimeout.entry` | **Required.** How long (in minutes or seconds) the bot will wait for an unfilled entry order to complete, after which the order will be cancelled and repeated at current (new) price, as long as there is a signal. [Strategy Override](#parameters-in-the-strategy).<br> **Datatype:** Integer
| `unfilledtimeout.exit` | **Required.** How long (in minutes or seconds) the bot will wait for an unfilled exit order to complete, after which the order will be cancelled and repeated at current (new) price, as long as there is a signal. [Strategy Override](#parameters-in-the-strategy).<br> **Datatype:** Integer
| `unfilledtimeout.unit` | Unit to use in unfilledtimeout setting. Note: If you set unfilledtimeout.unit to "seconds", "internals.process_throttle_secs" must be inferior or equal to timeout [Strategy Override](#parameters-in-the-strategy). <br> *Defaults to `minutes`.* <br> **Datatype:** String
| `unfilledtimeout.unit` | Unit to use in unfilledtimeout setting. Note: If you set unfilledtimeout.unit to "seconds", "internals.process_throttle_secs" must be less than or equal to the timeout. [Strategy Override](#parameters-in-the-strategy). <br> *Defaults to `"minutes"`.* <br> **Datatype:** String
| `unfilledtimeout.exit_timeout_count` | How many times can exit orders time out. Once this number of timeouts is reached, an emergency exit is triggered. 0 to disable and allow unlimited order cancels. [Strategy Override](#parameters-in-the-strategy).<br>*Defaults to `0`.* <br> **Datatype:** Integer
| | **Pricing**
| `entry_pricing.price_side` | Select the side of the spread the bot should look at to get the entry rate. [More information below](#buy-price-side).<br> *Defaults to `same`.* <br> **Datatype:** String (either `ask`, `bid`, `same` or `other`).
| `entry_pricing.price_side` | Select the side of the spread the bot should look at to get the entry rate. [More information below](#entry-price).<br> *Defaults to `"same"`.* <br> **Datatype:** String (either `ask`, `bid`, `same` or `other`).
| `entry_pricing.price_last_balance` | **Required.** Interpolate the bidding price. More information [below](#entry-price-without-orderbook-enabled).
| `entry_pricing.use_order_book` | Enable entering using the rates in [Order Book Entry](#entry-price-with-orderbook-enabled). <br> *Defaults to `True`.*<br> **Datatype:** Boolean
| `entry_pricing.use_order_book` | Enable entering using the rates in [Order Book Entry](#entry-price-with-orderbook-enabled). <br> *Defaults to `true`.*<br> **Datatype:** Boolean
| `entry_pricing.order_book_top` | Bot will use the top N rate in Order Book "price_side" to enter a trade. I.e. a value of 2 will allow the bot to pick the 2nd entry in [Order Book Entry](#entry-price-with-orderbook-enabled). <br>*Defaults to `1`.* <br> **Datatype:** Positive Integer
| `entry_pricing. check_depth_of_market.enabled` | Do not enter if the difference of buy orders and sell orders is met in Order Book. [Check market depth](#check-depth-of-market). <br>*Defaults to `false`.* <br> **Datatype:** Boolean
| `entry_pricing. check_depth_of_market.bids_to_ask_delta` | The difference ratio of buy orders and sell orders found in Order Book. A value below 1 means sell order size is greater, while a value greater than 1 means buy order size is higher. [Check market depth](#check-depth-of-market) <br> *Defaults to `0`.* <br> **Datatype:** Float (as ratio)
| `exit_pricing.price_side` | Select the side of the spread the bot should look at to get the exit rate. [More information below](#exit-price-side).<br> *Defaults to `same`.* <br> **Datatype:** String (either `ask`, `bid`, `same` or `other`).
| `exit_pricing.price_side` | Select the side of the spread the bot should look at to get the exit rate. [More information below](#exit-price-side).<br> *Defaults to `"same"`.* <br> **Datatype:** String (either `ask`, `bid`, `same` or `other`).
| `exit_pricing.price_last_balance` | Interpolate the exiting price. More information [below](#exit-price-without-orderbook-enabled).
| `exit_pricing.use_order_book` | Enable exiting of open trades using [Order Book Exit](#exit-price-with-orderbook-enabled). <br> *Defaults to `True`.*<br> **Datatype:** Boolean
| `exit_pricing.use_order_book` | Enable exiting of open trades using [Order Book Exit](#exit-price-with-orderbook-enabled). <br> *Defaults to `true`.*<br> **Datatype:** Boolean
| `exit_pricing.order_book_top` | Bot will use the top N rate in Order Book "price_side" to exit. I.e. a value of 2 will allow the bot to pick the 2nd ask rate in [Order Book Exit](#exit-price-with-orderbook-enabled)<br>*Defaults to `1`.* <br> **Datatype:** Positive Integer
| `custom_price_max_distance_ratio` | Configure maximum distance ratio between current and custom entry or exit price. <br>*Defaults to `0.02` (2%).*<br> **Datatype:** Positive float
| | **TODO**
@@ -199,10 +199,10 @@ Mandatory parameters are marked as **Required**, which means that they are requi
| `exchange.ccxt_sync_config` | Additional CCXT parameters passed to the regular (sync) ccxt instance. Parameters may differ from exchange to exchange and are documented in the [ccxt documentation](https://ccxt.readthedocs.io/en/latest/manual.html#instantiation) <br> **Datatype:** Dict
| `exchange.ccxt_async_config` | Additional CCXT parameters passed to the async ccxt instance. Parameters may differ from exchange to exchange and are documented in the [ccxt documentation](https://ccxt.readthedocs.io/en/latest/manual.html#instantiation) <br> **Datatype:** Dict
| `exchange.markets_refresh_interval` | The interval in minutes in which markets are reloaded. <br>*Defaults to `60` minutes.* <br> **Datatype:** Positive Integer
| `exchange.skip_pair_validation` | Skip pairlist validation on startup.<br>*Defaults to `false`<br> **Datatype:** Boolean
| `exchange.skip_open_order_update` | Skips open order updates on startup should the exchange cause problems. Only relevant in live conditions.<br>*Defaults to `false`<br> **Datatype:** Boolean
| `exchange.skip_pair_validation` | Skip pairlist validation on startup.<br>*Defaults to `false`*<br> **Datatype:** Boolean
| `exchange.skip_open_order_update` | Skips open order updates on startup should the exchange cause problems. Only relevant in live conditions.<br>*Defaults to `false`*<br> **Datatype:** Boolean
| `exchange.unknown_fee_rate` | Fallback value to use when calculating trading fees. This can be useful for exchanges which have fees in non-tradable currencies. The value provided here will be multiplied with the "fee cost".<br>*Defaults to `None`*<br> **Datatype:** float
| `exchange.log_responses` | Log relevant exchange responses. For debug mode only - use with care.<br>*Defaults to `false`<br> **Datatype:** Boolean
| `exchange.log_responses` | Log relevant exchange responses. For debug mode only - use with care.<br>*Defaults to `false`*<br> **Datatype:** Boolean
| `experimental.block_bad_exchanges` | Block exchanges known to not work with freqtrade. Leave on default unless you want to test if that exchange works now. <br>*Defaults to `true`.* <br> **Datatype:** Boolean
| | **Plugins**
| `edge.*` | Please refer to [edge configuration document](edge.md) for detailed explanation of all possible configuration options.
@@ -213,7 +213,7 @@ Mandatory parameters are marked as **Required**, which means that they are requi
| `telegram.token` | Your Telegram bot token. Only required if `telegram.enabled` is `true`. <br>**Keep it secret, do not disclose publicly.** <br> **Datatype:** String
| `telegram.chat_id` | Your personal Telegram account id. Only required if `telegram.enabled` is `true`. <br>**Keep it secret, do not disclose publicly.** <br> **Datatype:** String
| `telegram.balance_dust_level` | Dust-level (in stake currency) - currencies with a balance below this will not be shown by `/balance`. <br> **Datatype:** float
| `telegram.reload` | Allow "reload" buttons on telegram messages. <br>*Defaults to `True`.<br> **Datatype:** boolean
| `telegram.reload` | Allow "reload" buttons on telegram messages. <br>*Defaults to `true`.*<br> **Datatype:** boolean
| `telegram.notification_settings.*` | Detailed notification settings. Refer to the [telegram documentation](telegram-usage.md) for details.<br> **Datatype:** dictionary
| `telegram.allow_custom_messages` | Enable the sending of Telegram messages from strategies via the dataprovider.send_msg() function. <br> **Datatype:** Boolean
| | **Webhook**
@@ -682,16 +682,14 @@ To use a proxy for exchange connections - you will have to define the proxies as
{
"exchange": {
"ccxt_config": {
"aiohttp_proxy": "http://addr:port",
"proxies": {
"http": "http://addr:port",
"https": "http://addr:port"
},
"httpsProxy": "http://addr:port",
}
}
}
```
For more information on available proxy types, please consult the [ccxt proxy documentation](https://docs.ccxt.com/#/README?id=proxy).
## Next step
Now that you have configured your config.json, the next step is to [start your bot](bot-usage.md).

View File

@@ -6,7 +6,7 @@ To download data (candles / OHLCV) needed for backtesting and hyperoptimization
If no additional parameter is specified, freqtrade will download data for `"1m"` and `"5m"` timeframes for the last 30 days.
Exchange and pairs will come from `config.json` (if specified using `-c/--config`).
Otherwise `--exchange` becomes mandatory.
Without provided configuration, `--exchange` becomes mandatory.
You can use a relative timerange (`--days 20`) or an absolute starting point (`--timerange 20200101-`). For incremental downloads, the relative approach should be used.
@@ -83,40 +83,47 @@ Common arguments:
```
!!! Tip "Downloading all data for one quote currency"
Often, you'll want to download data for all pairs of a specific quote-currency. In such cases, you can use the following shorthand:
`freqtrade download-data --exchange binance --pairs .*/USDT <...>`. The provided "pairs" string will be expanded to contain all active pairs on the exchange.
To also download data for inactive (delisted) pairs, add `--include-inactive-pairs` to the command.
!!! Note "Startup period"
`download-data` is a strategy-independent command. The idea is to download a big chunk of data once, and then iteratively increase the amount of data stored.
For that reason, `download-data` does not care about the "startup-period" defined in a strategy. It's up to the user to download additional days if the backtest should start at a specific point in time (while respecting startup period).
### Pairs file
### Start download
As an alternative to the whitelist from `config.json`, a `pairs.json` file can be used.
If you are using Binance for example:
- create a directory `user_data/data/binance` and copy or create the `pairs.json` file in that directory.
- update the `pairs.json` file to contain the currency pairs you are interested in.
A very simple command (assuming an available `config.json` file) can look as follows.
```bash
mkdir -p user_data/data/binance
touch user_data/data/binance/pairs.json
freqtrade download-data --exchange binance
```
The format of the `pairs.json` file is a simple json list.
Mixing different stake-currencies is allowed for this file, since it's only used for downloading.
This will download historical candle (OHLCV) data for all the currency pairs defined in the configuration.
``` json
[
"ETH/BTC",
"ETH/USDT",
"BTC/USDT",
"XRP/ETH"
]
Alternatively, specify the pairs directly
```bash
freqtrade download-data --exchange binance --pairs ETH/USDT XRP/USDT BTC/USDT
```
!!! Tip "Downloading all data for one quote currency"
Often, you'll want to download data for all pairs of a specific quote-currency. In such cases, you can use the following shorthand:
`freqtrade download-data --exchange binance --pairs .*/USDT <...>`. The provided "pairs" string will be expanded to contain all active pairs on the exchange.
To also download data for inactive (delisted) pairs, add `--include-inactive-pairs` to the command.
or as regex (in this case, to download all active USDT pairs)
```bash
freqtrade download-data --exchange binance --pairs .*/USDT
```
### Other Notes
* To use a different directory than the exchange specific default, use `--datadir user_data/data/some_directory`.
* To change the exchange used to download the historical data from, please use a different configuration file (you'll probably need to adjust rate limits etc.)
* To use `pairs.json` from some other directory, use `--pairs-file some_other_dir/pairs.json`.
* To download historical candle (OHLCV) data for only 10 days, use `--days 10` (defaults to 30 days).
* To download historical candle (OHLCV) data from a fixed starting point, use `--timerange 20200101-` - which will download all data from January 1st, 2020.
* Use `--timeframes` to specify which timeframes to download historical candle (OHLCV) data for. Default is `--timeframes 1m 5m` which will download 1-minute and 5-minute data.
* To use exchange, timeframe and list of pairs as defined in your configuration file, use the `-c/--config` option. With this, the script uses the whitelist defined in the config as the list of currency pairs to download data for and does not require the pairs.json file. You can combine `-c/--config` with most other options.
??? Note "Permission denied errors"
If your configuration directory `user_data` was made by docker, you may get the following error:
@@ -131,39 +138,7 @@ Mixing different stake-currencies is allowed for this file, since it's only used
sudo chown -R $UID:$GID user_data
```
### Start download
Then run:
```bash
freqtrade download-data --exchange binance
```
This will download historical candle (OHLCV) data for all the currency pairs you defined in `pairs.json`.
Alternatively, specify the pairs directly
```bash
freqtrade download-data --exchange binance --pairs ETH/USDT XRP/USDT BTC/USDT
```
or as regex (to download all active USDT pairs)
```bash
freqtrade download-data --exchange binance --pairs .*/USDT
```
### Other Notes
- To use a different directory than the exchange specific default, use `--datadir user_data/data/some_directory`.
- To change the exchange used to download the historical data from, please use a different configuration file (you'll probably need to adjust rate limits etc.)
- To use `pairs.json` from some other directory, use `--pairs-file some_other_dir/pairs.json`.
- To download historical candle (OHLCV) data for only 10 days, use `--days 10` (defaults to 30 days).
- To download historical candle (OHLCV) data from a fixed starting point, use `--timerange 20200101-` - which will download all data from January 1st, 2020.
- Use `--timeframes` to specify which timeframes to download historical candle (OHLCV) data for. Default is `--timeframes 1m 5m` which will download 1-minute and 5-minute data.
- To use exchange, timeframe and list of pairs as defined in your configuration file, use the `-c/--config` option. With this, the script uses the whitelist defined in the config as the list of currency pairs to download data for and does not require the pairs.json file. You can combine `-c/--config` with most other options.
#### Download additional data before the current timerange
### Download additional data before the current timerange
Assuming you downloaded all data from 2022 (`--timerange 20220101-`) - but you'd now like to also backtest with earlier data.
You can do so by using the `--prepend` flag, combined with `--timerange` - specifying an end-date.
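For example (an illustrative command, assuming Binance data for these pairs already exists from 2022 onwards):
```bash
freqtrade download-data --exchange binance --pairs ETH/USDT XRP/USDT --prepend --timerange 20210101-20220101
```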
@@ -238,7 +213,36 @@ Size has been taken from the BTC/USDT 1m spot combination for the timerange spec
To have a best performance/size mix, we recommend the use of either feather or parquet.
#### Sub-command convert data
### Pairs file
As an alternative to the whitelist from `config.json`, a `pairs.json` file can be used.
If you are using Binance for example:
* create a directory `user_data/data/binance` and copy or create the `pairs.json` file in that directory.
* update the `pairs.json` file to contain the currency pairs you are interested in.
```bash
mkdir -p user_data/data/binance
touch user_data/data/binance/pairs.json
```
The format of the `pairs.json` file is a simple json list.
Mixing different stake-currencies is allowed for this file, since it's only used for downloading.
``` json
[
"ETH/BTC",
"ETH/USDT",
"BTC/USDT",
"XRP/ETH"
]
```
!!! Note
The `pairs.json` file is only used when no configuration is loaded (implicitly by naming, or via `--config` flag).
You can force the usage of this file via `--pairs-file pairs.json` - however, we recommend using the pairlist from within the configuration, either via the `exchange.pair_whitelist` or `pairs` setting.
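For example, to use the file created above explicitly (a sketch):
```bash
freqtrade download-data --exchange binance --pairs-file user_data/data/binance/pairs.json
```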
## Sub-command convert data
```
usage: freqtrade convert-data [-h] [-v] [--logfile FILE] [-V] [-c PATH]
@@ -290,7 +294,7 @@ Common arguments:
```
##### Example converting data
### Example converting data
The following command will convert all candle (OHLCV) data available in `~/.freqtrade/data/binance` from json to jsongz, saving disk space in the process.
It'll also remove original json data files (`--erase` parameter).
@@ -299,7 +303,7 @@ It'll also remove original json data files (`--erase` parameter).
freqtrade convert-data --format-from json --format-to jsongz --datadir ~/.freqtrade/data/binance -t 5m 15m --erase
```
#### Sub-command convert trade data
## Sub-command convert trade data
```
usage: freqtrade convert-trade-data [-h] [-v] [--logfile FILE] [-V] [-c PATH]
@@ -342,7 +346,7 @@ Common arguments:
```
##### Example converting trades
### Example converting trades
The following command will convert all available trade-data in `~/.freqtrade/data/kraken` from jsongz to json.
It'll also remove original jsongz data files (`--erase` parameter).
@@ -351,7 +355,7 @@ It'll also remove original jsongz data files (`--erase` parameter).
freqtrade convert-trade-data --format-from jsongz --format-to json --datadir ~/.freqtrade/data/kraken --erase
```
### Sub-command trades to ohlcv
## Sub-command trades to ohlcv
When you need to use `--dl-trades` (kraken only) to download data, conversion of trades data to ohlcv data is the last step.
This command will allow you to repeat this last step for additional timeframes without re-downloading the data.
@@ -400,13 +404,13 @@ Common arguments:
```
#### Example trade-to-ohlcv conversion
### Example trade-to-ohlcv conversion
``` bash
freqtrade trades-to-ohlcv --exchange kraken -t 5m 1h 1d --pairs BTC/EUR ETH/EUR
```
### Sub-command list-data
## Sub-command list-data
You can get a list of downloaded data using the `list-data` sub-command.
@@ -451,7 +455,7 @@ Common arguments:
```
#### Example list-data
### Example list-data
```bash
> freqtrade list-data --userdir ~/.freqtrade/user_data/
@@ -465,7 +469,7 @@ ETH/BTC 5m, 15m, 30m, 1h, 2h, 4h, 6h, 12h, 1d
ETH/USDT 5m, 15m, 30m, 1h, 2h, 4h
```
### Trades (tick) data
## Trades (tick) data
By default, `download-data` sub-command downloads Candles (OHLCV) data. Some exchanges also provide historic trade-data via their API.
This data can be useful if you need many different timeframes, since it is only downloaded once, and then resampled locally to the desired timeframes.
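For example, to download trade data and resample it locally (an illustrative command; `--dl-trades` is noted above as kraken-only):
```bash
freqtrade download-data --exchange kraken --pairs BTC/EUR ETH/EUR --days 20 --dl-trades
```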

View File

@@ -327,18 +327,18 @@ To check how the new exchange behaves, you can use the following snippet:
``` python
import ccxt
from datetime import datetime
from datetime import datetime, timezone
from freqtrade.data.converter import ohlcv_to_dataframe
ct = ccxt.binance()
ct = ccxt.binance() # Use the exchange you're testing
timeframe = "1d"
pair = "XLM/BTC" # Make sure to use a pair that exists on that exchange!
pair = "BTC/USDT" # Make sure to use a pair that exists on that exchange!
raw = ct.fetch_ohlcv(pair, timeframe=timeframe)
# convert to dataframe
df1 = ohlcv_to_dataframe(raw, timeframe, pair=pair, drop_incomplete=False)
print(df1.tail(1))
print(datetime.utcnow())
print(datetime.now(timezone.utc))
```
``` output
@@ -453,7 +453,13 @@ Once the PR against stable is merged (best right after merging):
* Use the button "Draft a new release" in the Github UI (subsection releases).
* Use the version-number specified as tag.
* Use "stable" as reference (this step comes after the above PR is merged).
* Use the above changelog as release comment (as codeblock)
* Use the above changelog as release comment (as codeblock).
* Use the snippet below for the new release.
??? Tip "Release template"
````
--8<-- "includes/release_template.md"
````
## Releases

View File

@@ -142,6 +142,13 @@ To fix this, redefine order types in the strategy to use "limit" instead of "mar
The same fix should be applied in the configuration file, if order types are defined in your custom config rather than in the strategy.
### I'm trying to start the bot live, but get an API permission error
Errors like `Invalid API-key, IP, or permissions for action` mean exactly what they say.
Your API key is either invalid (copy/paste error? check for leading/trailing spaces in the config), expired, or the IP you're running the bot from is not enabled in the Exchange's API console.
Usually, the permission "Spot Trading" (or the equivalent in the exchange you use) will be necessary.
Futures will usually have to be enabled specifically.
### How do I search the bot logs for something?
By default, the bot writes its log to the stderr stream. This is done so that you can easily separate the bot's diagnostic messages from Backtesting, Edge and Hyperopt results, the output of the various other Freqtrade utility sub-commands, and the output of any custom `print()` calls you may have inserted into your strategy. So if you need to search the log messages with the grep utility, you need to redirect stderr to stdout and disregard stdout.
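For example (a minimal sketch - the search pattern is just a placeholder):
```bash
freqtrade trade -c config.json 2>&1 >/dev/null | grep 'something'
```
The order of redirections matters: `2>&1` first duplicates stderr onto the pipe, then `>/dev/null` discards the original stdout, so grep sees only the log messages.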

View File

@@ -43,16 +43,16 @@ The FreqAI strategy requires including the following lines of code in the standa
def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
# the model will return all labels created by user in `set_freqai_labels()`
# the model will return all labels created by user in `set_freqai_targets()`
# (& appended targets), an indication of whether or not the prediction should be accepted,
# the target mean/std values for each of the labels created by user in
# `feature_engineering_*` for each training period.
# `set_freqai_targets()` for each training period.
dataframe = self.freqai.start(dataframe, metadata, self)
return dataframe
def feature_engineering_expand_all(self, dataframe, period, **kwargs):
def feature_engineering_expand_all(self, dataframe: DataFrame, period, **kwargs) -> DataFrame:
"""
*Only functional with FreqAI enabled strategies*
This function will automatically expand the defined features on the config defined
@@ -77,7 +77,7 @@ The FreqAI strategy requires including the following lines of code in the standa
return dataframe
def feature_engineering_expand_basic(self, dataframe, **kwargs):
def feature_engineering_expand_basic(self, dataframe: DataFrame, **kwargs) -> DataFrame:
"""
*Only functional with FreqAI enabled strategies*
This function will automatically expand the defined features on the config defined
@@ -101,7 +101,7 @@ The FreqAI strategy requires including the following lines of code in the standa
dataframe["%-raw_price"] = dataframe["close"]
return dataframe
def feature_engineering_standard(self, dataframe, **kwargs):
def feature_engineering_standard(self, dataframe: DataFrame, **kwargs) -> DataFrame:
"""
*Only functional with FreqAI enabled strategies*
This optional function will be called once with the dataframe of the base timeframe.
@@ -122,7 +122,7 @@ The FreqAI strategy requires including the following lines of code in the standa
dataframe["%-hour_of_day"] = (dataframe["date"].dt.hour + 1) / 25
return dataframe
def set_freqai_targets(self, dataframe, **kwargs):
def set_freqai_targets(self, dataframe: DataFrame, **kwargs) -> DataFrame:
"""
*Only functional with FreqAI enabled strategies*
Required function to set the targets for the model.
@@ -139,6 +139,7 @@ The FreqAI strategy requires including the following lines of code in the standa
/ dataframe["close"]
- 1
)
return dataframe
```
Notice how `feature_engineering_*()` is where [features](freqai-feature-engineering.md#feature-engineering) are added, while `set_freqai_targets()` adds the labels/targets. A full example strategy is available in `templates/FreqaiExampleStrategy.py`.
@@ -159,7 +160,7 @@ Below are the values you can expect to include/use inside a typical strategy dat
|------------|-------------|
| `df['&*']` | Any dataframe column prepended with `&` in `set_freqai_targets()` is treated as a training target (label) inside FreqAI (typically following the naming convention `&-s*`). For example, to predict the close price 40 candles into the future, you would set `df['&-s_close'] = df['close'].shift(-self.freqai_info["feature_parameters"]["label_period_candles"])` with `"label_period_candles": 40` in the config. FreqAI makes the predictions and gives them back under the same key (`df['&-s_close']`) to be used in `populate_entry/exit_trend()`. <br> **Datatype:** Depends on the output of the model.
| `df['&*_std/mean']` | Standard deviation and mean values of the defined labels during training (or live tracking with `fit_live_predictions_candles`). Commonly used to understand the rarity of a prediction (use the z-score as shown in `templates/FreqaiExampleStrategy.py` and explained [here](#creating-a-dynamic-target-threshold) to evaluate how often a particular prediction was observed during training or historically with `fit_live_predictions_candles`). <br> **Datatype:** Float.
| `df['do_predict']` | Indication of an outlier data point. The return value is integer between -2 and 2, which lets you know if the prediction is trustworthy or not. `do_predict==1` means that the prediction is trustworthy. If the Dissimilarity Index (DI, see details [here](freqai-feature-engineering.md#identifying-outliers-with-the-dissimilarity-index-di)) of the input data point is above the threshold defined in the config, FreqAI will subtract 1 from `do_predict`, resulting in `do_predict==0`. If `use_SVM_to_remove_outliers()` is active, the Support Vector Machine (SVM, see details [here](freqai-feature-engineering.md#identifying-outliers-using-a-support-vector-machine-svm)) may also detect outliers in training and prediction data. In this case, the SVM will also subtract 1 from `do_predict`. If the input data point was considered an outlier by the SVM but not by the DI, or vice versa, the result will be `do_predict==0`. If both the DI and the SVM considers the input data point to be an outlier, the result will be `do_predict==-1`. As with the SVM, if `use_DBSCAN_to_remove_outliers` is active, DBSCAN (see details [here](freqai-feature-engineering.md#identifying-outliers-with-dbscan)) may also detect outliers and subtract 1 from `do_predict`. Hence, if both the SVM and DBSCAN are active and identify a datapoint that was above the DI threshold as an outlier, the result will be `do_predict==-2`. A particular case is when `do_predict == 2`, which means that the model has expired due to exceeding `expired_hours`. <br> **Datatype:** Integer between -2 and 2.
| `df['do_predict']` | Indication of an outlier data point. The return value is integer between -2 and 2, which lets you know if the prediction is trustworthy or not. `do_predict==1` means that the prediction is trustworthy. If the Dissimilarity Index (DI, see details [here](freqai-feature-engineering.md#identifying-outliers-with-the-dissimilarity-index-di)) of the input data point is above the threshold defined in the config, FreqAI will subtract 1 from `do_predict`, resulting in `do_predict==0`. If `use_SVM_to_remove_outliers` is active, the Support Vector Machine (SVM, see details [here](freqai-feature-engineering.md#identifying-outliers-using-a-support-vector-machine-svm)) may also detect outliers in training and prediction data. In this case, the SVM will also subtract 1 from `do_predict`. If the input data point was considered an outlier by the SVM but not by the DI, or vice versa, the result will be `do_predict==0`. If both the DI and the SVM considers the input data point to be an outlier, the result will be `do_predict==-1`. As with the SVM, if `use_DBSCAN_to_remove_outliers` is active, DBSCAN (see details [here](freqai-feature-engineering.md#identifying-outliers-with-dbscan)) may also detect outliers and subtract 1 from `do_predict`. Hence, if both the SVM and DBSCAN are active and identify a datapoint that was above the DI threshold as an outlier, the result will be `do_predict==-2`. A particular case is when `do_predict == 2`, which means that the model has expired due to exceeding `expired_hours`. <br> **Datatype:** Integer between -2 and 2.
| `df['DI_values']` | Dissimilarity Index (DI) values are proxies for the level of confidence FreqAI has in the prediction. A lower DI means the prediction is close to the training data, i.e., higher prediction confidence. See details about the DI [here](freqai-feature-engineering.md#identifying-outliers-with-the-dissimilarity-index-di). <br> **Datatype:** Float.
| `df['%*']` | Any dataframe column prepended with `%` in `feature_engineering_*()` is treated as a training feature. For example, you can include the RSI in the training feature set (similar to `templates/FreqaiExampleStrategy.py`) by setting `df['%-rsi']`. See more details on how this is done [here](freqai-feature-engineering.md). <br> **Note:** Since the number of features prepended with `%` can multiply very quickly (10s of thousands of features are easily engineered using the multiplicative functionality of, e.g., `include_shifted_candles` and `include_timeframes` as described in the [parameter table](freqai-parameter-table.md)), these features are removed from the dataframe that is returned from FreqAI to the strategy. To keep a particular type of feature for plotting purposes, you would prepend it with `%%`. <br> **Datatype:** Depends on the output of the model.
@@ -247,9 +248,11 @@ The easiest way to quickly run a pytorch model is with the following command (fo
freqtrade trade --config config_examples/config_freqai.example.json --strategy FreqaiExampleStrategy --freqaimodel PyTorchMLPRegressor --strategy-path freqtrade/templates
```
!!! note "Installation/docker"
!!! Note "Installation/docker"
The PyTorch module requires large packages such as `torch`, which should be explicitly requested during `./setup.sh -i` by answering "y" to the question "Do you also want dependencies for freqai-rl or PyTorch (~700mb additional space required) [y/N]?".
Users who prefer docker should ensure they use the docker image appended with `_freqaitorch`.
We provide a dedicated docker-compose file for this in `docker/docker-compose-freqai.yml`, which can be used via `docker compose -f docker/docker-compose-freqai.yml run ...` or copied to replace the original docker-compose file.
This docker-compose file also contains a (disabled) section to enable GPU resources within docker containers. This obviously assumes the system has GPU resources available.
### Structure
@@ -386,7 +389,7 @@ Here we create a `PyTorchMLPRegressor` class that implements the `fit` method. T
For example, if you are using a binary classifier to predict price movements as up or down, you can set the class names as follows:
```python
def set_freqai_targets(self, dataframe: DataFrame, metadata: Dict, **kwargs):
def set_freqai_targets(self, dataframe: DataFrame, metadata: Dict, **kwargs) -> DataFrame:
self.freqai.class_names = ["down", "up"]
dataframe['&s-up_or_down'] = np.where(dataframe["close"].shift(-100) >
dataframe["close"], 'up', 'down')
@@ -394,3 +397,21 @@ Here we create a `PyTorchMLPRegressor` class that implements the `fit` method. T
return dataframe
```
To see a full example, you can refer to the [classifier test strategy class](https://github.com/freqtrade/freqtrade/blob/develop/tests/strategy/strats/freqai_test_classifier.py).
#### Improving performance with `torch.compile()`
Torch provides a `torch.compile()` method that can be used to improve performance for specific GPU hardware. More details can be found [here](https://pytorch.org/tutorials/intermediate/torch_compile_tutorial.html). In brief, you simply wrap your `model` in `torch.compile()`:
```python
model = PyTorchMLPModel(
input_dim=n_features,
output_dim=1,
**self.model_kwargs
)
model.to(self.device)
model = torch.compile(model)
```
Then proceed to use the model as normal. Keep in mind that doing this will remove eager execution, which means errors and tracebacks will not be informative.

View File

@@ -16,7 +16,7 @@ Meanwhile, high level feature engineering is handled within `"feature_parameters
It is advisable to start from the template `feature_engineering_*` functions in the source provided example strategy (found in `templates/FreqaiExampleStrategy.py`) to ensure that the feature definitions are following the correct conventions. Here is an example of how to set the indicators and labels in the strategy:
```python
def feature_engineering_expand_all(self, dataframe, period, metadata, **kwargs):
def feature_engineering_expand_all(self, dataframe: DataFrame, period, metadata, **kwargs) -> DataFrame:
"""
*Only functional with FreqAI enabled strategies*
This function will automatically expand the defined features on the config defined
@@ -67,7 +67,7 @@ It is advisable to start from the template `feature_engineering_*` functions in
return dataframe
def feature_engineering_expand_basic(self, dataframe, metadata, **kwargs):
def feature_engineering_expand_basic(self, dataframe: DataFrame, metadata, **kwargs) -> DataFrame:
"""
*Only functional with FreqAI enabled strategies*
This function will automatically expand the defined features on the config defined
@@ -96,7 +96,7 @@ It is advisable to start from the template `feature_engineering_*` functions in
dataframe["%-raw_price"] = dataframe["close"]
return dataframe
def feature_engineering_standard(self, dataframe, metadata, **kwargs):
def feature_engineering_standard(self, dataframe: DataFrame, metadata, **kwargs) -> DataFrame:
"""
*Only functional with FreqAI enabled strategies*
This optional function will be called once with the dataframe of the base timeframe.
@@ -122,7 +122,7 @@ It is advisable to start from the template `feature_engineering_*` functions in
dataframe["%-hour_of_day"] = (dataframe["date"].dt.hour + 1) / 25
return dataframe
def set_freqai_targets(self, dataframe, metadata, **kwargs):
def set_freqai_targets(self, dataframe: DataFrame, metadata, **kwargs) -> DataFrame:
"""
*Only functional with FreqAI enabled strategies*
Required function to set the targets for the model.
@@ -180,16 +180,18 @@ You can ask for each of the defined features to be included also for informative
In total, the number of features the user of the presented example strategy has created is: length of `include_timeframes` * no. features in `feature_engineering_expand_*()` * length of `include_corr_pairlist` * no. `include_shifted_candles` * length of `indicator_periods_candles`
$= 3 * 3 * 3 * 2 * 2 = 108$.
!!! note "Learn more about creative feature engineering"
Check out our [medium article](https://emergentmethods.medium.com/freqai-from-price-to-prediction-6fadac18b665) geared toward helping users learn how to creatively engineer features.
### Gain finer control over `feature_engineering_*` functions with `metadata`
All `feature_engineering_*` and `set_freqai_targets()` functions are passed a `metadata` dictionary which contains information about the `pair`, `tf` (timeframe), and `period` that FreqAI is automating for feature building. As such, a user can use `metadata` inside `feature_engineering_*` functions as criteria for blocking/reserving features for certain timeframes, periods, pairs etc.
All `feature_engineering_*` and `set_freqai_targets()` functions are passed a `metadata` dictionary which contains information about the `pair`, `tf` (timeframe), and `period` that FreqAI is automating for feature building. As such, a user can use `metadata` inside `feature_engineering_*` functions as criteria for blocking/reserving features for certain timeframes, periods, pairs etc.
```python
def feature_engineering_expand_all(self, dataframe, period, metadata, **kwargs):
if metadata["tf"] == "1h":
dataframe["%-roc-period"] = ta.ROC(dataframe, timeperiod=period)
```python
def feature_engineering_expand_all(self, dataframe: DataFrame, period, metadata, **kwargs) -> DataFrame:
if metadata["tf"] == "1h":
dataframe["%-roc-period"] = ta.ROC(dataframe, timeperiod=period)
```
This will block `ta.ROC()` from being added to any timeframes other than `"1h"`.
@@ -210,41 +212,7 @@ Another example, where the user wants to use live metrics from the trade databas
You need to set the standard dictionary in the config so that FreqAI can return proper dataframe shapes. These values will likely be overridden by the prediction model, but in the case where the model has yet to set them, or needs a default initial value, the pre-set values are what will be returned.
## Feature normalization
FreqAI is strict when it comes to data normalization. The train features, $X^{train}$, are always normalized to [-1, 1] using a shifted min-max normalization:
$$X^{train}_{norm} = 2 * \frac{X^{train} - X^{train}.min()}{X^{train}.max() - X^{train}.min()} - 1$$
All other data (test data and unseen prediction data in dry/live/backtest) is always automatically normalized to the training feature space according to industry standards. FreqAI stores all the metadata required to ensure that test and prediction features will be properly normalized and that predictions are properly denormalized. For this reason, it is not recommended to eschew industry standards and modify FreqAI internals - however - advanced users can do so by inheriting `train()` in their custom `IFreqaiModel` and using their own normalization functions.
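As a minimal numpy sketch of the shifted min-max formula above (illustrative only, not FreqAI's internal code):
```python
import numpy as np

def fit_shifted_minmax(x_train: np.ndarray):
    # normalize train features to [-1, 1], keeping the train min/max for later use
    x_min, x_max = x_train.min(axis=0), x_train.max(axis=0)
    x_norm = 2 * (x_train - x_min) / (x_max - x_min) - 1
    return x_norm, x_min, x_max

def to_train_space(x: np.ndarray, x_min: np.ndarray, x_max: np.ndarray) -> np.ndarray:
    # test/prediction data is mapped into the *training* feature space
    return 2 * (x - x_min) / (x_max - x_min) - 1
```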
## Data dimensionality reduction with Principal Component Analysis
You can reduce the dimensionality of your features by activating the `principal_component_analysis` in the config:
```json
"freqai": {
"feature_parameters" : {
"principal_component_analysis": true
}
}
```
This will perform PCA on the features and reduce their dimensionality so that the explained variance of the data set is >= 0.999. Reducing data dimensionality makes training the model faster and hence allows for more up-to-date models.
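Conceptually, this is similar to the following scikit-learn sketch (not FreqAI's internal implementation):
```python
import numpy as np
from sklearn.decomposition import PCA

rng = np.random.default_rng(42)
X_train_norm = rng.normal(size=(500, 100))  # stand-in for normalized train features
X_pred_norm = rng.normal(size=(10, 100))    # stand-in for normalized prediction features

# a float n_components keeps the smallest number of components whose
# cumulative explained variance reaches that fraction
pca = PCA(n_components=0.999)
X_train_reduced = pca.fit_transform(X_train_norm)
X_pred_reduced = pca.transform(X_pred_norm)  # reuse the components fitted on the train data
```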
## Inlier metric
The `inlier_metric` is a metric aimed at quantifying how similar the features of a data point are to the most recent historical data points.
You define the lookback window by setting `inlier_metric_window` and FreqAI computes the distance between the present time point and each of the previous `inlier_metric_window` lookback points. A Weibull function is fit to each of the lookback distributions and its cumulative distribution function (CDF) is used to produce a quantile for each lookback point. The `inlier_metric` is then computed for each time point as the average of the corresponding lookback quantiles. The figure below explains the concept for an `inlier_metric_window` of 5.
![inlier-metric](assets/freqai_inlier-metric.jpg)
FreqAI adds the `inlier_metric` to the training features and hence gives the model access to a novel type of temporal information.
This function does **not** remove outliers from the data set.
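A rough, self-contained sketch of the idea (illustrative only - FreqAI's actual implementation differs):
```python
import numpy as np
from scipy.stats import weibull_min

def inlier_metric(features: np.ndarray, window: int = 5) -> np.ndarray:
    # features: time-ordered array of shape (n_points, n_features)
    n = len(features)
    metric = np.full(n, np.nan)
    for t in range(window, n):
        # distances from the present point to each of its `window` lookback points
        dists = np.linalg.norm(features[t] - features[t - window:t], axis=1)
        # fit a Weibull distribution to the lookback distances, use its CDF to
        # turn each distance into a quantile, then average the quantiles
        shape, loc, scale = weibull_min.fit(dists)
        metric[t] = weibull_min.cdf(dists, shape, loc=loc, scale=scale).mean()
    return metric
```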
## Weighting features for temporal importance
### Weighting features for temporal importance
FreqAI allows you to set a `weight_factor` to weight recent data more strongly than past data via an exponential function:
@@ -254,13 +222,103 @@ where $W_i$ is the weight of data point $i$ in a total set of $n$ data points. B
![weight-factor](assets/freqai_weight-factor.jpg)
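One way such exponentially decaying weights could be produced (a sketch of an assumed form - consult the FreqAI source for the exact definition):
```python
import numpy as np

def recency_weights(n: int, weight_factor: float) -> np.ndarray:
    # index 0 = oldest point, n-1 = newest; the newest point gets weight 1
    i = np.arange(n)
    return np.exp(-(n - 1 - i) / (weight_factor * n))
```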
## Building the data pipeline
By default, FreqAI builds a dynamic pipeline based on user configuration settings. The default pipeline consists of two steps, a `MinMaxScaler(-1,1)` and a `VarianceThreshold` (which removes any column that has 0 variance), and is robust and designed to work with a variety of methods. Users can activate other steps with more configuration parameters. For example, if users add `use_SVM_to_remove_outliers: true` to the `freqai` config, then FreqAI will automatically add the [`SVMOutlierExtractor`](#identifying-outliers-using-a-support-vector-machine-svm) to the pipeline. Likewise, users can add `principal_component_analysis: true` to the `freqai` config to activate PCA. The [DissimilarityIndex](#identifying-outliers-with-the-dissimilarity-index-di) is activated with `DI_threshold: 1`. Noise can also be added to the data with `noise_standard_deviation: 0.1`. Finally, users can add [DBSCAN](#identifying-outliers-with-dbscan) outlier removal with `use_DBSCAN_to_remove_outliers: true`.
!!! note "More information available"
Please review the [parameter table](freqai-parameter-table.md) for more information on these parameters.
### Customizing the pipeline
Users are encouraged to customize the data pipeline to their needs by building their own data pipeline. This can be done by simply setting `dk.feature_pipeline` to their desired `Pipeline` object inside their `IFreqaiModel` `train()` function, or if they prefer not to touch the `train()` function, they can override `define_data_pipeline`/`define_label_pipeline` functions in their `IFreqaiModel`:
!!! note "More information available"
FreqAI uses the [`DataSieve`](https://github.com/emergentmethods/datasieve) pipeline, which follows the SKlearn pipeline API but adds, among other features, coherent point removal across the X, y, and sample_weight vectors, feature removal, and feature name tracking.
```python
from typing import Any, Dict

from datasieve.transforms import SKLearnWrapper, DissimilarityIndex
from datasieve.pipeline import Pipeline
from sklearn.preprocessing import QuantileTransformer, StandardScaler

from freqtrade.freqai.base_models.BaseRegressionModel import BaseRegressionModel
from freqtrade.freqai.data_kitchen import FreqaiDataKitchen


class MyFreqaiModel(BaseRegressionModel):
    """
    Some cool custom model
    """
    def fit(self, data_dictionary: Dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
        """
        My custom fit function
        """
        # `cool_model` is a placeholder for your favourite library's model
        model = cool_model.fit()
        return model

    def define_data_pipeline(self) -> Pipeline:
        """
        User defines their custom feature pipeline here (if they wish)
        """
        feature_pipeline = Pipeline([
            ('qt', SKLearnWrapper(QuantileTransformer(output_distribution='normal'))),
            ('di', DissimilarityIndex(di_threshold=1))
        ])

        return feature_pipeline

    def define_label_pipeline(self) -> Pipeline:
        """
        User defines their custom label pipeline here (if they wish)
        """
        label_pipeline = Pipeline([
            ('scaler', SKLearnWrapper(StandardScaler())),
        ])

        return label_pipeline
```
Here, you are defining the exact pipeline that will be used for your feature set during training and prediction. You can use *most* SKLearn transformation steps by wrapping them in the `SKLearnWrapper` class as shown above. In addition, you can use any of the transformations available in the [`DataSieve` library](https://github.com/emergentmethods/datasieve).
You can easily add your own transformation by creating a class that inherits from the datasieve `BaseTransform` and implementing your `fit()`, `transform()` and `inverse_transform()` methods:
```python
from datasieve.transforms.base_transform import BaseTransform
# import whatever else you need


class MyCoolTransform(BaseTransform):
    def __init__(self, **kwargs):
        self.param1 = kwargs.get('param1', 1)

    def fit(self, X, y=None, sample_weight=None, feature_list=None, **kwargs):
        # do something with X, y, sample_weight, or/and feature_list
        return X, y, sample_weight, feature_list

    def transform(self, X, y=None, sample_weight=None,
                  feature_list=None, outlier_check=False, **kwargs):
        # do something with X, y, sample_weight, or/and feature_list
        return X, y, sample_weight, feature_list

    def inverse_transform(self, X, y=None, sample_weight=None, feature_list=None, **kwargs):
        # do or don't do something with X, y, sample_weight, or/and feature_list
        return X, y, sample_weight, feature_list
```
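You could then slot the custom step into a pipeline alongside the built-in transforms, reusing the imports from the earlier example (a sketch):
```python
feature_pipeline = Pipeline([
    ("qt", SKLearnWrapper(QuantileTransformer(output_distribution="normal"))),
    ("my_cool", MyCoolTransform(param1=2)),
    ("di", DissimilarityIndex(di_threshold=1)),
])
```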
!!! note "Hint"
You can define this custom class in the same file as your `IFreqaiModel`.
### Migrating a custom `IFreqaiModel` to the new Pipeline
If you have created your own custom `IFreqaiModel` with a custom `train()`/`predict()` function, *and* you still rely on `data_cleaning_train/predict()`, then you will need to migrate to the new pipeline. If your model does *not* rely on `data_cleaning_train/predict()`, then you do not need to worry about this migration.
More details about the migration can be found [here](strategy_migration.md#freqai---new-data-pipeline).
## Outlier detection
Equity and crypto markets suffer from a high level of non-patterned noise in the form of outlier data points. FreqAI implements a variety of methods to identify such outliers and hence mitigate risk.
### Identifying outliers with the Dissimilarity Index (DI)
The Dissimilarity Index (DI) aims to quantify the uncertainty associated with each prediction made by the model.
You can tell FreqAI to remove outlier data points from the training/test data sets using the DI by including the following statement in the config:
```json
"freqai": {
    "feature_parameters" : {
        "DI_threshold": 1
    }
}
```
The DI allows predictions which are outliers (not existent in the model feature space) to be thrown out due to low levels of certainty. To do so, FreqAI measures the distance between each training data point (feature vector), $X_{a}$, and all other training data points:
This will add the `DissimilarityIndex` step to your `feature_pipeline` and set the threshold to 1. The DI allows predictions which are outliers (not existent in the model feature space) to be thrown out due to low levels of certainty. To do so, FreqAI measures the distance between each training data point (feature vector), $X_{a}$, and all other training data points:
$$ d_{ab} = \sqrt{\sum_{j=1}^p(X_{a,j}-X_{b,j})^2} $$
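As a standalone sketch, the pairwise distances $d_{ab}$ above can be computed like this (illustrative, not FreqAI's internal code):
```python
import numpy as np
from scipy.spatial.distance import cdist

X_train = np.random.rand(100, 10)   # 100 training feature vectors, 10 features each
d = cdist(X_train, X_train)         # d[a, b] = sqrt(sum_j (X[a, j] - X[b, j])**2)
d_mean = d[np.triu_indices_from(d, k=1)].mean()  # mean pairwise distance across the training set
```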
### Identifying outliers using a Support Vector Machine (SVM)

You can tell FreqAI to remove outlier data points from the training/test data sets using a Support Vector Machine (SVM) by including the following statement in the config:
```json
"freqai": {
    "feature_parameters" : {
        "use_SVM_to_remove_outliers": true
    }
}
```
The SVM will be trained on the training data and any data point that the SVM deems to be beyond the feature space will be removed.
This will add the `SVMOutlierExtractor` step to your `feature_pipeline`. The SVM will be trained on the training data and any data point that the SVM deems to be beyond the feature space will be removed.
FreqAI uses `sklearn.linear_model.SGDOneClassSVM` (details are available on scikit-learn's webpage [here](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.SGDOneClassSVM.html) (external website)) and you can elect to provide additional parameters for the SVM, such as `shuffle`, and `nu`.
You can elect to provide additional parameters for the SVM, such as `shuffle` and `nu`, via the `feature_parameters.svm_params` dictionary in the config.
The parameter `shuffle` is by default set to `False` to ensure consistent results. If it is set to `True`, running the SVM multiple times on the same data set might result in different outcomes due to `max_iter` being too low for the algorithm to reach the demanded `tol`. Increasing `max_iter` solves this issue but causes the procedure to take a longer time.
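For example (illustrative values):
```json
"freqai": {
    "feature_parameters" : {
        "use_SVM_to_remove_outliers": true,
        "svm_params": {
            "shuffle": false,
            "nu": 0.1
        }
    }
}
```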
### Identifying outliers with DBSCAN

You can configure FreqAI to use DBSCAN to cluster and remove outliers from the training/test data set by activating `use_DBSCAN_to_remove_outliers` in the config:
```json
"freqai": {
    "feature_parameters" : {
        "use_DBSCAN_to_remove_outliers": true
    }
}
```
DBSCAN is an unsupervised machine learning algorithm that clusters data without needing to know how many clusters there should be.
This will add the `DataSieveDBSCAN` step to your `feature_pipeline`. DBSCAN is an unsupervised machine learning algorithm that clusters data without needing to know how many clusters there should be.
Given a number of data points $N$, and a distance $\varepsilon$, DBSCAN clusters the data set by setting all data points that have $N-1$ other data points within a distance of $\varepsilon$ as *core points*. A data point that is within a distance of $\varepsilon$ from a *core point* but that does not have $N-1$ other data points within a distance of $\varepsilon$ from itself is considered an *edge point*. A cluster is then the collection of *core points* and *edge points*. Data points that have no other data points at a distance $<\varepsilon$ are considered outliers. The figure below shows a cluster with $N = 3$.
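For intuition, the same idea outside of FreqAI using scikit-learn directly (parameter choices here are purely illustrative):
```python
import numpy as np
from sklearn.cluster import DBSCAN

rng = np.random.default_rng(0)
X = np.vstack([rng.normal(0, 1, size=(100, 2)),  # one dense cluster
               [[8.0, 8.0]]])                    # a far-away point
labels = DBSCAN(eps=0.5, min_samples=3).fit_predict(X)
print(labels[-1])  # -1: DBSCAN labels the far-away point as an outlier
```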

View File

@@ -18,9 +18,10 @@ Mandatory parameters are marked as **Required** and have to be set in one of the
| `purge_old_models` | Number of models to keep on disk (not relevant to backtesting). Default is 2, which means that dry/live runs will keep the latest 2 models on disk. Setting to 0 keeps all models. This parameter also accepts a boolean to maintain backwards compatibility. <br> **Datatype:** Integer. <br> Default: `2`.
| `save_backtest_models` | Save models to disk when running backtesting. Backtesting operates most efficiently by saving the prediction data and reusing them directly for subsequent runs (when you wish to tune entry/exit parameters). Saving backtesting models to disk also allows to use the same model files for starting a dry/live instance with the same model `identifier`. <br> **Datatype:** Boolean. <br> Default: `False` (no models are saved).
| `fit_live_predictions_candles` | Number of historical candles to use for computing target (label) statistics from prediction data, instead of from the training dataset (more information can be found [here](freqai-configuration.md#creating-a-dynamic-target-threshold)). <br> **Datatype:** Positive integer.
| `continual_learning` | Use the final state of the most recently trained model as starting point for the new model, allowing for incremental learning (more information can be found [here](freqai-running.md#continual-learning)). <br> **Datatype:** Boolean. <br> Default: `False`.
| `continual_learning` | Use the final state of the most recently trained model as starting point for the new model, allowing for incremental learning (more information can be found [here](freqai-running.md#continual-learning)). Beware that this is currently a naive approach to incremental learning, and it has a high probability of overfitting/getting stuck in local minima while the market moves away from your model. We have this available primarily for experimental purposes and so that FreqAI is ready for more mature approaches to continual learning in chaotic systems like the crypto market. <br> **Datatype:** Boolean. <br> Default: `False`.
| `write_metrics_to_disk` | Collect train timings, inference timings and cpu usage in json file. <br> **Datatype:** Boolean. <br> Default: `False`
| `data_kitchen_thread_count` | <br> Designate the number of threads you want to use for data processing (outlier methods, normalization, etc.). This has no impact on the number of threads used for training. If the user does not set it (default), FreqAI will use the maximum number of threads minus 2 (leaving 1 physical core available for the Freqtrade bot and FreqUI). <br> **Datatype:** Positive integer.
| `activate_tensorboard` | <br> Indicate whether or not to activate tensorboard for the tensorboard enabled modules (currently Reinforcement Learning, XGBoost, Catboost, and PyTorch). Tensorboard needs Torch installed, which means you will need the torch/RL docker image or you need to answer "yes" to the install question about whether or not you wish to install Torch. <br> **Datatype:** Boolean. <br> Default: `True`.
### Feature parameters
@@ -114,5 +115,5 @@ Mandatory parameters are marked as **Required** and have to be set in one of the
|------------|-------------|
| | **Extraneous parameters**
| `freqai.keras` | If the selected model makes use of Keras (typical for TensorFlow-based prediction models), this flag needs to be activated so that the model save/loading follows Keras standards. <br> **Datatype:** Boolean. <br> Default: `False`.
| `freqai.conv_width` | The width of a convolutional neural network input tensor. This replaces the need for shifting candles (`include_shifted_candles`) by feeding in historical data points as the second dimension of the tensor. Technically, this parameter can also be used for regressors, but it only adds computational overhead and does not change the model training/prediction. <br> **Datatype:** Integer. <br> Default: `2`.
| `freqai.conv_width` | The width of a neural network input tensor. This replaces the need for shifting candles (`include_shifted_candles`) by feeding in historical data points as the second dimension of the tensor. Technically, this parameter can also be used for regressors, but it only adds computational overhead and does not change the model training/prediction. <br> **Datatype:** Integer. <br> Default: `2`.
| `freqai.reduce_df_footprint` | Recast all numeric columns to float32/int32, with the objective of reducing ram/disk usage and decreasing train/inference timing. This parameter is set in the main level of the Freqtrade configuration file (not inside FreqAI). <br> **Datatype:** Boolean. <br> Default: `False`.

View File

@@ -37,7 +37,7 @@ freqtrade trade --freqaimodel ReinforcementLearner --strategy MyRLStrategy --con
where `ReinforcementLearner` will use the templated `ReinforcementLearner` from `freqai/prediction_models/ReinforcementLearner` (or a custom user defined one located in `user_data/freqaimodels`). The strategy, on the other hand, follows the same base [feature engineering](freqai-feature-engineering.md) with `feature_engineering_*` as a typical Regressor. The difference lies in the creation of the targets: Reinforcement Learning doesn't require them. However, FreqAI requires a default (neutral) value to be set in the action column:
```python
def set_freqai_targets(self, dataframe, **kwargs):
def set_freqai_targets(self, dataframe, **kwargs) -> DataFrame:
"""
*Only functional with FreqAI enabled strategies*
Required function to set the targets for the model.
@@ -53,17 +53,19 @@ where `ReinforcementLearner` will use the templated `ReinforcementLearner` from
# For RL, there are no direct targets to set. This is filler (neutral)
# until the agent sends an action.
dataframe["&-action"] = 0
return dataframe
```
Most of the function remains the same as for typical Regressors, however, the function below shows how the strategy must pass the raw price data to the agent so that it has access to raw OHLCV in the training environment:
```python
def feature_engineering_standard(self, dataframe, **kwargs):
def feature_engineering_standard(self, dataframe: DataFrame, **kwargs) -> DataFrame:
# The following features are necessary for RL models
dataframe[f"%-raw_close"] = dataframe["close"]
dataframe[f"%-raw_open"] = dataframe["open"]
dataframe[f"%-raw_high"] = dataframe["high"]
dataframe[f"%-raw_low"] = dataframe["low"]
return dataframe
```
Finally, there is no explicit "label" to make - instead it is necessary to assign the `&-action` column which will contain the agent's actions when accessed in `populate_entry/exit_trends()`. In the present example, the neutral action is set to 0. This value should align with the environment used. FreqAI provides multiple base environments, all of which use 0 as the neutral action.
@@ -133,92 +135,104 @@ Parameter details can be found [here](freqai-parameter-table.md), but in general
## Creating a custom reward function
As you begin to modify the strategy and the prediction model, you will quickly realize some important differences between the Reinforcement Learner and the Regressors/Classifiers. Firstly, the strategy does not set a target value (no labels!). Instead, you set the `calculate_reward()` function inside the `MyRLEnv` class (see below). A default `calculate_reward()` is provided inside `prediction_models/ReinforcementLearner.py` to demonstrate the necessary building blocks for creating rewards, but users are encouraged to create their own custom reinforcement learning model class (see below) and save it to `user_data/freqaimodels`. It is inside the `calculate_reward()` where creative theories about the market can be expressed. For example, you can reward your agent when it makes a winning trade, and penalize the agent when it makes a losing trade. Or perhaps, you wish to reward the agent for entering trades, and penalize the agent for sitting in trades too long. Below we show examples of how these rewards are all calculated:
!!! danger "Not for production"
Warning!
The reward function provided with the Freqtrade source code is a showcase of functionality designed to show/test as many possible environment control features as possible. It is also designed to run quickly on small computers. This is a benchmark; it is *not* for live production. Please beware that you will need to create your own custom `calculate_reward()` function, or use a template built by other users outside of the Freqtrade source code.
As you begin to modify the strategy and the prediction model, you will quickly realize some important differences between the Reinforcement Learner and the Regressors/Classifiers. Firstly, the strategy does not set a target value (no labels!). Instead, you set the `calculate_reward()` function inside the `MyRLEnv` class (see below). A default `calculate_reward()` is provided inside `prediction_models/ReinforcementLearner.py` to demonstrate the necessary building blocks for creating rewards, but this is *not* designed for production. Users *must* create their own custom reinforcement learning model class or use a pre-built one from outside the Freqtrade source code and save it to `user_data/freqaimodels`. It is inside the `calculate_reward()` where creative theories about the market can be expressed. For example, you can reward your agent when it makes a winning trade, and penalize the agent when it makes a losing trade. Or perhaps, you wish to reward the agent for entering trades, and penalize the agent for sitting in trades too long. Below we show examples of how these rewards are all calculated:
!!! note "Hint"
The best reward functions are ones that are continuously differentiable and well scaled. In other words, adding a single large negative penalty to a rare event is not a good idea, as the neural net will not be able to learn that function. Instead, it is better to add a small negative penalty to a common event. This will help the agent learn faster. Not only this, but you can help improve the continuity of your rewards/penalties by having them scale with severity according to some linear/exponential function. In other words, you'd slowly scale the penalty as the duration of the trade increases. This is better than a single large penalty occurring at a single point in time.
```python
from freqtrade.freqai.prediction_models.ReinforcementLearner import ReinforcementLearner
from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Base5ActionRLEnv, Positions


class MyCoolRLModel(ReinforcementLearner):
    """
    User created RL prediction model.

    Save this file to `freqtrade/user_data/freqaimodels`

    then use it with:

    freqtrade trade --freqaimodel MyCoolRLModel --config config.json --strategy SomeCoolStrat

    Here the users can override any of the functions
    available in the `IFreqaiModel` inheritance tree. Most importantly for RL, this
    is where the user overrides `MyRLEnv` (see below), to define custom
    `calculate_reward()` function, or to override any other parts of the environment.

    This class also allows users to override any other part of the IFreqaiModel tree.
    For example, the user can override `def fit()` or `def train()` or `def predict()`
    to take fine-tuned control over these processes.

    Another common override may be `def data_cleaning_predict()` where the user can
    take fine-tuned control over the data handling pipeline.
    """
    class MyRLEnv(Base5ActionRLEnv):
        """
        User made custom environment. This class inherits from BaseEnvironment and gym.env.
        Users can override any functions from those parent classes. Here is an example
        of a user customized `calculate_reward()` function.

        Warning!
        This function is a showcase of functionality designed to show as many possible
        environment control features as possible. It is also designed to run quickly
        on small computers. This is a benchmark, it is *not* for live production.
        """
        def calculate_reward(self, action: int) -> float:
            # first, penalize if the action is not valid
            if not self._is_valid(action):
                return -2
            pnl = self.get_unrealized_profit()

            factor = 100

            pair = self.pair.replace(':', '')

            # you can use feature values from dataframe
            # Assumes the shifted RSI indicator has been generated in the strategy.
            rsi_now = self.raw_features[f"%-rsi-period_10_shift-1_{pair}_"
                                        f"{self.config['timeframe']}"].iloc[self._current_tick]

            # reward agent for entering trades
            if (action in (Actions.Long_enter.value, Actions.Short_enter.value)
                    and self._position == Positions.Neutral):
                if rsi_now < 40:
                    factor = 40 / rsi_now
                else:
                    factor = 1
                return 25 * factor

            # discourage agent from not entering trades
            if action == Actions.Neutral.value and self._position == Positions.Neutral:
                return -1
            max_trade_duration = self.rl_config.get('max_trade_duration_candles', 300)
            trade_duration = self._current_tick - self._last_trade_tick
            if trade_duration <= max_trade_duration:
                factor *= 1.5
            elif trade_duration > max_trade_duration:
                factor *= 0.5
            # discourage sitting in position
            if self._position in (Positions.Short, Positions.Long) and \
                    action == Actions.Neutral.value:
                return -1 * trade_duration / max_trade_duration
            # close long
            if action == Actions.Long_exit.value and self._position == Positions.Long:
                if pnl > self.profit_aim * self.rr:
                    factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2)
                return float(pnl * factor)
            # close short
            if action == Actions.Short_exit.value and self._position == Positions.Short:
                if pnl > self.profit_aim * self.rr:
                    factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2)
                return float(pnl * factor)
            return 0.
```
### Using Tensorboard
## Using Tensorboard
Reinforcement Learning models benefit from tracking training metrics. FreqAI has integrated Tensorboard to allow users to track training and evaluation performance across all coins and across all retrainings. Tensorboard is activated via the following command:
@@ -231,32 +245,30 @@ where `unique-id` is the `identifier` set in the `freqai` configuration file. Th
![tensorboard](assets/tensorboard.jpg)
### Custom logging
## Custom logging
FreqAI also provides a built-in episodic summary logger called `self.tensorboard_log` for adding custom information to the Tensorboard log. By default, this function is already called once per step inside the environment to record the agent actions. All values accumulated for all steps in a single episode are reported at the conclusion of each episode, followed by a full reset of all metrics to 0 in preparation for the subsequent episode.
`self.tensorboard_log` can also be used anywhere inside the environment, for example, it can be added to the `calculate_reward` function to collect more detailed information about how often various parts of the reward were called:
```python
class MyRLEnv(Base5ActionRLEnv):
    """
    User made custom environment. This class inherits from BaseEnvironment and gym.env.
    Users can override any functions from those parent classes. Here is an example
    of a user customized `calculate_reward()` function.
    """
    def calculate_reward(self, action: int) -> float:
        if not self._is_valid(action):
            self.tensorboard_log("invalid")
            return -2
```
!!! Note
The `self.tensorboard_log()` function is designed for tracking incremented objects only, i.e. events or actions inside the training environment. If the event of interest is a float, the float can be passed as the second argument, e.g. `self.tensorboard_log("float_metric1", 0.23)`. In this case the metric values are not incremented.
### Choosing a base environment
## Choosing a base environment
FreqAI provides three base environments, `Base3ActionRLEnvironment`, `Base4ActionEnvironment` and `Base5ActionEnvironment`. As the names imply, the environments are customized for agents that can select from 3, 4 or 5 actions. The `Base3ActionEnvironment` is the simplest, the agent can select from hold, long, or short. This environment can also be used for long-only bots (it automatically follows the `can_short` flag from the strategy), where long is the enter condition and short is the exit condition. Meanwhile, in the `Base4ActionEnvironment`, the agent can enter long, enter short, hold neutral, or exit position. Finally, in the `Base5ActionEnvironment`, the agent has the same actions as Base4, but instead of a single exit action, it separates exit long and exit short. The main changes stemming from the environment selection include:

View File

@@ -131,6 +131,9 @@ You can choose to adopt a continual learning scheme by setting `"continual_learn
???+ danger "Continual learning enforces a constant parameter space"
Since `continual_learning` means that the model parameter space *cannot* change between trainings, `principal_component_analysis` is automatically disabled when `continual_learning` is enabled. Hint: PCA changes the parameter space and the number of features, learn more about PCA [here](freqai-feature-engineering.md#data-dimensionality-reduction-with-principal-component-analysis).
???+ danger "Experimental functionality"
Beware that this is currently a naive approach to incremental learning, and it has a high probability of overfitting/getting stuck in local minima while the market moves away from your model. We have the mechanics available in FreqAI primarily for experimental purposes and so that it is ready for more mature approaches to continual learning in chaotic systems like the crypto market.
## Hyperopt
You can hyperopt using the same command as for [typical Freqtrade hyperopt](hyperopt.md):
@@ -158,7 +161,14 @@ This specific hyperopt would help you understand the appropriate `DI_values` for
## Using Tensorboard
CatBoost models benefit from tracking training metrics via Tensorboard. You can take advantage of the FreqAI integration to track training and evaluation performance across all coins and across all retrainings. Tensorboard is activated via the following command:
!!! note "Availability"
FreqAI includes tensorboard for a variety of models, including XGBoost, all PyTorch models, Reinforcement Learning, and Catboost. If you would like to see Tensorboard integrated into another model type, please open an issue on the [Freqtrade GitHub](https://github.com/freqtrade/freqtrade/issues)
!!! danger "Requirements"
Tensorboard logging requires the FreqAI torch installation/docker image.
The easiest way to use tensorboard is to ensure `freqai.activate_tensorboard` is set to `True` (default setting) in your configuration file, run FreqAI, then open a separate shell and run:
```bash
cd freqtrade
tensorboard --logdir user_data/models/unique-id
```
where `unique-id` is the `identifier` set in the `freqai` configuration file. This command must be run in a separate shell if you wish to view the output in your browser at 127.0.0.1:6060 (6060 is the default port used by Tensorboard).
![tensorboard](assets/tensorboard.jpg)
!!! note "Deactivate for improved performance"
Tensorboard logging can slow down training and should be deactivated for production use.

View File

The easiest way to quickly test FreqAI is to run it in dry mode with the following command:
```bash
freqtrade trade --config config_examples/config_freqai.example.json --strategy FreqaiExampleStrategy --freqaimodel LightGBMRegressor --strategy-path freqtrade/templates
```
You will see the boot-up process of automatic data downloading, followed by simultaneous training and trading.
!!! danger "Not for production"
The example strategy provided with the Freqtrade source code is designed for showcasing/testing a wide variety of FreqAI features. It is also designed to run on small computers so that it can be used as a benchmark between developers and users. It is *not* designed to be run in production.
An example strategy, prediction model, and config to use as starting points can be found in
`freqtrade/templates/FreqaiExampleStrategy.py`, `freqtrade/freqai/prediction_models/LightGBMRegressor.py`, and
@@ -69,15 +72,14 @@ pip install -r requirements-freqai.txt
```
!!! Note
Catboost will not be installed on arm devices (raspberry, Mac M1, ARM based VPS, ...), since it does not provide wheels for this platform.
!!! Note "python 3.11"
Some dependencies (Catboost, Torch) currently don't support python 3.11. Freqtrade therefore only supports python 3.10 for these models/dependencies.
Tests involving these dependencies are skipped on 3.11.
Catboost will not be installed on low-powered arm devices (raspberry), since it does not provide wheels for this platform.
### Usage with docker
If you are using docker, a dedicated tag with FreqAI dependencies is available as `:freqai`. As such - you can replace the image line in your docker compose file with `image: freqtradeorg/freqtrade:develop_freqai`. This image contains the regular FreqAI dependencies. Similar to native installs, Catboost will not be available on ARM based devices.
If you are using docker, a dedicated tag with FreqAI dependencies is available as `:freqai`. As such - you can replace the image line in your docker compose file with `image: freqtradeorg/freqtrade:develop_freqai`. This image contains the regular FreqAI dependencies. Similar to native installs, Catboost will not be available on ARM based devices. If you would like to use PyTorch or Reinforcement learning, you should use the torch or RL tags, `image: freqtradeorg/freqtrade:develop_freqaitorch`, `image: freqtradeorg/freqtrade:develop_freqairl`.
!!! note "docker-compose-freqai.yml"
We do provide an explicit docker-compose file for this in `docker/docker-compose-freqai.yml` - which can be used via `docker compose -f docker/docker-compose-freqai.yml run ...` - or can be copied to replace the original docker file. This docker-compose file also contains a (disabled) section to enable GPU resources within docker containers. This obviously assumes the system has GPU resources available.
### FreqAI position in open-source machine learning landscape
@@ -105,6 +107,13 @@ This is for performance reasons - FreqAI relies on making quick predictions/retr
it needs to download all the training data at the beginning of a dry/live instance. FreqAI stores and appends
new candles automatically for future retrains. This means that if new pairs arrive later in the dry run due to a volume pairlist, it will not have the data ready. However, FreqAI does work with the `ShuffleFilter` or a `VolumePairlist` which keeps the total pairlist constant (but reorders the pairs according to volume).
## Additional learning materials
Here we compile some external materials that provide deeper looks into various components of FreqAI:
- [Real-time head-to-head: Adaptive modeling of financial market data using XGBoost and CatBoost](https://emergentmethods.medium.com/real-time-head-to-head-adaptive-modeling-of-financial-market-data-using-xgboost-and-catboost-995a115a7495)
- [FreqAI - from price to prediction](https://emergentmethods.medium.com/freqai-from-price-to-prediction-6fadac18b665)
## Credits
FreqAI is developed by a group of individuals who all contribute specific skillsets to the project.

View File

@@ -184,6 +184,8 @@ The RemotePairList is defined in the pairlists section of the configuration sett
"pairlists": [
{
"method": "RemotePairList",
"mode": "whitelist",
"processing_mode": "filter",
"pairlist_url": "https://example.com/pairlist",
"number_assets": 10,
"refresh_period": 1800,
@@ -194,6 +196,14 @@ The RemotePairList is defined in the pairlists section of the configuration sett
]
```
The optional `mode` option specifies if the pairlist should be used as a `blacklist` or as a `whitelist`. The default value is "whitelist".
The optional `processing_mode` option in the RemotePairList configuration determines how the retrieved pairlist is processed. It can have two values: "filter" or "append".
In "filter" mode, the retrieved pairlist is used as a filter. Only the pairs present in both the original pairlist and the retrieved pairlist are included in the final pairlist. Other pairs are filtered out.
In "append" mode, the retrieved pairlist is added to the original pairlist. All pairs from both lists are included in the final pairlist without any filtering.
The `pairlist_url` option specifies the URL of the remote server where the pairlist is located, or the path to a local file (if file:/// is prepended). This allows the user to use either a remote server or a local file as the source for the pairlist.
The user is responsible for providing a server or local file that returns a JSON object with the following structure:

View File

@@ -0,0 +1,37 @@
## Highlighted changes
- ...
### How to update
As always, you can update your bot using one of the following commands:
#### docker-compose
```bash
docker-compose pull
docker-compose up -d
```
#### Installation via setup script
```bash
# Deactivate venv and run
./setup.sh --update
```
#### Plain native installation
```bash
git pull
pip install -U -r requirements.txt
```
<details>
<summary>Expand full changelog</summary>
```
<Paste your changelog here>
```
</details>

docs/includes/showcase.md Normal file
View File

@@ -0,0 +1,11 @@
This section will highlight a few projects from members of the community.
!!! Note
The projects below are for the most part not maintained by the freqtrade team, therefore use your own caution before using them.
- [Example freqtrade strategies](https://github.com/freqtrade/freqtrade-strategies/)
- [FrequentHippo - Grafana dashboard with dry/live runs and backtests](http://frequenthippo.ddns.net:3000/) (by hippocritical).
- [Online pairlist generator](http://pairlist.robot.co.network/) (by Blood4rc).
- [Freqtrade Backtesting Project](http://bt.robot.co.network/) (by Blood4rc).
- [Freqtrade analysis notebook](https://github.com/froggleston/freqtrade_analysis_notebook) (by Froggleston).
- [TUI for freqtrade](https://github.com/froggleston/freqtrade-frogtrade9000) (by Froggleston).
- [Bot Academy](https://botacademy.ddns.net/) (by stash86) - Blog about crypto bot projects.

View File

@@ -63,6 +63,10 @@ Exchanges confirmed working by the community:
- [X] [Bitvavo](https://bitvavo.com/)
- [X] [Kucoin](https://www.kucoin.com/)
## Community showcase
--8<-- "includes/showcase.md"
## Requirements
### Hardware requirements

View File

@@ -30,12 +30,6 @@ The easiest way to install and run Freqtrade is to clone the bot Github reposito
!!! Warning "Up-to-date clock"
The clock on the system running the bot must be accurate, synchronized to a NTP server frequently enough to avoid problems with communication to the exchanges.
!!! Error "Running setup.py install for gym did not run successfully."
If you get an error related to gym, we suggest you downgrade setuptools to version 65.5.0. You can do this with the following command:
```bash
pip install setuptools==65.5.0
```
------
## Requirements
@@ -242,6 +236,7 @@ source .env/bin/activate
```bash
python3 -m pip install --upgrade pip
python3 -m pip install -r requirements.txt
python3 -m pip install -e .
```

docs/lookahead-analysis.md Normal file
View File

@@ -0,0 +1,100 @@
# Lookahead analysis
This page explains how to validate your strategy in terms of look ahead bias.
Look ahead bias is the bane of any strategy since it is sometimes very easy to introduce -
but very hard to detect.
Backtesting initializes all timestamps at once and calculates all indicators at the beginning.
This means that if your indicators or entry/exit signals look into future candles, they will falsify your backtest.
Lookahead-analysis requires historic data to be available.
To learn how to get data for the pairs and exchange you're interested in,
head over to the [Data Downloading](data-download.md) section of the documentation.
This command is built upon backtesting since it internally chains backtests and pokes at the strategy to provoke it to show look ahead bias.
This is done by not looking at the strategy itself - but at the results it returned.
The results are things like changed indicator-values and moved entries/exits compared to the full backtest.
You can use the same commands and options as in [Backtesting](backtesting.md).
It also supports the lookahead-analysis of freqai strategies.
- `--cache` is forced to "none".
- `--max-open-trades` is forced to be at least equal to the number of pairs.
- `--dry-run-wallet` is forced to be basically infinite.
## Lookahead-analysis command reference
```
usage: freqtrade lookahead-analysis [-h] [-v] [--logfile FILE] [-V] [-c PATH]
[-d PATH] [--userdir PATH] [-s NAME]
[--strategy-path PATH]
[--recursive-strategy-search]
[--freqaimodel NAME]
[--freqaimodel-path PATH] [-i TIMEFRAME]
[--timerange TIMERANGE]
[--data-format-ohlcv {json,jsongz,hdf5,feather,parquet}]
[--max-open-trades INT]
[--stake-amount STAKE_AMOUNT]
[--fee FLOAT] [-p PAIRS [PAIRS ...]]
[--enable-protections]
[--dry-run-wallet DRY_RUN_WALLET]
[--timeframe-detail TIMEFRAME_DETAIL]
[--strategy-list STRATEGY_LIST [STRATEGY_LIST ...]]
[--export {none,trades,signals}]
[--export-filename PATH]
[--breakdown {day,week,month} [{day,week,month} ...]]
[--cache {none,day,week,month}]
[--freqai-backtest-live-models]
[--minimum-trade-amount INT]
[--targeted-trade-amount INT]
[--lookahead-analysis-exportfilename LOOKAHEAD_ANALYSIS_EXPORTFILENAME]
options:
--minimum-trade-amount INT
Minimum trade amount for lookahead-analysis
--targeted-trade-amount INT
Targeted trade amount for lookahead analysis
--lookahead-analysis-exportfilename LOOKAHEAD_ANALYSIS_EXPORTFILENAME
Use this csv-filename to store lookahead-analysis-
results
```
!!! Note ""
The above output was reduced to the options that `lookahead-analysis` adds on top of the regular backtesting commands.
### Summary
Checks a given strategy for look ahead bias via lookahead-analysis.
Look ahead bias means that the backtest uses data from future candles, thereby making the strategy unviable beyond backtesting
and producing false hopes for the person backtesting.
### Introduction
Many strategies - without the programmer knowing - have fallen prey to look ahead bias.
Any backtest will populate the full dataframe, including all time stamps, at the beginning.
If the programmer is not careful or is oblivious to how things work internally
(which can sometimes be really hard to find out), then the strategy will just look into the future, making it seem amazing
but not realistic.
This command is made to try to verify the validity in the form of the aforementioned look ahead bias.
### How does the command work?
It will start with a backtest of all pairs to generate a baseline for indicators and entries/exits.
After the backtest has run, it will check whether the `minimum-trade-amount` is met
and, if not, cancel the lookahead-analysis for this strategy.
After setting the baseline it will then do additional runs for every entry and exit separately.
When a verification-backtest is done, it will compare the indicators at the signal (either entry or exit) against the baseline and report any bias.
After all signals have been verified or falsified a result-table will be generated for the user to see.
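For example, a typical invocation could look like this (illustrative values; all options are documented above and in the backtesting docs):
```bash
freqtrade lookahead-analysis --strategy MyStrategy \
    --timerange 20220101-20220501 \
    --minimum-trade-amount 10 \
    --targeted-trade-amount 20 \
    --lookahead-analysis-exportfilename lookahead.csv
```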
### Caveats
- `lookahead-analysis` can only verify / falsify the trades it actually calculated.
If the strategy has many different signals / signal types, it's up to you to select appropriate parameters to ensure that all signals have triggered at least once. Signals that never triggered will not have been verified.
This could lead to a false-negative (the strategy will then be reported as non-biased).
- `lookahead-analysis` has access to everything that backtesting has, too.
Please don't use settings that provoke unusual behavior, like enabling position stacking.
If you decide to do so, then make doubly sure that you never run out of `max_open_trades` slots and that your wallet never runs out of money.

View File

@@ -49,7 +49,7 @@ Enable subscribing to an instance by adding the `external_message_consumer` sect
| `wait_timeout` | Timeout until we ping again if no message is received. <br>*Defaults to `300`.*<br> **Datatype:** Integer - in seconds.
| `ping_timeout` | Ping timeout <br>*Defaults to `10`.*<br> **Datatype:** Integer - in seconds.
| `sleep_time` | Sleep time before retrying to connect.<br>*Defaults to `10`.*<br> **Datatype:** Integer - in seconds.
| `remove_entry_exit_signals` | Remove signal columns from the dataframe (set them to 0) on dataframe receipt.<br>*Defaults to `False`.*<br> **Datatype:** Boolean.
| `remove_entry_exit_signals` | Remove signal columns from the dataframe (set them to 0) on dataframe receipt.<br>*Defaults to `false`.*<br> **Datatype:** Boolean.
| `message_size_limit` | Size limit per message<br>*Defaults to `8`.*<br> **Datatype:** Integer - Megabytes.
Instead of (or as well as) calculating indicators in `populate_indicators()` the follower instance listens on the connection to a producer instance's messages (or multiple producer instances in advanced configurations) and requests the producer's most recently analyzed dataframes for each pair in the active whitelist.

View File

@@ -1,6 +1,6 @@
markdown==3.3.7
mkdocs==1.4.2
mkdocs-material==9.1.6
mkdocs==1.4.3
mkdocs-material==9.1.18
mdx_truly_sane_lists==1.3
pymdown-extensions==9.11
pymdown-extensions==10.0.1
jinja2==3.1.2

View File

@@ -134,7 +134,9 @@ python3 scripts/rest_client.py --config rest_config.json <command> [optional par
| `reload_config` | Reloads the configuration file.
| `trades` | List last trades. Limited to 500 trades per call.
| `trade/<tradeid>` | Get specific trade.
| `delete_trade <trade_id>` | Remove trade from the database. Tries to close open orders. Requires manual handling of this trade on the exchange.
| `trade/<tradeid>` | DELETE - Remove trade from the database. Tries to close open orders. Requires manual handling of this trade on the exchange.
| `trade/<tradeid>/open-order` | DELETE - Cancel open order for this trade.
| `trade/<tradeid>/reload` | GET - Reload a trade from the Exchange. Only works in live, and can potentially help recover a trade that was manually sold on the exchange.
| `show_config` | Shows part of the current configuration with relevant settings to operation.
| `logs` | Shows last log messages.
| `status` | Lists all open trades.

View File

@@ -209,11 +209,6 @@ You can also keep a static stoploss until the offset is reached, and then trail
If `trailing_only_offset_is_reached = True` then the trailing stoploss is only activated once the offset is reached. Until then, the stoploss remains at the configured `stoploss`.
This option can be used with or without `trailing_stop_positive`, but uses `trailing_stop_positive_offset` as offset.
``` python
trailing_stop_positive_offset = 0.011
trailing_only_offset_is_reached = True
```
Configuration (offset is buy-price + 3%):
``` python

View File

@@ -1,21 +1,21 @@
# Advanced Strategies
This page explains some advanced concepts available for strategies.
If you're just getting started, please be familiar with the methods described in the [Strategy Customization](strategy-customization.md) documentation and with the [Freqtrade basics](bot-basics.md) first.
If you're just getting started, please familiarize yourself with the [Freqtrade basics](bot-basics.md) and methods described in [Strategy Customization](strategy-customization.md) first.
[Freqtrade basics](bot-basics.md) describes in which sequence each method described below is called, which can be helpful to understand which method to use for your custom needs.
The call sequence of the methods described here is covered under [bot execution logic](bot-basics.md#bot-execution-logic). Those docs are also helpful in deciding which method is most suitable for your customisation needs.
!!! Note
All callback methods described below should only be implemented in a strategy if they are actually used.
Callback methods should *only* be implemented if a strategy uses them.
!!! Tip
You can get a strategy template containing all below methods by running `freqtrade new-strategy --strategy MyAwesomeStrategy --template advanced`
Start off with a strategy template containing all available callback methods by running `freqtrade new-strategy --strategy MyAwesomeStrategy --template advanced`
## Storing information
Storing information can be accomplished by creating a new dictionary within the strategy class.
The name of the variable can be chosen at will, but should be prefixed with `cust_` to avoid naming collisions with predefined strategy variables.
The name of the variable can be chosen at will, but should be prefixed with `custom_` to avoid naming collisions with predefined strategy variables.
```python
class AwesomeStrategy(IStrategy):
@@ -227,8 +227,8 @@ for val in self.buy_ema_short.range:
f'ema_short_{val}': ta.EMA(dataframe, timeperiod=val)
}))
# Append columns to existing dataframe
merged_frame = pd.concat(frames, axis=1)
# Combine all dataframes, and reassign the original dataframe column
dataframe = pd.concat(frames, axis=1)
```
Freqtrade does however also counter this by running `dataframe.copy()` on the dataframe right after the `populate_indicators()` method - so performance implications of this should be low to non-existent.

View File

@@ -43,7 +43,7 @@ class AwesomeStrategy(IStrategy):
if self.config['runmode'].value in ('live', 'dry_run'):
# Assign this to the class by using self.*
# can then be used by populate_* methods
self.cust_remote_data = requests.get('https://some_remote_source.example.com')
self.custom_remote_data = requests.get('https://some_remote_source.example.com')
```
@@ -352,7 +352,7 @@ class AwesomeStrategy(IStrategy):
# Convert absolute price to percentage relative to current_rate
if stoploss_price < current_rate:
return (stoploss_price / current_rate) - 1
return stoploss_from_absolute(stoploss_price, current_rate, is_short=trade.is_short)
# return maximum stoploss value, keeping current stoploss price unchanged
return 1

View File

@@ -342,16 +342,12 @@ The above configuration would therefore mean:
The calculation does include fees.
To disable ROI completely, set it to an insanely high number:
To disable ROI completely, set it to an empty dictionary:
```python
minimal_roi = {
"0": 100
}
minimal_roi = {}
```
While technically not completely disabled, this would exit once the trade reaches 10000% Profit.
To use times based on candle duration (timeframe), the following snippet can be handy.
This will allow you to change the timeframe for the strategy, and ROI times will still be set as candles (e.g. after 3 candles ...)

View File

@@ -578,7 +578,7 @@ def populate_any_indicators(
Features will now expand automatically. As such, the expansion loops, as well as the `{pair}` / `{timeframe}` parts will need to be removed.
``` python linenums="1"
def feature_engineering_expand_all(self, dataframe, period, **kwargs):
def feature_engineering_expand_all(self, dataframe, period, **kwargs) -> DataFrame:
"""
*Only functional with FreqAI enabled strategies*
This function will automatically expand the defined features on the config defined
@@ -638,7 +638,7 @@ Features will now expand automatically. As such, the expansion loops, as well as
Basic features. Make sure to remove the `{pair}` part from your features.
``` python linenums="1"
def feature_engineering_expand_basic(self, dataframe, **kwargs):
def feature_engineering_expand_basic(self, dataframe: DataFrame, **kwargs) -> DataFrame:
"""
*Only functional with FreqAI enabled strategies*
This function will automatically expand the defined features on the config defined
@@ -673,7 +673,7 @@ Basic features. Make sure to remove the `{pair}` part from your features.
### FreqAI - feature engineering standard
``` python linenums="1"
def feature_engineering_standard(self, dataframe, **kwargs):
def feature_engineering_standard(self, dataframe: DataFrame, **kwargs) -> DataFrame:
"""
*Only functional with FreqAI enabled strategies*
This optional function will be called once with the dataframe of the base timeframe.
@@ -704,7 +704,7 @@ Basic features. Make sure to remove the `{pair}` part from your features.
Targets now get their own, dedicated method.
``` python linenums="1"
def set_freqai_targets(self, dataframe, **kwargs):
def set_freqai_targets(self, dataframe: DataFrame, **kwargs) -> DataFrame:
"""
*Only functional with FreqAI enabled strategies*
Required function to set the targets for the model.
@@ -728,3 +728,86 @@ Targets now get their own, dedicated method.
return dataframe
```
### FreqAI - New data Pipeline
If you have created your own custom `IFreqaiModel` with a custom `train()`/`predict()` function, *and* you still rely on `data_cleaning_train/predict()`, then you will need to migrate to the new pipeline. If your model does *not* rely on `data_cleaning_train/predict()`, then you do not need to worry about this migration. That means that this migration guide is relevant for a very small percentage of power-users. If you stumbled upon this guide by mistake, feel free to inquire in depth about your problem in the Freqtrade discord server.
The conversion involves first removing `data_cleaning_train/predict()` and replacing them with a `define_data_pipeline()` and `define_label_pipeline()` function to your `IFreqaiModel` class:
```python linenums="1" hl_lines="11-14 47-49 55-57"
class MyCoolFreqaiModel(BaseRegressionModel):
"""
Some cool custom IFreqaiModel you made before Freqtrade version 2023.6
"""
def train(
self, unfiltered_df: DataFrame, pair: str, dk: FreqaiDataKitchen, **kwargs
) -> Any:
# ... your custom stuff
# Remove these lines
# data_dictionary = dk.make_train_test_datasets(features_filtered, labels_filtered)
# self.data_cleaning_train(dk)
# data_dictionary = dk.normalize_data(data_dictionary)
# (1)
# Add these lines. Now we control the pipeline fit/transform ourselves
dd = dk.make_train_test_datasets(features_filtered, labels_filtered)
dk.feature_pipeline = self.define_data_pipeline(threads=dk.thread_count)
dk.label_pipeline = self.define_label_pipeline(threads=dk.thread_count)
(dd["train_features"],
dd["train_labels"],
dd["train_weights"]) = dk.feature_pipeline.fit_transform(dd["train_features"],
dd["train_labels"],
dd["train_weights"])
(dd["test_features"],
dd["test_labels"],
dd["test_weights"]) = dk.feature_pipeline.transform(dd["test_features"],
dd["test_labels"],
dd["test_weights"])
dd["train_labels"], _, _ = dk.label_pipeline.fit_transform(dd["train_labels"])
dd["test_labels"], _, _ = dk.label_pipeline.transform(dd["test_labels"])
# ... your custom code
return model
def predict(
self, unfiltered_df: DataFrame, dk: FreqaiDataKitchen, **kwargs
) -> Tuple[DataFrame, npt.NDArray[np.int_]]:
# ... your custom stuff
# Remove these lines:
# self.data_cleaning_predict(dk)
# (2)
# Add these lines:
dk.data_dictionary["prediction_features"], outliers, _ = dk.feature_pipeline.transform(
dk.data_dictionary["prediction_features"], outlier_check=True)
# Remove this line
# pred_df = dk.denormalize_labels_from_metadata(pred_df)
# (3)
# Replace with these lines
pred_df, _, _ = dk.label_pipeline.inverse_transform(pred_df)
if self.freqai_info.get("DI_threshold", 0) > 0:
dk.DI_values = dk.feature_pipeline["di"].di_values
else:
dk.DI_values = np.zeros(outliers.shape[0])
dk.do_predict = outliers
# ... your custom code
return (pred_df, dk.do_predict)
```
1. Data normalization and cleaning is now homogenized with the new pipeline definition. This is created in the new `define_data_pipeline()` and `define_label_pipeline()` functions. The `data_cleaning_train()` and `data_cleaning_predict()` functions are no longer used. You can override `define_data_pipeline()` to create your own custom pipeline if you wish.
2. Data normalization and cleaning is now homogenized with the new pipeline definition. This is created in the new `define_data_pipeline()` and `define_label_pipeline()` functions. The `data_cleaning_train()` and `data_cleaning_predict()` functions are no longer used. You can override `define_data_pipeline()` to create your own custom pipeline if you wish.
3. Data denormalization is done with the new pipeline. Replace this with the lines below.

View File

@@ -187,11 +187,13 @@ official commands. You can ask at any moment for help with `/help`.
| `/forcelong <pair> [rate]` | Instantly buys the given pair. Rate is optional and only applies to limit orders. (`force_entry_enable` must be set to True)
| `/forceshort <pair> [rate]` | Instantly shorts the given pair. Rate is optional and only applies to limit orders. This will only work on non-spot markets. (`force_entry_enable` must be set to True)
| `/delete <trade_id>` | Delete a specific trade from the Database. Tries to close open orders. Requires manual handling of this trade on the exchange.
| `/reload_trade <trade_id>` | Reload a trade from the Exchange. Only works in live, and can potentially help recover a trade that was manually sold on the exchange.
| `/cancel_open_order <trade_id> | /coo <trade_id>` | Cancel an open order for a trade.
| **Metrics** |
| `/profit [<n>]` | Display a summary of your profit/loss from close trades and some stats about your performance, over the last n days (all trades by default)
| `/performance` | Show performance of each finished trade grouped by pair
| `/balance` | Show account balance per currency
| `/balance` | Show bot managed balance per currency
| `/balance full` | Show account balance per currency
| `/daily <n>` | Shows profit or loss per day, over the last n days (n defaults to 7)
| `/weekly <n>` | Shows profit or loss per week, over the last n weeks (n defaults to 8)
| `/monthly <n>` | Shows profit or loss per month, over the last n months (n defaults to 6)
@@ -202,7 +204,6 @@ official commands. You can ask at any moment for help with `/help`.
| `/blacklist [pair]` | Show the current blacklist, or adds a pair to the blacklist.
| `/edge` | Show validated pairs by Edge if it is enabled.
## Telegram commands in action
Below are examples of the Telegram messages you will receive for each command.

View File

@@ -723,6 +723,9 @@ usage: freqtrade backtesting-analysis [-h] [-v] [--logfile FILE] [-V]
[--exit-reason-list EXIT_REASON_LIST [EXIT_REASON_LIST ...]]
[--indicator-list INDICATOR_LIST [INDICATOR_LIST ...]]
[--timerange YYYYMMDD-[YYYYMMDD]]
[--rejected]
[--analysis-to-csv]
[--analysis-csv-path PATH]
optional arguments:
-h, --help show this help message and exit
@@ -736,19 +739,27 @@ optional arguments:
pair and enter_tag, 4: by pair, enter_ and exit_tag
(this can get quite large)
--enter-reason-list ENTER_REASON_LIST [ENTER_REASON_LIST ...]
Comma separated list of entry signals to analyse.
Default: all. e.g. 'entry_tag_a,entry_tag_b'
Space separated list of entry signals to analyse.
Default: all. e.g. 'entry_tag_a entry_tag_b'
--exit-reason-list EXIT_REASON_LIST [EXIT_REASON_LIST ...]
Comma separated list of exit signals to analyse.
Space separated list of exit signals to analyse.
Default: all. e.g.
'exit_tag_a,roi,stop_loss,trailing_stop_loss'
'exit_tag_a roi stop_loss trailing_stop_loss'
--indicator-list INDICATOR_LIST [INDICATOR_LIST ...]
Comma separated list of indicators to analyse. e.g.
'close,rsi,bb_lowerband,profit_abs'
Space separated list of indicators to analyse. e.g.
'close rsi bb_lowerband profit_abs'
--timerange YYYYMMDD-[YYYYMMDD]
Timerange to filter trades for analysis,
start inclusive, end exclusive. e.g.
20220101-20220201
--rejected-signals
Print out a table of rejected signals
--analysis-to-csv
Write out tables to individual CSVs, by default to
'user_data/backtest_results' unless '--analysis-csv-path' is given.
--analysis-csv-path [PATH]
Optional path where individual CSVs will be written. If not used,
CSVs will be written to 'user_data/backtest_results'.
Common arguments:
-v, --verbose Verbose mode (-vv for more, -vvv to get all messages).
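The comma-to-space wording change above follows from how these options are parsed: with argparse's `nargs='+'`, values are space separated on the command line and arrive as a Python list. A minimal standalone sketch (option name borrowed from the docs):

```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--enter-reason-list', nargs='+', default=['all'])

# Space-separated values become a list; no comma splitting is involved.
args = parser.parse_args(['--enter-reason-list', 'entry_tag_a', 'entry_tag_b'])
print(args.enter_reason_list)  # ['entry_tag_a', 'entry_tag_b']
```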

View File

View File

@@ -1,5 +1,5 @@
""" Freqtrade bot """
__version__ = '2023.4.dev'
__version__ = '2023.7.dev'
if 'dev' in __version__:
from pathlib import Path

View File

@@ -19,7 +19,8 @@ from freqtrade.commands.list_commands import (start_list_exchanges, start_list_f
start_list_markets, start_list_strategies,
start_list_timeframes, start_show_trades)
from freqtrade.commands.optimize_commands import (start_backtesting, start_backtesting_show,
start_edge, start_hyperopt)
start_edge, start_hyperopt,
start_lookahead_analysis)
from freqtrade.commands.pairlist_commands import start_test_pairlist
from freqtrade.commands.plot_commands import start_plot_dataframe, start_plot_profit
from freqtrade.commands.strategy_utils_commands import start_strategy_update

freqtrade/commands/arguments.py Normal file → Executable file
View File

@@ -46,7 +46,7 @@ ARGS_LIST_FREQAIMODELS = ["freqaimodel_path", "print_one_column", "print_coloriz
ARGS_LIST_HYPEROPTS = ["hyperopt_path", "print_one_column", "print_colorized"]
ARGS_BACKTEST_SHOW = ["exportfilename", "backtest_show_pair_list"]
ARGS_BACKTEST_SHOW = ["exportfilename", "backtest_show_pair_list", "backtest_breakdown"]
ARGS_LIST_EXCHANGES = ["print_one_column", "list_exchanges_all"]
@@ -67,8 +67,7 @@ ARGS_BUILD_STRATEGY = ["user_data_dir", "strategy", "template"]
ARGS_CONVERT_DATA = ["pairs", "format_from", "format_to", "erase", "exchange"]
ARGS_CONVERT_DATA_OHLCV = ARGS_CONVERT_DATA + ["timeframes", "trading_mode",
"candle_types"]
ARGS_CONVERT_DATA_OHLCV = ARGS_CONVERT_DATA + ["timeframes", "trading_mode", "candle_types"]
ARGS_CONVERT_TRADES = ["pairs", "timeframes", "exchange", "dataformat_ohlcv", "dataformat_trades"]
@@ -106,7 +105,8 @@ ARGS_HYPEROPT_SHOW = ["hyperopt_list_best", "hyperopt_list_profitable", "hyperop
"disableparamexport", "backtest_breakdown"]
ARGS_ANALYZE_ENTRIES_EXITS = ["exportfilename", "analysis_groups", "enter_reason_list",
"exit_reason_list", "indicator_list", "timerange"]
"exit_reason_list", "indicator_list", "timerange",
"analysis_rejected", "analysis_to_csv", "analysis_csv_path"]
NO_CONF_REQURIED = ["convert-data", "convert-trade-data", "download-data", "list-timeframes",
"list-markets", "list-pairs", "list-strategies", "list-freqaimodels",
@@ -116,7 +116,11 @@ NO_CONF_REQURIED = ["convert-data", "convert-trade-data", "download-data", "list
NO_CONF_ALLOWED = ["create-userdir", "list-exchanges", "new-strategy"]
ARGS_STRATEGY_UTILS = ["strategy_list", "strategy_path", "recursive_strategy_search"]
ARGS_STRATEGY_UPDATER = ["strategy_list", "strategy_path", "recursive_strategy_search"]
ARGS_LOOKAHEAD_ANALYSIS = [
a for a in ARGS_BACKTEST if a not in ("position_stacking", "use_max_market_positions", 'cache')
] + ["minimum_trade_amount", "targeted_trade_amount", "lookahead_analysis_exportfilename"]
class Arguments:
@@ -200,8 +204,9 @@ class Arguments:
start_install_ui, start_list_data, start_list_exchanges,
start_list_freqAI_models, start_list_markets,
start_list_strategies, start_list_timeframes,
start_new_config, start_new_strategy, start_plot_dataframe,
start_plot_profit, start_show_trades, start_strategy_update,
start_lookahead_analysis, start_new_config,
start_new_strategy, start_plot_dataframe, start_plot_profit,
start_show_trades, start_strategy_update,
start_test_pairlist, start_trading, start_webserver)
subparsers = self.parser.add_subparsers(dest='command',
@@ -450,4 +455,15 @@ class Arguments:
'files to the current version',
parents=[_common_parser])
strategy_updater_cmd.set_defaults(func=start_strategy_update)
self._build_args(optionlist=ARGS_STRATEGY_UTILS, parser=strategy_updater_cmd)
self._build_args(optionlist=ARGS_STRATEGY_UPDATER, parser=strategy_updater_cmd)
# Add lookahead_analysis subcommand
lookahead_analysis_cmd = subparsers.add_parser(
'lookahead-analysis',
help="Check for potential look ahead bias.",
parents=[_common_parser, _strategy_parser])
lookahead_analysis_cmd.set_defaults(func=start_lookahead_analysis)
self._build_args(optionlist=ARGS_LOOKAHEAD_ANALYSIS,
parser=lookahead_analysis_cmd)

freqtrade/commands/cli_options.py Normal file → Executable file
View File

@@ -381,7 +381,7 @@ AVAILABLE_CLI_OPTIONS = {
),
"candle_types": Arg(
'--candle-types',
help='Select candle type to use',
help='Select candle type to convert. Defaults to all available types.',
choices=[c.value for c in CandleType],
nargs='+',
),
@@ -450,14 +450,12 @@ AVAILABLE_CLI_OPTIONS = {
),
"exchange": Arg(
'--exchange',
help=f'Exchange name (default: `{constants.DEFAULT_EXCHANGE}`). '
f'Only valid if no config is provided.',
help='Exchange name. Only valid if no config is provided.',
),
"timeframes": Arg(
'-t', '--timeframes',
help='Specify which timeframes to download. Space-separated list. '
'Default: `1m 5m`.',
default=['1m', '5m'],
nargs='+',
),
"prepend_data": Arg(
@@ -636,30 +634,45 @@ AVAILABLE_CLI_OPTIONS = {
"4: by pair, enter_ and exit_tag (this can get quite large), "
"5: by exit_tag"),
nargs='+',
default=['0', '1', '2'],
default=[],
choices=['0', '1', '2', '3', '4', '5'],
),
"enter_reason_list": Arg(
"--enter-reason-list",
help=("Comma separated list of entry signals to analyse. Default: all. "
"e.g. 'entry_tag_a,entry_tag_b'"),
help=("Space separated list of entry signals to analyse. Default: all. "
"e.g. 'entry_tag_a entry_tag_b'"),
nargs='+',
default=['all'],
),
"exit_reason_list": Arg(
"--exit-reason-list",
help=("Comma separated list of exit signals to analyse. Default: all. "
"e.g. 'exit_tag_a,roi,stop_loss,trailing_stop_loss'"),
help=("Space separated list of exit signals to analyse. Default: all. "
"e.g. 'exit_tag_a roi stop_loss trailing_stop_loss'"),
nargs='+',
default=['all'],
),
"indicator_list": Arg(
"--indicator-list",
help=("Comma separated list of indicators to analyse. "
"e.g. 'close,rsi,bb_lowerband,profit_abs'"),
help=("Space separated list of indicators to analyse. "
"e.g. 'close rsi bb_lowerband profit_abs'"),
nargs='+',
default=[],
),
"analysis_rejected": Arg(
'--rejected-signals',
help='Analyse rejected signals',
action='store_true',
),
"analysis_to_csv": Arg(
'--analysis-to-csv',
help='Save selected analysis tables to individual CSVs',
action='store_true',
),
"analysis_csv_path": Arg(
'--analysis-csv-path',
help=("Specify a path to save the analysis CSVs "
"if --analysis-to-csv is enabled. Default: user_data/basktesting_results/"),
),
"freqaimodel": Arg(
'--freqaimodel',
help='Specify a custom freqaimodel.',
@@ -675,4 +688,21 @@ AVAILABLE_CLI_OPTIONS = {
help='Run backtest with ready models.',
action='store_true'
),
"minimum_trade_amount": Arg(
'--minimum-trade-amount',
help='Minimum trade amount for lookahead-analysis',
type=check_int_positive,
metavar='INT',
),
"targeted_trade_amount": Arg(
'--targeted-trade-amount',
help='Targeted trade amount for lookahead analysis',
type=check_int_positive,
metavar='INT',
),
"lookahead_analysis_exportfilename": Arg(
'--lookahead-analysis-exportfilename',
help="Use this csv-filename to store lookahead-analysis-results",
type=str
),
}

View File

@@ -1,18 +1,16 @@
import logging
import sys
from collections import defaultdict
from datetime import datetime, timedelta
from typing import Any, Dict, List
from typing import Any, Dict
from freqtrade.configuration import TimeRange, setup_utils_configuration
from freqtrade.constants import DATETIME_PRINT_FORMAT, Config
from freqtrade.constants import DATETIME_PRINT_FORMAT, DL_DATA_TIMEFRAMES, Config
from freqtrade.data.converter import convert_ohlcv_format, convert_trades_format
from freqtrade.data.history import (convert_trades_to_ohlcv, refresh_backtest_ohlcv_data,
refresh_backtest_trades_data)
from freqtrade.enums import CandleType, RunMode, TradingMode
from freqtrade.data.history import convert_trades_to_ohlcv, download_data_main
from freqtrade.enums import RunMode, TradingMode
from freqtrade.exceptions import OperationalException
from freqtrade.exchange import market_is_active, timeframe_to_minutes
from freqtrade.plugins.pairlist.pairlist_helpers import dynamic_expand_pairlist, expand_pairlist
from freqtrade.exchange import timeframe_to_minutes
from freqtrade.plugins.pairlist.pairlist_helpers import expand_pairlist
from freqtrade.resolvers import ExchangeResolver
from freqtrade.util.binance_mig import migrate_binance_futures_data
@@ -20,7 +18,7 @@ from freqtrade.util.binance_mig import migrate_binance_futures_data
logger = logging.getLogger(__name__)
def _data_download_sanity(config: Config) -> None:
def _check_data_config_download_sanity(config: Config) -> None:
if 'days' in config and 'timerange' in config:
raise OperationalException("--days and --timerange are mutually exclusive. "
"You can only specify one or the other.")
@@ -37,78 +35,14 @@ def start_download_data(args: Dict[str, Any]) -> None:
"""
config = setup_utils_configuration(args, RunMode.UTIL_EXCHANGE)
_data_download_sanity(config)
timerange = TimeRange()
if 'days' in config:
time_since = (datetime.now() - timedelta(days=config['days'])).strftime("%Y%m%d")
timerange = TimeRange.parse_timerange(f'{time_since}-')
if 'timerange' in config:
timerange = timerange.parse_timerange(config['timerange'])
# Remove stake-currency to skip checks which are not relevant for datadownload
config['stake_currency'] = ''
pairs_not_available: List[str] = []
# Init exchange
exchange = ExchangeResolver.load_exchange(config['exchange']['name'], config, validate=False)
markets = [p for p, m in exchange.markets.items() if market_is_active(m)
or config.get('include_inactive')]
expanded_pairs = dynamic_expand_pairlist(config, markets)
# Manual validations of relevant settings
if not config['exchange'].get('skip_pair_validation', False):
exchange.validate_pairs(expanded_pairs)
logger.info(f"About to download pairs: {expanded_pairs}, "
f"intervals: {config['timeframes']} to {config['datadir']}")
for timeframe in config['timeframes']:
exchange.validate_timeframes(timeframe)
_check_data_config_download_sanity(config)
try:
if config.get('download_trades'):
if config.get('trading_mode') == 'futures':
raise OperationalException("Trade download not supported for futures.")
pairs_not_available = refresh_backtest_trades_data(
exchange, pairs=expanded_pairs, datadir=config['datadir'],
timerange=timerange, new_pairs_days=config['new_pairs_days'],
erase=bool(config.get('erase')), data_format=config['dataformat_trades'])
# Convert downloaded trade data to different timeframes
convert_trades_to_ohlcv(
pairs=expanded_pairs, timeframes=config['timeframes'],
datadir=config['datadir'], timerange=timerange, erase=bool(config.get('erase')),
data_format_ohlcv=config['dataformat_ohlcv'],
data_format_trades=config['dataformat_trades'],
)
else:
if not exchange.get_option('ohlcv_has_history', True):
raise OperationalException(
f"Historic klines not available for {exchange.name}. "
"Please use `--dl-trades` instead for this exchange "
"(will unfortunately take a long time)."
)
migrate_binance_futures_data(config)
pairs_not_available = refresh_backtest_ohlcv_data(
exchange, pairs=expanded_pairs, timeframes=config['timeframes'],
datadir=config['datadir'], timerange=timerange,
new_pairs_days=config['new_pairs_days'],
erase=bool(config.get('erase')), data_format=config['dataformat_ohlcv'],
trading_mode=config.get('trading_mode', 'spot'),
prepend=config.get('prepend_data', False)
)
download_data_main(config)
except KeyboardInterrupt:
sys.exit("SIGINT received, aborting ...")
finally:
if pairs_not_available:
logger.info(f"Pairs [{','.join(pairs_not_available)}] not available "
f"on exchange {exchange.name}.")
def start_convert_trades(args: Dict[str, Any]) -> None:
@@ -123,9 +57,11 @@ def start_convert_trades(args: Dict[str, Any]) -> None:
raise OperationalException(
"Downloading data requires a list of pairs. "
"Please check the documentation on how to configure this.")
if 'timeframes' not in config:
config['timeframes'] = DL_DATA_TIMEFRAMES
# Init exchange
exchange = ExchangeResolver.load_exchange(config['exchange']['name'], config, validate=False)
exchange = ExchangeResolver.load_exchange(config, validate=False)
# Manual validations of relevant settings
if not config['exchange'].get('skip_pair_validation', False):
exchange.validate_pairs(config['pairs'])
@@ -152,11 +88,10 @@ def start_convert_data(args: Dict[str, Any], ohlcv: bool = True) -> None:
config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)
if ohlcv:
migrate_binance_futures_data(config)
candle_types = [CandleType.from_string(ct) for ct in config.get('candle_types', ['spot'])]
for candle_type in candle_types:
convert_ohlcv_format(config,
convert_from=args['format_from'], convert_to=args['format_to'],
erase=args['erase'], candle_type=candle_type)
convert_ohlcv_format(config,
convert_from=args['format_from'],
convert_to=args['format_to'],
erase=args['erase'])
else:
convert_trades_format(config,
convert_from=args['format_from'], convert_to=args['format_to'],

View File

@@ -1,7 +1,7 @@
import csv
import logging
import sys
from typing import Any, Dict, List
from typing import Any, Dict, List, Union
import rapidjson
from colorama import Fore, Style
@@ -11,9 +11,10 @@ from tabulate import tabulate
from freqtrade.configuration import setup_utils_configuration
from freqtrade.enums import RunMode
from freqtrade.exceptions import OperationalException
from freqtrade.exchange import market_is_active, validate_exchanges
from freqtrade.exchange import list_available_exchanges, market_is_active
from freqtrade.misc import parse_db_uri_for_logging, plural
from freqtrade.resolvers import ExchangeResolver, StrategyResolver
from freqtrade.types import ValidExchangesType
logger = logging.getLogger(__name__)
@@ -25,18 +26,42 @@ def start_list_exchanges(args: Dict[str, Any]) -> None:
:param args: Cli args from Arguments()
:return: None
"""
exchanges = validate_exchanges(args['list_exchanges_all'])
exchanges = list_available_exchanges(args['list_exchanges_all'])
if args['print_one_column']:
print('\n'.join([e[0] for e in exchanges]))
print('\n'.join([e['name'] for e in exchanges]))
else:
headers = {
'name': 'Exchange name',
'supported': 'Supported',
'trade_modes': 'Markets',
'comment': 'Reason',
}
headers.update({'valid': 'Valid'} if args['list_exchanges_all'] else {})
def build_entry(exchange: ValidExchangesType, valid: bool):
valid_entry = {'valid': exchange['valid']} if valid else {}
result: Dict[str, Union[str, bool]] = {
'name': exchange['name'],
**valid_entry,
'supported': 'Official' if exchange['supported'] else '',
'trade_modes': ', '.join(
(f"{a['margin_mode']} " if a['margin_mode'] else '') + a['trading_mode']
for a in exchange['trade_modes']
),
'comment': exchange['comment'],
}
return result
if args['list_exchanges_all']:
print("All exchanges supported by the ccxt library:")
exchanges = [build_entry(e, True) for e in exchanges]
else:
print("Exchanges available for Freqtrade:")
exchanges = [e for e in exchanges if e[1] is not False]
exchanges = [build_entry(e, False) for e in exchanges if e['valid'] is not False]
print(tabulate(exchanges, headers=['Exchange name', 'Valid', 'reason']))
print(tabulate(exchanges, headers=headers))
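For reference, a small illustration of what the `trade_modes` join in `build_entry()` produces; the entry values below are assumed sample data:

```python
# Illustrative only; the trade_modes entries are assumed sample data.
trade_modes = [
    {'trading_mode': 'spot', 'margin_mode': ''},
    {'trading_mode': 'futures', 'margin_mode': 'isolated'},
]
print(', '.join(
    (f"{a['margin_mode']} " if a['margin_mode'] else '') + a['trading_mode']
    for a in trade_modes
))
# -> spot, isolated futures
```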
def _print_objs_tabular(objs: List, print_colorized: bool) -> None:
@@ -114,7 +139,7 @@ def start_list_timeframes(args: Dict[str, Any]) -> None:
config['timeframe'] = None
# Init exchange
exchange = ExchangeResolver.load_exchange(config['exchange']['name'], config, validate=False)
exchange = ExchangeResolver.load_exchange(config, validate=False)
if args['print_one_column']:
print('\n'.join(exchange.timeframes))
@@ -133,7 +158,7 @@ def start_list_markets(args: Dict[str, Any], pairs_only: bool = False) -> None:
config = setup_utils_configuration(args, RunMode.UTIL_EXCHANGE)
# Init exchange
exchange = ExchangeResolver.load_exchange(config['exchange']['name'], config, validate=False)
exchange = ExchangeResolver.load_exchange(config, validate=False)
# By default only active pairs/markets are to be shown
active_only = not args.get('list_pairs_all', False)

View File

@@ -132,3 +132,15 @@ def start_edge(args: Dict[str, Any]) -> None:
# Initialize Edge object
edge_cli = EdgeCli(config)
edge_cli.start()
def start_lookahead_analysis(args: Dict[str, Any]) -> None:
"""
Start the backtest bias tester script
:param args: Cli args from Arguments()
:return: None
"""
from freqtrade.optimize.lookahead_analysis_helpers import LookaheadAnalysisSubFunctions
config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)
LookaheadAnalysisSubFunctions.start(config)

View File

@@ -18,7 +18,7 @@ def start_test_pairlist(args: Dict[str, Any]) -> None:
from freqtrade.plugins.pairlistmanager import PairListManager
config = setup_utils_configuration(args, RunMode.UTIL_EXCHANGE)
exchange = ExchangeResolver.load_exchange(config['exchange']['name'], config, validate=False)
exchange = ExchangeResolver.load_exchange(config, validate=False)
quote_currencies = args.get('quote_currencies')
if not quote_currencies:

View File

@@ -174,7 +174,7 @@ def _validate_whitelist(conf: Dict[str, Any]) -> None:
return
for pl in conf.get('pairlists', [{'method': 'StaticPairList'}]):
if (pl.get('method') == 'StaticPairList'
if (isinstance(pl, dict) and pl.get('method') == 'StaticPairList'
and not conf.get('exchange', {}).get('pair_whitelist')):
raise OperationalException("StaticPairList requires pair_whitelist to be set.")

View File

@@ -203,7 +203,7 @@ class Configuration:
# This will override the strategy configuration
self._args_to_config(config, argname='timeframe',
logstring='Parameter -i/--timeframe detected ... '
'Using timeframe: {} ...')
'Using timeframe: {} ...')
self._args_to_config(config, argname='position_stacking',
logstring='Parameter --enable-position-stacking detected ...')
@@ -300,6 +300,9 @@ class Configuration:
self._args_to_config(config, argname='hyperoptexportfilename',
logstring='Using hyperopt file: {}')
self._args_to_config(config, argname='lookahead_analysis_exportfilename',
logstring='Saving lookahead analysis results into {} ...')
self._args_to_config(config, argname='epochs',
logstring='Parameter --epochs detected ... '
'Will run Hyperopt with for {} epochs ...'
@@ -465,6 +468,28 @@ class Configuration:
self._args_to_config(config, argname='timerange',
logstring='Filter trades by timerange: {}')
self._args_to_config(config, argname='analysis_rejected',
logstring='Analyse rejected signals: {}')
self._args_to_config(config, argname='analysis_to_csv',
logstring='Store analysis tables to CSV: {}')
self._args_to_config(config, argname='analysis_csv_path',
logstring='Path to store analysis CSVs: {}')
# Lookahead analysis results
self._args_to_config(config, argname='targeted_trade_amount',
logstring='Targeted Trade amount: {}')
self._args_to_config(config, argname='minimum_trade_amount',
logstring='Minimum Trade amount: {}')
self._args_to_config(config, argname='lookahead_analysis_exportfilename',
logstring='Path to store lookahead-analysis-results: {}')
def _process_runmode(self, config: Config) -> None:
self._args_to_config(config, argname='dry_run',
@@ -543,6 +568,7 @@ class Configuration:
# Fall back to /dl_path/pairs.json
pairs_file = config['datadir'] / 'pairs.json'
if pairs_file.exists():
logger.info(f'Reading pairs file "{pairs_file}".')
config['pairs'] = load_file(pairs_file)
if 'pairs' in config and isinstance(config['pairs'], list):
config['pairs'].sort()

View File

@@ -6,7 +6,7 @@ import re
from datetime import datetime, timezone
from typing import Optional
import arrow
from typing_extensions import Self
from freqtrade.constants import DATETIME_PRINT_FORMAT
from freqtrade.exceptions import OperationalException
@@ -109,15 +109,15 @@ class TimeRange:
self.startts = int(min_date.timestamp() + timeframe_secs * startup_candles)
self.starttype = 'date'
@staticmethod
def parse_timerange(text: Optional[str]) -> 'TimeRange':
@classmethod
def parse_timerange(cls, text: Optional[str]) -> Self:
"""
Parse the value of the argument --timerange to determine what is the range desired
:param text: value from --timerange
:return: Start and End range period
"""
if not text:
return TimeRange(None, None, 0, 0)
return cls(None, None, 0, 0)
syntax = [(r'^-(\d{8})$', (None, 'date')),
(r'^(\d{8})-$', ('date', None)),
(r'^(\d{8})-(\d{8})$', ('date', 'date')),
@@ -139,7 +139,8 @@ class TimeRange:
if stype[0]:
starts = rvals[index]
if stype[0] == 'date' and len(starts) == 8:
start = arrow.get(starts, 'YYYYMMDD').int_timestamp
start = int(datetime.strptime(starts, '%Y%m%d').replace(
tzinfo=timezone.utc).timestamp())
elif len(starts) == 13:
start = int(starts) // 1000
else:
@@ -148,7 +149,8 @@ class TimeRange:
if stype[1]:
stops = rvals[index]
if stype[1] == 'date' and len(stops) == 8:
stop = arrow.get(stops, 'YYYYMMDD').int_timestamp
stop = int(datetime.strptime(stops, '%Y%m%d').replace(
tzinfo=timezone.utc).timestamp())
elif len(stops) == 13:
stop = int(stops) // 1000
else:
@@ -156,5 +158,5 @@ class TimeRange:
if start > stop > 0:
raise OperationalException(
f'Start date is after stop date for timerange "{text}"')
return TimeRange(stype[0], stype[1], start, stop)
return cls(stype[0], stype[1], start, stop)
raise OperationalException(f'Incorrect syntax for timerange "{text}"')
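A short sketch of what the `@classmethod` + `Self` change buys: subclasses now get instances of their own type back from `parse_timerange()`. The subclass here is hypothetical:

```python
from freqtrade.configuration import TimeRange


class LoggedTimeRange(TimeRange):  # hypothetical subclass for illustration
    pass


tr = LoggedTimeRange.parse_timerange("20220101-20220201")
print(type(tr).__name__)  # LoggedTimeRange (the old staticmethod returned TimeRange)
```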

View File

@@ -8,8 +8,8 @@ from typing import Any, Dict, List, Literal, Tuple
from freqtrade.enums import CandleType, PriceType, RPCMessageType
DOCS_LINK = "https://www.freqtrade.io/en/stable"
DEFAULT_CONFIG = 'config.json'
DEFAULT_EXCHANGE = 'bittrex'
PROCESS_THROTTLE_SECS = 5 # sec
HYPEROPT_EPOCH = 100 # epochs
RETRY_TIMEOUT = 30 # sec
@@ -65,6 +65,7 @@ TELEGRAM_SETTING_OPTIONS = ['on', 'off', 'silent']
WEBHOOK_FORMAT_OPTIONS = ['form', 'json', 'raw']
FULL_DATAFRAME_THRESHOLD = 100
CUSTOM_TAG_MAX_LENGTH = 255
DL_DATA_TIMEFRAMES = ['1m', '5m']
ENV_VAR_PREFIX = 'FREQTRADE__'
@@ -111,6 +112,8 @@ MINIMAL_CONFIG = {
}
}
__MESSAGE_TYPE_DICT: Dict[str, Dict[str, str]] = {x: {'type': 'object'} for x in RPCMessageType}
# Required json-schema for user specified config
CONF_SCHEMA = {
'type': 'object',
@@ -148,7 +151,6 @@ CONF_SCHEMA = {
'patternProperties': {
'^[0-9.]+$': {'type': 'number'}
},
'minProperties': 1
},
'amount_reserve_percent': {'type': 'number', 'minimum': 0.0, 'maximum': 0.5},
'stoploss': {'type': 'number', 'maximum': 0, 'exclusiveMaximum': True, 'minimum': -1},
@@ -164,6 +166,9 @@ CONF_SCHEMA = {
'trading_mode': {'type': 'string', 'enum': TRADING_MODES},
'margin_mode': {'type': 'string', 'enum': MARGIN_MODES},
'reduce_df_footprint': {'type': 'boolean', 'default': False},
'minimum_trade_amount': {'type': 'number', 'default': 10},
'targeted_trade_amount': {'type': 'number', 'default': 20},
'lookahead_analysis_exportfilename': {'type': 'string'},
'liquidation_buffer': {'type': 'number', 'minimum': 0.0, 'maximum': 0.99},
'backtest_breakdown': {
'type': 'array',
@@ -351,7 +356,8 @@ CONF_SCHEMA = {
'format': {'type': 'string', 'enum': WEBHOOK_FORMAT_OPTIONS, 'default': 'form'},
'retries': {'type': 'integer', 'minimum': 0},
'retry_delay': {'type': 'number', 'minimum': 0},
**dict([(x, {'type': 'object'}) for x in RPCMessageType]),
**__MESSAGE_TYPE_DICT,
# **{x: {'type': 'object'} for x in RPCMessageType},
# Below -> Deprecated
'webhookentry': {'type': 'object'},
'webhookentrycancel': {'type': 'object'},
@@ -690,4 +696,6 @@ BidAsk = Literal['bid', 'ask']
OBLiteral = Literal['asks', 'bids']
Config = Dict[str, Any]
# Exchange part of the configuration.
ExchangeConfig = Dict[str, Any]
IntOrInf = float

View File

@@ -11,7 +11,7 @@ import pandas as pd
from pandas import DataFrame, to_datetime
from freqtrade.constants import DEFAULT_DATAFRAME_COLUMNS, DEFAULT_TRADES_COLUMNS, Config, TradeList
from freqtrade.enums import CandleType
from freqtrade.enums import CandleType, TradingMode
logger = logging.getLogger(__name__)
@@ -264,7 +264,6 @@ def convert_ohlcv_format(
convert_from: str,
convert_to: str,
erase: bool,
candle_type: CandleType
):
"""
Convert OHLCV from one format to another
@@ -272,7 +271,6 @@ def convert_ohlcv_format(
:param convert_from: Source format
:param convert_to: Target format
:param erase: Erase source data (does not apply if source and target format are identical)
:param candle_type: Any of the enum CandleType (must match trading mode!)
"""
from freqtrade.data.history.idatahandler import get_datahandler
src = get_datahandler(config['datadir'], convert_from)
@@ -280,37 +278,45 @@ def convert_ohlcv_format(
timeframes = config.get('timeframes', [config.get('timeframe')])
logger.info(f"Converting candle (OHLCV) for timeframe {timeframes}")
if 'pairs' not in config:
config['pairs'] = []
# Check timeframes or fall back to timeframe.
for timeframe in timeframes:
config['pairs'].extend(src.ohlcv_get_pairs(
config['datadir'],
timeframe,
candle_type=candle_type
))
config['pairs'] = sorted(set(config['pairs']))
logger.info(f"Converting candle (OHLCV) data for {config['pairs']}")
candle_types = [CandleType.from_string(ct) for ct in config.get('candle_types', [
c.value for c in CandleType])]
logger.info(candle_types)
paircombs = src.ohlcv_get_available_data(config['datadir'], TradingMode.SPOT)
paircombs.extend(src.ohlcv_get_available_data(config['datadir'], TradingMode.FUTURES))
for timeframe in timeframes:
for pair in config['pairs']:
data = src.ohlcv_load(pair=pair, timeframe=timeframe,
timerange=None,
fill_missing=False,
drop_incomplete=False,
startup_candles=0,
candle_type=candle_type)
logger.info(f"Converting {len(data)} {timeframe} {candle_type} candles for {pair}")
if len(data) > 0:
trg.ohlcv_store(
pair=pair,
timeframe=timeframe,
data=data,
candle_type=candle_type
)
if erase and convert_from != convert_to:
logger.info(f"Deleting source data for {pair} / {timeframe}")
src.ohlcv_purge(pair=pair, timeframe=timeframe, candle_type=candle_type)
if 'pairs' in config:
# Filter pairs
paircombs = [comb for comb in paircombs if comb[0] in config['pairs']]
if 'timeframes' in config:
paircombs = [comb for comb in paircombs if comb[1] in config['timeframes']]
paircombs = [comb for comb in paircombs if comb[2] in candle_types]
paircombs = sorted(paircombs, key=lambda x: (x[0], x[1], x[2].value))
formatted_paircombs = '\n'.join([f"{pair}, {timeframe}, {candle_type}"
for pair, timeframe, candle_type in paircombs])
logger.info(f"Converting candle (OHLCV) data for the following pair combinations:\n"
f"{formatted_paircombs}")
for pair, timeframe, candle_type in paircombs:
data = src.ohlcv_load(pair=pair, timeframe=timeframe,
timerange=None,
fill_missing=False,
drop_incomplete=False,
startup_candles=0,
candle_type=candle_type)
logger.info(f"Converting {len(data)} {timeframe} {candle_type} candles for {pair}")
if len(data) > 0:
trg.ohlcv_store(
pair=pair,
timeframe=timeframe,
data=data,
candle_type=candle_type
)
if erase and convert_from != convert_to:
logger.info(f"Deleting source data for {pair} / {timeframe}")
src.ohlcv_purge(pair=pair, timeframe=timeframe, candle_type=candle_type)
def reduce_dataframe_footprint(df: DataFrame) -> DataFrame:

View File

@@ -1,5 +1,6 @@
import logging
from pathlib import Path
from typing import List
import joblib
import pandas as pd
@@ -15,22 +16,31 @@ from freqtrade.exceptions import OperationalException
logger = logging.getLogger(__name__)
def _load_signal_candles(backtest_dir: Path):
def _load_backtest_analysis_data(backtest_dir: Path, name: str):
if backtest_dir.is_dir():
scpf = Path(backtest_dir,
Path(get_latest_backtest_filename(backtest_dir)).stem + "_signals.pkl"
Path(get_latest_backtest_filename(backtest_dir)).stem + "_" + name + ".pkl"
)
else:
scpf = Path(backtest_dir.parent / f"{backtest_dir.stem}_signals.pkl")
scpf = Path(backtest_dir.parent / f"{backtest_dir.stem}_{name}.pkl")
try:
with scpf.open("rb") as scp:
signal_candles = joblib.load(scp)
logger.info(f"Loaded signal candles: {str(scpf)}")
loaded_data = joblib.load(scp)
logger.info(f"Loaded {name} candles: {str(scpf)}")
except Exception as e:
logger.error("Cannot load signal candles from pickled results: ", e)
logger.error(f"Cannot load {name} data from pickled results: ", e)
return None
return signal_candles
return loaded_data
def _load_rejected_signals(backtest_dir: Path):
return _load_backtest_analysis_data(backtest_dir, "rejected")
def _load_signal_candles(backtest_dir: Path):
return _load_backtest_analysis_data(backtest_dir, "signals")
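A hedged usage sketch of the generalized loader; the module path and results directory are assumed from the diff:

```python
from pathlib import Path

from freqtrade.data.entryexitanalysis import _load_backtest_analysis_data

results_dir = Path("user_data/backtest_results")  # assumed location
signal_candles = _load_backtest_analysis_data(results_dir, "signals")     # <name>_signals.pkl
rejected_signals = _load_backtest_analysis_data(results_dir, "rejected")  # <name>_rejected.pkl
```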
def _process_candles_and_indicators(pairlist, strategy_name, trades, signal_candles):
@@ -43,9 +53,7 @@ def _process_candles_and_indicators(pairlist, strategy_name, trades, signal_cand
for pair in pairlist:
if pair in signal_candles[strategy_name]:
analysed_trades_dict[strategy_name][pair] = _analyze_candles_and_indicators(
pair,
trades,
signal_candles[strategy_name][pair])
pair, trades, signal_candles[strategy_name][pair])
except Exception as e:
print(f"Cannot process entry/exit reasons for {strategy_name}: ", e)
@@ -85,7 +93,7 @@ def _analyze_candles_and_indicators(pair, trades: pd.DataFrame, signal_candles:
return pd.DataFrame()
def _do_group_table_output(bigdf, glist):
def _do_group_table_output(bigdf, glist, csv_path: Path, to_csv=False):
for g in glist:
# 0: summary wins/losses grouped by enter tag
if g == "0":
@@ -116,7 +124,8 @@ def _do_group_table_output(bigdf, glist):
sortcols = ['total_num_buys']
_print_table(new, sortcols, show_index=True)
_print_table(new, sortcols, show_index=True, name="Group 0:",
to_csv=to_csv, csv_path=csv_path)
else:
agg_mask = {'profit_abs': ['count', 'sum', 'median', 'mean'],
@@ -154,11 +163,24 @@ def _do_group_table_output(bigdf, glist):
new['mean_profit_pct'] = new['mean_profit_pct'] * 100
new['total_profit_pct'] = new['total_profit_pct'] * 100
_print_table(new, sortcols)
_print_table(new, sortcols, name=f"Group {g}:",
to_csv=to_csv, csv_path=csv_path)
else:
logger.warning("Invalid group mask specified.")
def _do_rejected_signals_output(rejected_signals_df: pd.DataFrame,
to_csv: bool = False, csv_path=None) -> None:
cols = ['pair', 'date', 'enter_tag']
sortcols = ['date', 'pair', 'enter_tag']
_print_table(rejected_signals_df[cols],
sortcols,
show_index=False,
name="Rejected Signals:",
to_csv=to_csv,
csv_path=csv_path)
def _select_rows_within_dates(df, timerange=None, df_date_col: str = 'date'):
if timerange:
if timerange.starttype == 'date':
@@ -192,38 +214,64 @@ def prepare_results(analysed_trades, stratname,
return res_df
def print_results(res_df, analysis_groups, indicator_list):
def print_results(res_df: pd.DataFrame, analysis_groups: List[str], indicator_list: List[str],
csv_path: Path, rejected_signals=None, to_csv=False):
if res_df.shape[0] > 0:
if analysis_groups:
_do_group_table_output(res_df, analysis_groups)
_do_group_table_output(res_df, analysis_groups, to_csv=to_csv, csv_path=csv_path)
if rejected_signals is not None:
if rejected_signals.empty:
print("There were no rejected signals.")
else:
_do_rejected_signals_output(rejected_signals, to_csv=to_csv, csv_path=csv_path)
# NB this can be large for big dataframes!
if "all" in indicator_list:
print(res_df)
elif indicator_list is not None:
_print_table(res_df,
show_index=False,
name="Indicators:",
to_csv=to_csv,
csv_path=csv_path)
elif indicator_list is not None and indicator_list:
available_inds = []
for ind in indicator_list:
if ind in res_df:
available_inds.append(ind)
ilist = ["pair", "enter_reason", "exit_reason"] + available_inds
_print_table(res_df[ilist], sortcols=['exit_reason'], show_index=False)
_print_table(res_df[ilist],
sortcols=['exit_reason'],
show_index=False,
name="Indicators:",
to_csv=to_csv,
csv_path=csv_path)
else:
print("\\No trades to show")
def _print_table(df, sortcols=None, show_index=False):
def _print_table(df: pd.DataFrame, sortcols=None, *, show_index=False, name=None,
to_csv=False, csv_path: Path):
if (sortcols is not None):
data = df.sort_values(sortcols)
else:
data = df
print(
tabulate(
data,
headers='keys',
tablefmt='psql',
showindex=show_index
if to_csv:
safe_name = Path(csv_path, name.lower().replace(" ", "_").replace(":", "") + ".csv")
data.to_csv(safe_name)
print(f"Saved {name} to {safe_name}")
else:
if name is not None:
print(name)
print(
tabulate(
data,
headers='keys',
tablefmt='psql',
showindex=show_index
)
)
)
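An illustrative one-liner for how table names map to CSV filenames under the new `to_csv` branch of `_print_table()`:

```python
# Mirrors the name.lower().replace(...) logic shown above.
for name in ("Group 0:", "Rejected Signals:", "Indicators:"):
    print(name.lower().replace(" ", "_").replace(":", "") + ".csv")
# group_0.csv
# rejected_signals.csv
# indicators.csv
```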
def process_entry_exit_reasons(config: Config):
@@ -232,6 +280,11 @@ def process_entry_exit_reasons(config: Config):
enter_reason_list = config.get('enter_reason_list', ["all"])
exit_reason_list = config.get('exit_reason_list', ["all"])
indicator_list = config.get('indicator_list', [])
do_rejected = config.get('analysis_rejected', False)
to_csv = config.get('analysis_to_csv', False)
csv_path = Path(config.get('analysis_csv_path', config['exportfilename']))
if to_csv and not csv_path.is_dir():
raise OperationalException(f"Specified directory {csv_path} does not exist.")
timerange = TimeRange.parse_timerange(None if config.get(
'timerange') is None else str(config.get('timerange')))
@@ -241,8 +294,16 @@ def process_entry_exit_reasons(config: Config):
for strategy_name, results in backtest_stats['strategy'].items():
trades = load_backtest_data(config['exportfilename'], strategy_name)
if not trades.empty:
if trades is not None and not trades.empty:
signal_candles = _load_signal_candles(config['exportfilename'])
rej_df = None
if do_rejected:
rejected_signals_dict = _load_rejected_signals(config['exportfilename'])
rej_df = prepare_results(rejected_signals_dict, strategy_name,
enter_reason_list, exit_reason_list,
timerange=timerange)
analysed_trades_dict = _process_candles_and_indicators(
config['exchange']['pair_whitelist'], strategy_name,
trades, signal_candles)
@@ -253,7 +314,10 @@ def process_entry_exit_reasons(config: Config):
print_results(res_df,
analysis_groups,
indicator_list)
indicator_list,
rejected_signals=rej_df,
to_csv=to_csv,
csv_path=csv_path)
except ValueError as e:
raise OperationalException(e) from e

View File

@@ -6,7 +6,7 @@ Includes:
* download data from exchange and store to disk
"""
# flake8: noqa: F401
from .history_utils import (convert_trades_to_ohlcv, get_timerange, load_data, load_pair_history,
refresh_backtest_ohlcv_data, refresh_backtest_trades_data, refresh_data,
validate_backtest_data)
from .history_utils import (convert_trades_to_ohlcv, download_data_main, get_timerange, load_data,
load_pair_history, refresh_backtest_ohlcv_data,
refresh_backtest_trades_data, refresh_data, validate_backtest_data)
from .idatahandler import get_datahandler

View File

@@ -1,21 +1,23 @@
import logging
import operator
from datetime import datetime
from datetime import datetime, timedelta
from pathlib import Path
from typing import Dict, List, Optional, Tuple
import arrow
from pandas import DataFrame, concat
from freqtrade.configuration import TimeRange
from freqtrade.constants import DEFAULT_DATAFRAME_COLUMNS
from freqtrade.constants import (DATETIME_PRINT_FORMAT, DEFAULT_DATAFRAME_COLUMNS,
DL_DATA_TIMEFRAMES, Config)
from freqtrade.data.converter import (clean_ohlcv_dataframe, ohlcv_to_dataframe,
trades_remove_duplicates, trades_to_ohlcv)
from freqtrade.data.history.idatahandler import IDataHandler, get_datahandler
from freqtrade.enums import CandleType
from freqtrade.exceptions import OperationalException
from freqtrade.exchange import Exchange
from freqtrade.misc import format_ms_time
from freqtrade.plugins.pairlist.pairlist_helpers import dynamic_expand_pairlist
from freqtrade.util import format_ms_time
from freqtrade.util.binance_mig import migrate_binance_futures_data
logger = logging.getLogger(__name__)
@@ -228,16 +230,18 @@ def _download_pair_history(pair: str, *,
)
logger.debug("Current Start: %s",
f"{data.iloc[0]['date']:DATETIME_PRINT_FORMAT}" if not data.empty else 'None')
f"{data.iloc[0]['date']:{DATETIME_PRINT_FORMAT}}"
if not data.empty else 'None')
logger.debug("Current End: %s",
f"{data.iloc[-1]['date']:DATETIME_PRINT_FORMAT}" if not data.empty else 'None')
f"{data.iloc[-1]['date']:{DATETIME_PRINT_FORMAT}}"
if not data.empty else 'None')
# Default since_ms to 30 days if nothing is given
new_data = exchange.get_historic_ohlcv(pair=pair,
timeframe=timeframe,
since_ms=since_ms if since_ms else
arrow.utcnow().shift(
days=-new_pairs_days).int_timestamp * 1000,
int((datetime.now() - timedelta(days=new_pairs_days)
).timestamp()) * 1000,
is_new_pair=data.empty,
candle_type=candle_type,
until_ms=until_ms if until_ms else None
@@ -253,10 +257,12 @@ def _download_pair_history(pair: str, *,
data = clean_ohlcv_dataframe(concat([data, new_dataframe], axis=0), timeframe, pair,
fill_missing=False, drop_incomplete=False)
logger.debug("New Start: %s",
f"{data.iloc[0]['date']:DATETIME_PRINT_FORMAT}" if not data.empty else 'None')
logger.debug("New Start: %s",
f"{data.iloc[0]['date']:{DATETIME_PRINT_FORMAT}}"
if not data.empty else 'None')
logger.debug("New End: %s",
f"{data.iloc[-1]['date']:DATETIME_PRINT_FORMAT}" if not data.empty else 'None')
f"{data.iloc[-1]['date']:{DATETIME_PRINT_FORMAT}}"
if not data.empty else 'None')
data_handler.ohlcv_store(pair, timeframe, data=data, candle_type=candle_type)
return True
@@ -291,7 +297,7 @@ def refresh_backtest_ohlcv_data(exchange: Exchange, pairs: List[str], timeframes
continue
for timeframe in timeframes:
logger.info(f'Downloading pair {pair}, interval {timeframe}.')
logger.debug(f'Downloading pair {pair}, {candle_type}, interval {timeframe}.')
process = f'{idx}/{len(pairs)}'
_download_pair_history(pair=pair, process=process,
datadir=datadir, exchange=exchange,
@@ -349,7 +355,7 @@ def _download_trades_history(exchange: Exchange,
trades = []
if not since:
since = arrow.utcnow().shift(days=-new_pairs_days).int_timestamp * 1000
since = int((datetime.now() - timedelta(days=new_pairs_days)).timestamp()) * 1000
from_id = trades[-1][1] if trades else None
if trades and since < trades[-1][0]:
@@ -480,3 +486,79 @@ def validate_backtest_data(data: DataFrame, pair: str, min_date: datetime,
logger.warning("%s has missing frames: expected %s, got %s, that's %s missing values",
pair, expected_frames, dflen, expected_frames - dflen)
return found_missing
def download_data_main(config: Config) -> None:
timerange = TimeRange()
if 'days' in config:
time_since = (datetime.now() - timedelta(days=config['days'])).strftime("%Y%m%d")
timerange = TimeRange.parse_timerange(f'{time_since}-')
if 'timerange' in config:
timerange = timerange.parse_timerange(config['timerange'])
# Remove stake-currency to skip checks which are not relevant for datadownload
config['stake_currency'] = ''
pairs_not_available: List[str] = []
# Init exchange
from freqtrade.resolvers.exchange_resolver import ExchangeResolver
exchange = ExchangeResolver.load_exchange(config, validate=False)
available_pairs = [
p for p in exchange.get_markets(
tradable_only=True, active_only=not config.get('include_inactive')
).keys()
]
expanded_pairs = dynamic_expand_pairlist(config, available_pairs)
if 'timeframes' not in config:
config['timeframes'] = DL_DATA_TIMEFRAMES
# Manual validations of relevant settings
if not config['exchange'].get('skip_pair_validation', False):
exchange.validate_pairs(expanded_pairs)
logger.info(f"About to download pairs: {expanded_pairs}, "
f"intervals: {config['timeframes']} to {config['datadir']}")
for timeframe in config['timeframes']:
exchange.validate_timeframes(timeframe)
# Start downloading
try:
if config.get('download_trades'):
if config.get('trading_mode') == 'futures':
raise OperationalException("Trade download not supported for futures.")
pairs_not_available = refresh_backtest_trades_data(
exchange, pairs=expanded_pairs, datadir=config['datadir'],
timerange=timerange, new_pairs_days=config['new_pairs_days'],
erase=bool(config.get('erase')), data_format=config['dataformat_trades'])
# Convert downloaded trade data to different timeframes
convert_trades_to_ohlcv(
pairs=expanded_pairs, timeframes=config['timeframes'],
datadir=config['datadir'], timerange=timerange, erase=bool(config.get('erase')),
data_format_ohlcv=config['dataformat_ohlcv'],
data_format_trades=config['dataformat_trades'],
)
else:
if not exchange.get_option('ohlcv_has_history', True):
raise OperationalException(
f"Historic klines not available for {exchange.name}. "
"Please use `--dl-trades` instead for this exchange "
"(will unfortunately take a long time)."
)
migrate_binance_futures_data(config)
pairs_not_available = refresh_backtest_ohlcv_data(
exchange, pairs=expanded_pairs, timeframes=config['timeframes'],
datadir=config['datadir'], timerange=timerange,
new_pairs_days=config['new_pairs_days'],
erase=bool(config.get('erase')), data_format=config['dataformat_ohlcv'],
trading_mode=config.get('trading_mode', 'spot'),
prepend=config.get('prepend_data', False)
)
finally:
if pairs_not_available:
logger.info(f"Pairs [{','.join(pairs_not_available)}] not available "
f"on exchange {exchange.name}.")

View File

@@ -3,9 +3,9 @@
import logging
from collections import defaultdict
from copy import deepcopy
from datetime import timedelta
from typing import Any, Dict, List, NamedTuple
import arrow
import numpy as np
import utils_find_1st as utf1st
from pandas import DataFrame
@@ -18,6 +18,7 @@ from freqtrade.exceptions import OperationalException
from freqtrade.exchange import timeframe_to_seconds
from freqtrade.plugins.pairlist.pairlist_helpers import expand_pairlist
from freqtrade.strategy.interface import IStrategy
from freqtrade.util import dt_now
logger = logging.getLogger(__name__)
@@ -79,8 +80,8 @@ class Edge:
self._stoploss_range_step
)
self._timerange: TimeRange = TimeRange.parse_timerange("%s-" % arrow.now().shift(
days=-1 * self._since_number_of_days).format('YYYYMMDD'))
self._timerange: TimeRange = TimeRange.parse_timerange(
f"{(dt_now() - timedelta(days=self._since_number_of_days)).strftime('%Y%m%d')}-")
if config.get('fee'):
self.fee = config['fee']
else:
@@ -97,7 +98,7 @@ class Edge:
heartbeat = self.edge_config.get('process_throttle_secs')
if (self._last_updated > 0) and (
self._last_updated + heartbeat > arrow.utcnow().int_timestamp):
self._last_updated + heartbeat > int(dt_now().timestamp())):
return False
data: Dict[str, Any] = {}
@@ -189,7 +190,7 @@ class Edge:
# Fill missing, calculable columns, profit, duration , abs etc.
trades_df = self._fill_calculable_fields(DataFrame(trades))
self._cached_pairs = self._process_expectancy(trades_df)
self._last_updated = arrow.utcnow().int_timestamp
self._last_updated = int(dt_now().timestamp())
return True

View File

@@ -15,6 +15,7 @@ class ExitType(Enum):
EMERGENCY_EXIT = "emergency_exit"
CUSTOM_EXIT = "custom_exit"
PARTIAL_EXIT = "partial_exit"
SOLD_ON_EXCHANGE = "sold_on_exchange"
NONE = ""
def __str__(self):

View File

@@ -1,7 +1,7 @@
from enum import Enum
class MarginMode(Enum):
class MarginMode(str, Enum):
"""
Enum to distinguish between
cross margin/futures margin_mode and
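The `str` mixin change is about interop: members now compare equal to plain strings and JSON-serialize as their values. A standalone sketch (member values assumed for illustration):

```python
import json
from enum import Enum


class MarginMode(str, Enum):  # member values assumed for illustration
    CROSS = "cross"
    ISOLATED = "isolated"


print(MarginMode.CROSS == "cross")      # True (a plain Enum would give False)
print(json.dumps(MarginMode.ISOLATED))  # "isolated"
```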

View File

@@ -1,6 +1,6 @@
# flake8: noqa: F401
# isort: off
from freqtrade.exchange.common import remove_credentials, MAP_EXCHANGE_CHILDCLASS
from freqtrade.exchange.common import remove_exchange_credentials, MAP_EXCHANGE_CHILDCLASS
from freqtrade.exchange.exchange import Exchange
# isort: on
from freqtrade.exchange.binance import Binance
@@ -13,11 +13,11 @@ from freqtrade.exchange.exchange_utils import (ROUND_DOWN, ROUND_UP, amount_to_c
amount_to_contracts, amount_to_precision,
available_exchanges, ccxt_exchanges,
contracts_to_amount, date_minus_candles,
is_exchange_known_ccxt, market_is_active,
price_to_precision, timeframe_to_minutes,
timeframe_to_msecs, timeframe_to_next_date,
timeframe_to_prev_date, timeframe_to_seconds,
validate_exchange, validate_exchanges)
is_exchange_known_ccxt, list_available_exchanges,
market_is_active, price_to_precision,
timeframe_to_minutes, timeframe_to_msecs,
timeframe_to_next_date, timeframe_to_prev_date,
timeframe_to_seconds, validate_exchange)
from freqtrade.exchange.gate import Gate
from freqtrade.exchange.hitbtc import Hitbtc
from freqtrade.exchange.huobi import Huobi

View File

@@ -1,10 +1,9 @@
""" Binance exchange subclass """
import logging
from datetime import datetime
from datetime import datetime, timezone
from pathlib import Path
from typing import Dict, List, Optional, Tuple
import arrow
import ccxt
from freqtrade.enums import CandleType, MarginMode, PriceType, TradingMode
@@ -66,7 +65,7 @@ class Binance(Exchange):
"""
try:
if self.trading_mode == TradingMode.FUTURES and not self._config['dry_run']:
position_side = self._api.fapiPrivateGetPositionsideDual()
position_side = self._api.fapiPrivateGetPositionSideDual()
self._log_exchange_response('position_side_setting', position_side)
assets_margin = self._api.fapiPrivateGetMultiAssetsMargin()
self._log_exchange_response('multi_asset_margin', assets_margin)
@@ -105,8 +104,9 @@ class Binance(Exchange):
if x and x[3] and x[3][0] and x[3][0][0] > since_ms:
# Set starting date to first available candle.
since_ms = x[3][0][0]
logger.info(f"Candle-data for {pair} available starting with "
f"{arrow.get(since_ms // 1000).isoformat()}.")
logger.info(
f"Candle-data for {pair} available starting with "
f"{datetime.fromtimestamp(since_ms // 1000, tz=timezone.utc).isoformat()}.")
return await super()._async_get_historic_ohlcv(
pair=pair,

File diff suppressed because it is too large

View File

@@ -28,7 +28,7 @@ class Bybit(Exchange):
_ft_has: Dict = {
"ohlcv_candle_limit": 200,
"ohlcv_has_history": False,
"ohlcv_has_history": True,
}
_ft_has_futures: Dict = {
"ohlcv_has_history": True,

View File

@@ -4,6 +4,7 @@ import time
from functools import wraps
from typing import Any, Callable, Optional, TypeVar, cast, overload
from freqtrade.constants import ExchangeConfig
from freqtrade.exceptions import DDosProtection, RetryableOrderError, TemporaryError
from freqtrade.mixins import LoggingMixin
@@ -84,20 +85,22 @@ EXCHANGE_HAS_OPTIONAL = [
# 'fetchPositions', # Futures trading
# 'fetchLeverageTiers', # Futures initialization
# 'fetchMarketLeverageTiers', # Futures initialization
# 'fetchOpenOrders', 'fetchClosedOrders', # 'fetchOrders', # Refinding balance...
]
def remove_credentials(config) -> None:
def remove_exchange_credentials(exchange_config: ExchangeConfig, dry_run: bool) -> None:
"""
Removes exchange credentials from the exchange configuration when dry-run is enabled
Used for backtesting / hyperopt / edge and utils.
Modifies the input dict!
"""
if config.get('dry_run', False):
config['exchange']['key'] = ''
config['exchange']['secret'] = ''
config['exchange']['password'] = ''
config['exchange']['uid'] = ''
if dry_run:
exchange_config['key'] = ''
exchange_config['apiKey'] = ''
exchange_config['secret'] = ''
exchange_config['password'] = ''
exchange_config['uid'] = ''
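A minimal sketch of the new call shape; the sample config values are assumed, only the signature mirrors the diff:

```python
from freqtrade.exchange import remove_exchange_credentials

# Sample values are assumed; only the call shape mirrors the diff.
exchange_config = {'key': 'k', 'apiKey': 'k', 'secret': 's', 'password': 'p', 'uid': 'u'}
remove_exchange_credentials(exchange_config, dry_run=True)
print(exchange_config['key'], exchange_config['secret'])  # prints two empty strings
```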
def calculate_backoff(retrycount, max_retries):

View File

@@ -11,7 +11,6 @@ from math import floor
from threading import Lock
from typing import Any, Coroutine, Dict, List, Literal, Optional, Tuple, Union
import arrow
import ccxt
import ccxt.async_support as ccxt_async
from cachetools import TTLCache
@@ -20,16 +19,16 @@ from dateutil import parser
from pandas import DataFrame, concat
from freqtrade.constants import (DEFAULT_AMOUNT_RESERVE_PERCENT, NON_OPEN_EXCHANGE_STATES, BidAsk,
BuySell, Config, EntryExit, ListPairsWithTimeframes, MakerTaker,
OBLiteral, PairWithTimeframe)
BuySell, Config, EntryExit, ExchangeConfig,
ListPairsWithTimeframes, MakerTaker, OBLiteral, PairWithTimeframe)
from freqtrade.data.converter import clean_ohlcv_dataframe, ohlcv_to_dataframe, trades_dict_to_list
from freqtrade.enums import OPTIMIZE_MODES, CandleType, MarginMode, TradingMode
from freqtrade.enums.pricetype import PriceType
from freqtrade.exceptions import (DDosProtection, ExchangeError, InsufficientFundsError,
InvalidOrderException, OperationalException, PricingError,
RetryableOrderError, TemporaryError)
from freqtrade.exchange.common import (API_FETCH_ORDER_RETRY_COUNT, remove_credentials, retrier,
retrier_async)
from freqtrade.exchange.common import (API_FETCH_ORDER_RETRY_COUNT, remove_exchange_credentials,
retrier, retrier_async)
from freqtrade.exchange.exchange_utils import (ROUND, ROUND_DOWN, ROUND_UP, CcxtModuleType,
amount_to_contract_precision, amount_to_contracts,
amount_to_precision, contracts_to_amount,
@@ -42,6 +41,8 @@ from freqtrade.exchange.types import OHLCVResponse, OrderBook, Ticker, Tickers
from freqtrade.misc import (chunks, deep_merge_dicts, file_dump_json, file_load_json,
safe_value_fallback2)
from freqtrade.plugins.pairlist.pairlist_helpers import expand_pairlist
from freqtrade.util import dt_from_ts, dt_now
from freqtrade.util.datetime_helpers import dt_humanize, dt_ts
logger = logging.getLogger(__name__)
@@ -92,8 +93,8 @@ class Exchange:
# TradingMode.SPOT always supported and not required in this list
]
def __init__(self, config: Config, validate: bool = True,
load_leverage_tiers: bool = False) -> None:
def __init__(self, config: Config, *, exchange_config: Optional[ExchangeConfig] = None,
validate: bool = True, load_leverage_tiers: bool = False) -> None:
"""
Initializes this module with the given config,
it does basic validation whether the specified exchange and pairs are valid.
@@ -107,8 +108,7 @@ class Exchange:
# Lock event loop. This is necessary to avoid race-conditions when using force* commands
# Due to funding fee fetching.
self._loop_lock = Lock()
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
self.loop = self._init_async_loop()
self._config: Config = {}
self._config.update(config)
@@ -132,13 +132,13 @@ class Exchange:
# Holds all open sell orders for dry_run
self._dry_run_open_orders: Dict[str, Any] = {}
remove_credentials(config)
if config['dry_run']:
logger.info('Instance is running with dry_run enabled')
logger.info(f"Using CCXT {ccxt.__version__}")
exchange_config = config['exchange']
self.log_responses = exchange_config.get('log_responses', False)
exchange_conf: Dict[str, Any] = exchange_config if exchange_config else config['exchange']
remove_exchange_credentials(exchange_conf, config.get('dry_run', False))
self.log_responses = exchange_conf.get('log_responses', False)
# Leverage properties
self.trading_mode: TradingMode = config.get('trading_mode', TradingMode.SPOT)
@@ -153,8 +153,8 @@ class Exchange:
self._ft_has = deep_merge_dicts(self._ft_has, deepcopy(self._ft_has_default))
if self.trading_mode == TradingMode.FUTURES:
self._ft_has = deep_merge_dicts(self._ft_has_futures, self._ft_has)
if exchange_config.get('_ft_has_params'):
self._ft_has = deep_merge_dicts(exchange_config.get('_ft_has_params'),
if exchange_conf.get('_ft_has_params'):
self._ft_has = deep_merge_dicts(exchange_conf.get('_ft_has_params'),
self._ft_has)
logger.info("Overriding exchange._ft_has with config params, result: %s", self._ft_has)
@@ -166,18 +166,18 @@ class Exchange:
# Initialize ccxt objects
ccxt_config = self._ccxt_config
ccxt_config = deep_merge_dicts(exchange_config.get('ccxt_config', {}), ccxt_config)
ccxt_config = deep_merge_dicts(exchange_config.get('ccxt_sync_config', {}), ccxt_config)
ccxt_config = deep_merge_dicts(exchange_conf.get('ccxt_config', {}), ccxt_config)
ccxt_config = deep_merge_dicts(exchange_conf.get('ccxt_sync_config', {}), ccxt_config)
self._api = self._init_ccxt(exchange_config, ccxt_kwargs=ccxt_config)
self._api = self._init_ccxt(exchange_conf, ccxt_kwargs=ccxt_config)
ccxt_async_config = self._ccxt_config
ccxt_async_config = deep_merge_dicts(exchange_config.get('ccxt_config', {}),
ccxt_async_config = deep_merge_dicts(exchange_conf.get('ccxt_config', {}),
ccxt_async_config)
ccxt_async_config = deep_merge_dicts(exchange_config.get('ccxt_async_config', {}),
ccxt_async_config = deep_merge_dicts(exchange_conf.get('ccxt_async_config', {}),
ccxt_async_config)
self._api_async = self._init_ccxt(
exchange_config, ccxt_async, ccxt_kwargs=ccxt_async_config)
exchange_conf, ccxt_async, ccxt_kwargs=ccxt_async_config)
logger.info(f'Using Exchange "{self.name}"')
self.required_candle_call_count = 1
@@ -190,8 +190,8 @@ class Exchange:
self._startup_candle_count, config.get('timeframe', ''))
# Converts the interval provided in minutes in config to seconds
self.markets_refresh_interval: int = exchange_config.get(
"markets_refresh_interval", 60) * 60
self.markets_refresh_interval: int = exchange_conf.get(
"markets_refresh_interval", 60) * 60 * 1000
if self.trading_mode != TradingMode.SPOT and load_leverage_tiers:
self.fill_leverage_tiers()
@@ -212,6 +212,11 @@ class Exchange:
if self.loop and not self.loop.is_closed():
self.loop.close()
def _init_async_loop(self) -> asyncio.AbstractEventLoop:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
return loop
def validate_config(self, config):
# Check if timeframe is available
self.validate_timeframes(config.get('timeframe'))
@@ -296,7 +301,7 @@ class Exchange:
return list((self._api.timeframes or {}).keys())
@property
def markets(self) -> Dict:
def markets(self) -> Dict[str, Any]:
"""exchange ccxt markets"""
if not self._markets:
logger.info("Markets were not loaded. Loading them now..")
@@ -486,7 +491,7 @@ class Exchange:
try:
self._markets = self._api.load_markets(params={})
self._load_async_markets()
self._last_markets_refresh = arrow.utcnow().int_timestamp
self._last_markets_refresh = dt_ts()
if self._ft_has['needs_trading_fees']:
self._trading_fees = self.fetch_trading_fees()
@@ -497,15 +502,14 @@ class Exchange:
"""Reload markets both sync and async if refresh interval has passed """
# Check whether markets have to be reloaded
if (self._last_markets_refresh > 0) and (
self._last_markets_refresh + self.markets_refresh_interval
> arrow.utcnow().int_timestamp):
self._last_markets_refresh + self.markets_refresh_interval > dt_ts()):
return None
logger.debug("Performing scheduled market reload..")
try:
self._markets = self._api.load_markets(reload=True, params={})
# Also reload async markets to avoid issues with newly listed pairs
self._load_async_markets(reload=True)
self._last_markets_refresh = arrow.utcnow().int_timestamp
self._last_markets_refresh = dt_ts()
self.fill_leverage_tiers()
except ccxt.BaseError:
logger.exception("Could not reload markets.")
@@ -839,7 +843,8 @@ class Exchange:
def create_dry_run_order(self, pair: str, ordertype: str, side: str, amount: float,
rate: float, leverage: float, params: Dict = {},
stop_loss: bool = False) -> Dict[str, Any]:
order_id = f'dry_run_{side}_{datetime.now().timestamp()}'
now = dt_now()
order_id = f'dry_run_{side}_{now.timestamp()}'
# Rounding here must respect to contract sizes
_amount = self._contracts_to_amount(
pair, self.amount_to_precision(pair, self._amount_to_contracts(pair, amount)))
@@ -854,8 +859,8 @@ class Exchange:
'side': side,
'filled': 0,
'remaining': _amount,
'datetime': arrow.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%fZ'),
'timestamp': arrow.utcnow().int_timestamp * 1000,
'datetime': now.strftime('%Y-%m-%dT%H:%M:%S.%fZ'),
'timestamp': dt_ts(now),
'status': "open",
'fee': None,
'info': {},
@@ -863,7 +868,7 @@ class Exchange:
}
if stop_loss:
dry_order["info"] = {"stopPrice": dry_order["price"]}
dry_order["stopPrice"] = dry_order["price"]
dry_order[self._ft_has['stop_price_param']] = dry_order["price"]
# Workaround to avoid filling stoploss orders immediately
dry_order["ft_order_type"] = "stoploss"
orderbook: Optional[OrderBook] = None
@@ -1015,7 +1020,7 @@ class Exchange:
from freqtrade.persistence import Order
order = Order.order_by_id(order_id)
if order:
ccxt_order = order.to_ccxt_object()
ccxt_order = order.to_ccxt_object(self._ft_has['stop_price_param'])
self._dry_run_open_orders[order_id] = ccxt_order
return ccxt_order
# Gracefully handle errors with dry-run orders.
@@ -1143,8 +1148,8 @@ class Exchange:
else:
limit_rate = stop_price * (2 - limit_price_pct)
bad_stop_price = ((stop_price <= limit_rate) if side ==
"sell" else (stop_price >= limit_rate))
bad_stop_price = ((stop_price < limit_rate) if side ==
"sell" else (stop_price > limit_rate))
# Ensure rate is less than stop price
if bad_stop_price:
# This can for example happen if the stop / liquidation price is set to 0
@@ -1428,6 +1433,47 @@ class Exchange:
except ccxt.BaseError as e:
raise OperationalException(e) from e
@retrier(retries=0)
def fetch_orders(self, pair: str, since: datetime) -> List[Dict]:
"""
Fetch all orders for a pair since a given starting time
:param pair: Pair for the query
:param since: Starting time for the query
"""
if self._config['dry_run']:
return []
def fetch_orders_emulate() -> List[Dict]:
orders = []
if self.exchange_has('fetchClosedOrders'):
orders = self._api.fetch_closed_orders(pair, since=since_ms)
if self.exchange_has('fetchOpenOrders'):
orders_open = self._api.fetch_open_orders(pair, since=since_ms)
orders.extend(orders_open)
return orders
try:
since_ms = int((since.timestamp() - 10) * 1000)
if self.exchange_has('fetchOrders'):
try:
orders: List[Dict] = self._api.fetch_orders(pair, since=since_ms)
except ccxt.NotSupported:
# Some exchanges don't support fetchOrders
# attempt to fetch open and closed orders separately
orders = fetch_orders_emulate()
else:
orders = fetch_orders_emulate()
self._log_exchange_response('fetch_orders', orders)
orders = [self._order_contracts_to_amount(o) for o in orders]
return orders
except ccxt.DDoSProtection as e:
raise DDosProtection(e) from e
except (ccxt.NetworkError, ccxt.ExchangeError) as e:
raise TemporaryError(
f'Could not fetch orders due to {e.__class__.__name__}. Message: {e}') from e
except ccxt.BaseError as e:
raise OperationalException(e) from e
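A minimal usage sketch of the new fetch_orders helper (the `exchange` instance and pair are hypothetical; dry-run mode returns an empty list, as shown above):

from datetime import datetime, timedelta, timezone

since = datetime.now(timezone.utc) - timedelta(days=1)
orders = exchange.fetch_orders('BTC/USDT', since)   # `exchange` assumed configured elsewhere
for order in orders:
    print(order['id'], order['status'], order['filled'])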
@retrier
def fetch_trading_fees(self) -> Dict[str, Any]:
"""
@@ -1616,39 +1662,18 @@ class Exchange:
price_side = self._get_price_side(side, is_short, conf_strategy)
price_side_word = price_side.capitalize()
if conf_strategy.get('use_order_book', False):
order_book_top = conf_strategy.get('order_book_top', 1)
if order_book is None:
order_book = self.fetch_l2_order_book(pair, order_book_top)
logger.debug('order_book %s', order_book)
# top 1 = index 0
try:
obside: OBLiteral = 'bids' if price_side == 'bid' else 'asks'
rate = order_book[obside][order_book_top - 1][0]
except (IndexError, KeyError) as e:
logger.warning(
f"{pair} - {name} Price at location {order_book_top} from orderbook "
f"could not be determined. Orderbook: {order_book}"
)
raise PricingError from e
logger.debug(f"{pair} - {name} price from orderbook {price_side_word}"
f"side - top {order_book_top} order book {side} rate {rate:.8f}")
rate = self._get_rate_from_ob(pair, side, order_book, name, price_side,
order_book_top)
else:
logger.debug(f"Using Last {price_side_word} / Last Price")
logger.debug(f"Using Last {price_side.capitalize()} / Last Price")
if ticker is None:
ticker = self.fetch_ticker(pair)
ticker_rate = ticker[price_side]
if ticker['last'] and ticker_rate:
if side == 'entry' and ticker_rate > ticker['last']:
balance = conf_strategy.get('price_last_balance', 0.0)
ticker_rate = ticker_rate + balance * (ticker['last'] - ticker_rate)
elif side == 'exit' and ticker_rate < ticker['last']:
balance = conf_strategy.get('price_last_balance', 0.0)
ticker_rate = ticker_rate - balance * (ticker_rate - ticker['last'])
rate = ticker_rate
rate = self._get_rate_from_ticker(side, ticker, conf_strategy, price_side)
if rate is None:
raise PricingError(f"{name}-Rate for {pair} was empty.")
@@ -1657,6 +1682,43 @@ class Exchange:
return rate
def _get_rate_from_ticker(self, side: EntryExit, ticker: Ticker, conf_strategy: Dict[str, Any],
price_side: BidAsk) -> Optional[float]:
"""
Get rate from ticker.
"""
ticker_rate = ticker[price_side]
if ticker['last'] and ticker_rate:
if side == 'entry' and ticker_rate > ticker['last']:
balance = conf_strategy.get('price_last_balance', 0.0)
ticker_rate = ticker_rate + balance * (ticker['last'] - ticker_rate)
elif side == 'exit' and ticker_rate < ticker['last']:
balance = conf_strategy.get('price_last_balance', 0.0)
ticker_rate = ticker_rate - balance * (ticker_rate - ticker['last'])
rate = ticker_rate
return rate
def _get_rate_from_ob(self, pair: str, side: EntryExit, order_book: OrderBook, name: str,
price_side: BidAsk, order_book_top: int) -> float:
"""
Get rate from orderbook
:raises: PricingError if rate could not be determined.
"""
logger.debug('order_book %s', order_book)
# top 1 = index 0
try:
obside: OBLiteral = 'bids' if price_side == 'bid' else 'asks'
rate = order_book[obside][order_book_top - 1][0]
except (IndexError, KeyError) as e:
logger.warning(
f"{pair} - {name} Price at location {order_book_top} from orderbook "
f"could not be determined. Orderbook: {order_book}"
)
raise PricingError from e
logger.debug(f"{pair} - {name} price from orderbook {price_side.capitalize()}"
f"side - top {order_book_top} order book {side} rate {rate:.8f}")
return rate
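The price_last_balance interpolation above pulls the side price toward the last price; a worked numeric sketch (illustrative values):

# entry side: ask = 102, last = 100, price_last_balance = 0.5
ticker_rate, last, balance = 102.0, 100.0, 0.5
rate = ticker_rate + balance * (last - ticker_rate)   # 102 + 0.5 * (100 - 102)
assert rate == 101.0                                  # halfway between ask and last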
def get_rates(self, pair: str, refresh: bool, is_short: bool) -> Tuple[float, float]:
entry_rate = None
exit_rate = None
@@ -1885,11 +1947,11 @@ class Exchange:
logger.debug(
"one_call: %s msecs (%s)",
one_call,
arrow.utcnow().shift(seconds=one_call // 1000).humanize(only_distance=True)
dt_humanize(dt_now() - timedelta(milliseconds=one_call), only_distance=True)
)
input_coroutines = [self._async_get_candle_history(
pair, timeframe, candle_type, since) for since in
range(since_ms, until_ms or (arrow.utcnow().int_timestamp * 1000), one_call)]
range(since_ms, until_ms or dt_ts(), one_call)]
data: List = []
# Chunk requests into batches of 100 to avoid overwhelming ccxt Throttling
@@ -2072,7 +2134,7 @@ class Exchange:
"""
try:
# Fetch OHLCV asynchronously
s = '(' + arrow.get(since_ms // 1000).isoformat() + ') ' if since_ms is not None else ''
s = '(' + dt_from_ts(since_ms).isoformat() + ') ' if since_ms is not None else ''
logger.debug(
"Fetching pair %s, %s, interval %s, since %s %s...",
pair, candle_type, timeframe, since_ms, s
@@ -2162,7 +2224,7 @@ class Exchange:
logger.debug(
"Fetching trades for pair %s, since %s %s...",
pair, since,
'(' + arrow.get(since // 1000).isoformat() + ') ' if since is not None else ''
'(' + dt_from_ts(since).isoformat() + ') ' if since is not None else ''
)
trades = await self._api_async.fetch_trades(pair, since=since, limit=1000)
trades = self._trades_contracts_to_amount(trades)
@@ -2371,12 +2433,12 @@ class Exchange:
# Must fetch the leverage tiers for each market separately
# * This is slow (~45s) on Okx, makes ~90 API calls to load all linear swap markets
markets = self.markets
symbols = []
for symbol, market in markets.items():
symbols = [
symbol for symbol, market in markets.items()
if (self.market_is_future(market)
and market['quote'] == self._config['stake_currency']):
symbols.append(symbol)
and market['quote'] == self._config['stake_currency'])
]
tiers: Dict[str, List[Dict]] = {}
@@ -2396,25 +2458,26 @@ class Exchange:
else:
logger.info("Using cached leverage_tiers.")
async def gather_results():
async def gather_results(input_coro):
return await asyncio.gather(*input_coro, return_exceptions=True)
for input_coro in chunks(coros, 100):
with self._loop_lock:
results = self.loop.run_until_complete(gather_results())
results = self.loop.run_until_complete(gather_results(input_coro))
for symbol, res in results:
tiers[symbol] = res
for res in results:
if isinstance(res, Exception):
logger.warning(f"Leverage tier exception: {repr(res)}")
continue
symbol, tier = res
tiers[symbol] = tier
if len(coros) > 0:
self.cache_leverage_tiers(tiers, self._config['stake_currency'])
logger.info(f"Done initializing {len(symbols)} markets.")
return tiers
else:
return {}
else:
return {}
return {}
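The reworked loop chunks the coroutines and tolerates per-symbol failures via return_exceptions=True; a self-contained sketch of the same gather pattern (names and payloads are illustrative, not freqtrade API):

import asyncio

async def fetch_one(symbol: str):
    # Stand-in for a per-symbol leverage-tier request.
    if symbol == 'BAD/USDT':
        raise RuntimeError('exchange hiccup')
    return symbol, [{'minNotional': 0, 'maxLeverage': 20}]

async def gather_results(input_coro):
    return await asyncio.gather(*input_coro, return_exceptions=True)

symbols = ['BTC/USDT', 'BAD/USDT', 'ETH/USDT']
results = asyncio.run(gather_results([fetch_one(s) for s in symbols]))
tiers = {}
for res in results:
    if isinstance(res, Exception):
        print(f"Leverage tier exception: {res!r}")
        continue
    symbol, tier = res
    tiers[symbol] = tier
# tiers now holds entries only for the symbols that succeeded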
def cache_leverage_tiers(self, tiers: Dict[str, List[Dict]], stake_currency: str) -> None:
@@ -2430,14 +2493,17 @@ class Exchange:
def load_cached_leverage_tiers(self, stake_currency: str) -> Optional[Dict[str, List[Dict]]]:
filename = self._config['datadir'] / "futures" / f"leverage_tiers_{stake_currency}.json"
if filename.is_file():
tiers = file_load_json(filename)
updated = tiers.get('updated')
if updated:
updated_dt = parser.parse(updated)
if updated_dt < datetime.now(timezone.utc) - timedelta(weeks=4):
logger.info("Cached leverage tiers are outdated. Will update.")
return None
return tiers['data']
try:
tiers = file_load_json(filename)
updated = tiers.get('updated')
if updated:
updated_dt = parser.parse(updated)
if updated_dt < datetime.now(timezone.utc) - timedelta(weeks=4):
logger.info("Cached leverage tiers are outdated. Will update.")
return None
return tiers['data']
except Exception:
logger.exception("Error loading cached leverage tiers. Refreshing.")
return None
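The reader above expects a JSON file with an ISO 'updated' timestamp next to the tier data; a sketch of writing a compatible cache file (layout inferred from the loading code, path illustrative):

import json
from datetime import datetime, timezone
from pathlib import Path

filename = Path('user_data/data/futures/leverage_tiers_USDT.json')  # illustrative
filename.parent.mkdir(parents=True, exist_ok=True)
payload = {
    'updated': datetime.now(timezone.utc).isoformat(),   # parsed back via dateutil's parser
    'data': {'BTC/USDT:USDT': [{'minNotional': 0, 'maxLeverage': 125}]},
}
filename.write_text(json.dumps(payload))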
def fill_leverage_tiers(self) -> None:
@@ -2892,8 +2958,8 @@ class Exchange:
if nominal_value >= tier['minNotional']:
return (tier['maintenanceMarginRate'], tier['maintAmt'])
raise OperationalException("nominal value can not be lower than 0")
raise ExchangeError("nominal value can not be lower than 0")
# The lowest notional_floor for any pair in fetch_leverage_tiers is always 0 because it
# describes the minimum amount for a tier, and the lowest tier will always go down to 0
else:
raise OperationalException(f"Cannot get maintenance ratio using {self.name}")
raise ExchangeError(f"Cannot get maintenance ratio using {self.name}")

View File

@@ -9,8 +9,11 @@ import ccxt
from ccxt import (DECIMAL_PLACES, ROUND, ROUND_DOWN, ROUND_UP, SIGNIFICANT_DIGITS, TICK_SIZE,
TRUNCATE, decimal_to_precision)
from freqtrade.exchange.common import BAD_EXCHANGES, EXCHANGE_HAS_OPTIONAL, EXCHANGE_HAS_REQUIRED
from freqtrade.exchange.common import (BAD_EXCHANGES, EXCHANGE_HAS_OPTIONAL, EXCHANGE_HAS_REQUIRED,
SUPPORTED_EXCHANGES)
from freqtrade.types import ValidExchangesType
from freqtrade.util import FtPrecise
from freqtrade.util.datetime_helpers import dt_from_ts, dt_ts
CcxtModuleType = Any
@@ -54,14 +57,41 @@ def validate_exchange(exchange: str) -> Tuple[bool, str]:
return True, ''
def validate_exchanges(all_exchanges: bool) -> List[Tuple[str, bool, str]]:
def _build_exchange_list_entry(
exchange_name: str, exchangeClasses: Dict[str, Any]) -> ValidExchangesType:
valid, comment = validate_exchange(exchange_name)
result: ValidExchangesType = {
'name': exchange_name,
'valid': valid,
'supported': exchange_name.lower() in SUPPORTED_EXCHANGES,
'comment': comment,
'trade_modes': [{'trading_mode': 'spot', 'margin_mode': ''}],
}
if resolved := exchangeClasses.get(exchange_name.lower()):
supported_modes = [{'trading_mode': 'spot', 'margin_mode': ''}] + [
{'trading_mode': tm.value, 'margin_mode': mm.value}
for tm, mm in resolved['class']._supported_trading_mode_margin_pairs
]
result.update({
'trade_modes': supported_modes,
})
return result
def list_available_exchanges(all_exchanges: bool) -> List[ValidExchangesType]:
"""
:return: List of ValidExchangesType dicts (name, valid, supported, comment, trade_modes).
"""
exchanges = ccxt_exchanges() if all_exchanges else available_exchanges()
exchanges_valid = [
(e, *validate_exchange(e)) for e in exchanges
from freqtrade.resolvers.exchange_resolver import ExchangeResolver
subclassed = {e['name'].lower(): e for e in ExchangeResolver.search_all_objects({}, False)}
exchanges_valid: List[ValidExchangesType] = [
_build_exchange_list_entry(e, subclassed) for e in exchanges
]
return exchanges_valid
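A usage sketch of the reworked listing (entry keys follow the ValidExchangesType shape built above; the calling context is illustrative):

# `list_available_exchanges` is defined above; the loop below is illustrative only.
for ex in list_available_exchanges(all_exchanges=False):
    modes = ', '.join(m['trading_mode'] for m in ex['trade_modes'])
    print(f"{ex['name']}: valid={ex['valid']} supported={ex['supported']} modes=[{modes}]")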
@@ -99,9 +129,8 @@ def timeframe_to_prev_date(timeframe: str, date: Optional[datetime] = None) -> d
if not date:
date = datetime.now(timezone.utc)
new_timestamp = ccxt.Exchange.round_timeframe(timeframe, date.timestamp() * 1000,
ROUND_DOWN) // 1000
return datetime.fromtimestamp(new_timestamp, tz=timezone.utc)
new_timestamp = ccxt.Exchange.round_timeframe(timeframe, dt_ts(date), ROUND_DOWN) // 1000
return dt_from_ts(new_timestamp)
def timeframe_to_next_date(timeframe: str, date: Optional[datetime] = None) -> datetime:
@@ -113,9 +142,8 @@ def timeframe_to_next_date(timeframe: str, date: Optional[datetime] = None) -> d
"""
if not date:
date = datetime.now(timezone.utc)
new_timestamp = ccxt.Exchange.round_timeframe(timeframe, date.timestamp() * 1000,
ROUND_UP) // 1000
return datetime.fromtimestamp(new_timestamp, tz=timezone.utc)
new_timestamp = ccxt.Exchange.round_timeframe(timeframe, dt_ts(date), ROUND_UP) // 1000
return dt_from_ts(new_timestamp)
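A quick check of the rounding semantics (public helpers, illustrative values):

from datetime import datetime, timezone
from freqtrade.exchange import timeframe_to_next_date, timeframe_to_prev_date

date = datetime(2023, 7, 15, 12, 17, tzinfo=timezone.utc)
assert timeframe_to_prev_date('5m', date) == datetime(2023, 7, 15, 12, 15, tzinfo=timezone.utc)
assert timeframe_to_next_date('5m', date) == datetime(2023, 7, 15, 12, 20, tzinfo=timezone.utc)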
def date_minus_candles(

View File

@@ -33,7 +33,6 @@ class Gate(Exchange):
_ft_has_futures: Dict = {
"needs_trading_fees": True,
"marketOrderRequiresPrice": False,
"tickers_have_bid_ask": False,
"fee_cost_in_contracts": False, # Set explicitly to false for clarity
"order_props_in_contracts": ['amount', 'filled', 'remaining'],
"stop_price_type_field": "price_type",

View File

@@ -125,6 +125,20 @@ class Okx(Exchange):
params['posSide'] = self._get_posSide(side, reduceOnly)
return params
def __fetch_leverage_already_set(self, pair: str, leverage: float, side: BuySell) -> bool:
try:
res_lev = self._api.fetch_leverage(symbol=pair, params={
"mgnMode": self.margin_mode.value,
"posSide": self._get_posSide(side, False),
})
self._log_exchange_response('get_leverage', res_lev)
already_set = all(float(x['lever']) == leverage for x in res_lev['data'])
return already_set
except ccxt.BaseError:
# Treat any error as "not set yet"
return False
@retrier
def _lev_prep(self, pair: str, leverage: float, side: BuySell, accept_fail: bool = False):
if self.trading_mode != TradingMode.SPOT and self.margin_mode is not None:
@@ -141,8 +155,11 @@ class Okx(Exchange):
except ccxt.DDoSProtection as e:
raise DDosProtection(e) from e
except (ccxt.NetworkError, ccxt.ExchangeError) as e:
raise TemporaryError(
f'Could not set leverage due to {e.__class__.__name__}. Message: {e}') from e
already_set = self.__fetch_leverage_already_set(pair, leverage, side)
if not already_set:
raise TemporaryError(
f'Could not set leverage due to {e.__class__.__name__}. Message: {e}'
) from e
except ccxt.BaseError as e:
raise OperationalException(e) from e
@@ -169,6 +186,23 @@ class Okx(Exchange):
params['posSide'] = self._get_posSide(side, True)
return params
def _convert_stop_order(self, pair: str, order_id: str, order: Dict) -> Dict:
if (
order['status'] == 'closed'
and (real_order_id := order.get('info', {}).get('ordId')) is not None
):
# Once an order has triggered, we fetch the regular follow-up order.
order_reg = self.fetch_order(real_order_id, pair)
self._log_exchange_response('fetch_stoploss_order1', order_reg)
order_reg['id_stop'] = order_reg['id']
order_reg['id'] = order_id
order_reg['type'] = 'stoploss'
order_reg['status_stop'] = 'triggered'
return order_reg
order = self._order_contracts_to_amount(order)
order['type'] = 'stoploss'
return order
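For orientation, the helper above remaps a triggered OKX stop order onto its exchange-side follow-up order; a toy illustration of the resulting id bookkeeping (payload made up):

# Hypothetical triggered stop order as returned by the stop-order query:
stop_order = {'id': 'algo-123', 'status': 'closed', 'info': {'ordId': 'ord-456'}}
# After _convert_stop_order, the returned regular order carries:
#   order['id']          == 'algo-123'   # original stoploss id, kept for tracking
#   order['id_stop']     == 'ord-456'    # the follow-up order's own exchange id
#   order['type']        == 'stoploss'
#   order['status_stop'] == 'triggered'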
def fetch_stoploss_order(self, order_id: str, pair: str, params: Dict = {}) -> Dict:
if self._config['dry_run']:
return self.fetch_dry_run_order(order_id)
@@ -177,7 +211,7 @@ class Okx(Exchange):
params1 = {'stop': True}
order_reg = self._api.fetch_order(order_id, pair, params=params1)
self._log_exchange_response('fetch_stoploss_order', order_reg)
return order_reg
return self._convert_stop_order(pair, order_id, order_reg)
except ccxt.OrderNotFound:
pass
params2 = {'stop': True, 'ordType': 'conditional'}
@@ -188,18 +222,7 @@ class Okx(Exchange):
orders_f = [order for order in orders if order['id'] == order_id]
if orders_f:
order = orders_f[0]
if (order['status'] == 'closed'
and (real_order_id := order.get('info', {}).get('ordId')) is not None):
# Once an order has triggered, we fetch the regular follow-up order.
order_reg = self.fetch_order(real_order_id, pair)
self._log_exchange_response('fetch_stoploss_order1', order_reg)
order_reg['id_stop'] = order_reg['id']
order_reg['id'] = order_id
order_reg['type'] = 'stoploss'
order_reg['status_stop'] = 'triggered'
return order_reg
order['type'] = 'stoploss'
return order
return self._convert_stop_order(pair, order_id, order)
except ccxt.BaseError:
pass
raise RetryableOrderError(

View File

@@ -1,7 +1,7 @@
import logging
from enum import Enum
from gym import spaces
from gymnasium import spaces
from freqtrade.freqai.RL.BaseEnvironment import BaseEnvironment, Positions
@@ -94,9 +94,12 @@ class Base3ActionRLEnv(BaseEnvironment):
observation = self._get_observation()
# truncated stays False; users can implement their own time-limit handling if desired
truncated = False
self._update_history(info)
return observation, step_reward, self._done, info
return observation, step_reward, self._done, truncated, info
def is_tradesignal(self, action: int) -> bool:
"""

View File

@@ -1,7 +1,7 @@
import logging
from enum import Enum
from gym import spaces
from gymnasium import spaces
from freqtrade.freqai.RL.BaseEnvironment import BaseEnvironment, Positions
@@ -96,9 +96,12 @@ class Base4ActionRLEnv(BaseEnvironment):
observation = self._get_observation()
# truncated stays False; users can implement their own time-limit handling if desired
truncated = False
self._update_history(info)
return observation, step_reward, self._done, info
return observation, step_reward, self._done, truncated, info
def is_tradesignal(self, action: int) -> bool:
"""

View File

@@ -1,7 +1,7 @@
import logging
from enum import Enum
from gym import spaces
from gymnasium import spaces
from freqtrade.freqai.RL.BaseEnvironment import BaseEnvironment, Positions
@@ -101,10 +101,12 @@ class Base5ActionRLEnv(BaseEnvironment):
)
observation = self._get_observation()
# truncated stays False; users can implement their own time-limit handling if desired
truncated = False
self._update_history(info)
return observation, step_reward, self._done, info
return observation, step_reward, self._done, truncated, info
def is_tradesignal(self, action: int) -> bool:
"""

View File

@@ -2,13 +2,13 @@ import logging
import random
from abc import abstractmethod
from enum import Enum
from typing import Optional, Type, Union
from typing import List, Optional, Type, Union
import gym
import gymnasium as gym
import numpy as np
import pandas as pd
from gym import spaces
from gym.utils import seeding
from gymnasium import spaces
from gymnasium.utils import seeding
from pandas import DataFrame
@@ -127,12 +127,23 @@ class BaseEnvironment(gym.Env):
self.history: dict = {}
self.trade_history: list = []
def get_attr(self, attr: str):
"""
Returns the attribute of the environment
:param attr: attribute to return
:return: attribute
"""
return getattr(self, attr)
@abstractmethod
def set_action_space(self):
"""
Unique to the environment action count. Must be implemented by subclasses.
"""
def action_masks(self) -> List[bool]:
return [self._is_valid(action.value) for action in self.actions]
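action_masks() is what sb3_contrib's maskable policies consume; a toy illustration of the mask shape and its use (the MaskablePPO call is assumed sb3-contrib usage):

# Toy 3-action space where only action 0 (e.g. HOLD) is currently valid:
mask = [True, False, False]     # same shape/type as action_masks() returns
# A maskable policy zeroes out masked actions before sampling, e.g.:
# action, _states = model.predict(obs, action_masks=mask)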
def seed(self, seed: int = 1):
self.np_random, seed = seeding.np_random(seed)
return [seed]
@@ -172,7 +183,7 @@ class BaseEnvironment(gym.Env):
def reset_tensorboard_log(self):
self.tensorboard_metrics = {}
def reset(self):
def reset(self, seed=None):
"""
Reset is called at the beginning of every episode
"""
@@ -203,7 +214,7 @@ class BaseEnvironment(gym.Env):
self.close_trade_profit = []
self._total_unrealized_profit = 1
return self._get_observation()
return self._get_observation(), self.history
@abstractmethod
def step(self, action: int):
@@ -298,6 +309,12 @@ class BaseEnvironment(gym.Env):
"""
An example reward function. This is the one function that users will likely
wish to inject their own creativity into.
Warning!
This function is a showcase, designed to demonstrate as many environment
control features as possible. It is also designed to run quickly on small
computers. This is a benchmark; it is *not* for live production.
:param action: int = The action made by the agent for the current candle.
:return:
float = the reward to give to the agent for current step (used for optimization

View File

@@ -6,24 +6,25 @@ from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Callable, Dict, Optional, Tuple, Type, Union
import gym
import gymnasium as gym
import numpy as np
import numpy.typing as npt
import pandas as pd
import torch as th
import torch.multiprocessing
from pandas import DataFrame
from stable_baselines3.common.callbacks import EvalCallback
from sb3_contrib.common.maskable.callbacks import MaskableEvalCallback
from sb3_contrib.common.maskable.utils import is_masking_supported
from stable_baselines3.common.monitor import Monitor
from stable_baselines3.common.utils import set_random_seed
from stable_baselines3.common.vec_env import SubprocVecEnv
from stable_baselines3.common.vec_env import SubprocVecEnv, VecMonitor
from freqtrade.exceptions import OperationalException
from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
from freqtrade.freqai.freqai_interface import IFreqaiModel
from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Base5ActionRLEnv
from freqtrade.freqai.RL.BaseEnvironment import BaseActions, Positions
from freqtrade.freqai.RL.TensorboardCallback import TensorboardCallback
from freqtrade.freqai.RL.BaseEnvironment import BaseActions, BaseEnvironment, Positions
from freqtrade.freqai.tensorboard.TensorboardCallback import TensorboardCallback
from freqtrade.persistence import Trade
@@ -46,9 +47,9 @@ class BaseReinforcementLearningModel(IFreqaiModel):
'cpu_count', 1), max(int(self.max_system_threads / 2), 1))
th.set_num_threads(self.max_threads)
self.reward_params = self.freqai_info['rl_config']['model_reward_parameters']
self.train_env: Union[SubprocVecEnv, Type[gym.Env]] = gym.Env()
self.eval_env: Union[SubprocVecEnv, Type[gym.Env]] = gym.Env()
self.eval_callback: Optional[EvalCallback] = None
self.train_env: Union[VecMonitor, SubprocVecEnv, gym.Env] = gym.Env()
self.eval_env: Union[VecMonitor, SubprocVecEnv, gym.Env] = gym.Env()
self.eval_callback: Optional[MaskableEvalCallback] = None
self.model_type = self.freqai_info['rl_config']['model_type']
self.rl_config = self.freqai_info['rl_config']
self.df_raw: DataFrame = DataFrame()
@@ -82,6 +83,9 @@ class BaseReinforcementLearningModel(IFreqaiModel):
if self.ft_params.get('use_DBSCAN_to_remove_outliers', False):
self.ft_params.update({'use_DBSCAN_to_remove_outliers': False})
logger.warning('User tried to use DBSCAN with RL. Deactivating DBSCAN.')
if self.ft_params.get('DI_threshold', False):
self.ft_params.update({'DI_threshold': False})
logger.warning('User tried to use DI_threshold with RL. Deactivating DI_threshold.')
if self.freqai_info['data_split_parameters'].get('shuffle', False):
self.freqai_info['data_split_parameters'].update({'shuffle': False})
logger.warning('User tried to shuffle training data. Setting shuffle to False')
@@ -107,27 +111,37 @@ class BaseReinforcementLearningModel(IFreqaiModel):
training_filter=True,
)
data_dictionary: Dict[str, Any] = dk.make_train_test_datasets(
dd: Dict[str, Any] = dk.make_train_test_datasets(
features_filtered, labels_filtered)
self.df_raw = copy.deepcopy(data_dictionary["train_features"])
self.df_raw = copy.deepcopy(dd["train_features"])
dk.fit_labels() # FIXME useless for now, but just satiating append methods
# normalize all data based on train_dataset only
prices_train, prices_test = self.build_ohlc_price_dataframes(dk.data_dictionary, pair, dk)
data_dictionary = dk.normalize_data(data_dictionary)
dk.feature_pipeline = self.define_data_pipeline(threads=dk.thread_count)
# data cleaning/analysis
self.data_cleaning_train(dk)
(dd["train_features"],
dd["train_labels"],
dd["train_weights"]) = dk.feature_pipeline.fit_transform(dd["train_features"],
dd["train_labels"],
dd["train_weights"])
if self.freqai_info.get('data_split_parameters', {}).get('test_size', 0.1) != 0:
(dd["test_features"],
dd["test_labels"],
dd["test_weights"]) = dk.feature_pipeline.transform(dd["test_features"],
dd["test_labels"],
dd["test_weights"])
logger.info(
f'Training model on {len(dk.data_dictionary["train_features"].columns)}'
f' features and {len(data_dictionary["train_features"])} data points'
f' features and {len(dd["train_features"])} data points'
)
self.set_train_and_eval_environments(data_dictionary, prices_train, prices_test, dk)
self.set_train_and_eval_environments(dd, prices_train, prices_test, dk)
model = self.fit(data_dictionary, dk)
model = self.fit(dd, dk)
logger.info(f"--------------------done training {pair}--------------------")
@@ -151,9 +165,11 @@ class BaseReinforcementLearningModel(IFreqaiModel):
self.train_env = self.MyRLEnv(df=train_df, prices=prices_train, **env_info)
self.eval_env = Monitor(self.MyRLEnv(df=test_df, prices=prices_test, **env_info))
self.eval_callback = EvalCallback(self.eval_env, deterministic=True,
render=False, eval_freq=len(train_df),
best_model_save_path=str(dk.data_path))
self.eval_callback = MaskableEvalCallback(self.eval_env, deterministic=True,
render=False, eval_freq=len(train_df),
best_model_save_path=str(dk.data_path),
use_masking=(self.model_type == 'MaskablePPO' and
is_masking_supported(self.eval_env)))
actions = self.train_env.get_actions()
self.tensorboard_callback = TensorboardCallback(verbose=1, actions=actions)
@@ -236,13 +252,10 @@ class BaseReinforcementLearningModel(IFreqaiModel):
unfiltered_df, dk.training_features_list, training_filter=False
)
filtered_dataframe = self.drop_ohlc_from_df(filtered_dataframe, dk)
dk.data_dictionary["prediction_features"] = self.drop_ohlc_from_df(filtered_dataframe, dk)
filtered_dataframe = dk.normalize_data_from_metadata(filtered_dataframe)
dk.data_dictionary["prediction_features"] = filtered_dataframe
# optional additional data cleaning/analysis
self.data_cleaning_predict(dk)
dk.data_dictionary["prediction_features"], _, _ = dk.feature_pipeline.transform(
dk.data_dictionary["prediction_features"], outlier_check=True)
pred_df = self.rl_model_predict(
dk.data_dictionary["prediction_features"], dk, self.model)
@@ -371,6 +384,12 @@ class BaseReinforcementLearningModel(IFreqaiModel):
"""
An example reward function. This is the one function that users will likely
wish to inject their own creativity into.
Warning!
This function is a showcase, designed to demonstrate as many environment
control features as possible. It is also designed to run quickly on small
computers. This is a benchmark; it is *not* for live production.
:param action: int = The action made by the agent for the current candle.
:return:
float = the reward to give to the agent for current step (used for optimization
@@ -431,9 +450,8 @@ class BaseReinforcementLearningModel(IFreqaiModel):
return 0.
def make_env(MyRLEnv: Type[gym.Env], env_id: str, rank: int,
def make_env(MyRLEnv: Type[BaseEnvironment], env_id: str, rank: int,
seed: int, train_df: DataFrame, price: DataFrame,
monitor: bool = False,
env_info: Dict[str, Any] = {}) -> Callable:
"""
Utility function for multiprocessed env.
@@ -450,8 +468,7 @@ def make_env(MyRLEnv: Type[gym.Env], env_id: str, rank: int,
env = MyRLEnv(df=train_df, prices=price, id=env_id, seed=seed + rank,
**env_info)
if monitor:
env = Monitor(env)
return env
set_random_seed(seed)
return _init

View File

@@ -17,8 +17,8 @@ logger = logging.getLogger(__name__)
class BaseClassifierModel(IFreqaiModel):
"""
Base class for classifier type models (e.g. Catboost, LightGBM, XGBoost etc.).
User *must* inherit from this class and set fit() and predict(). See example scripts
such as prediction_models/CatboostPredictionModel.py for guidance.
User *must* inherit from this class and set fit(). See example scripts
such as prediction_models/CatboostClassifier.py for guidance.
"""
def train(
@@ -50,21 +50,30 @@ class BaseClassifierModel(IFreqaiModel):
logger.info(f"-------------------- Training on data from {start_date} to "
f"{end_date} --------------------")
# split data into train/test data.
data_dictionary = dk.make_train_test_datasets(features_filtered, labels_filtered)
dd = dk.make_train_test_datasets(features_filtered, labels_filtered)
if not self.freqai_info.get("fit_live_predictions_candles", 0) or not self.live:
dk.fit_labels()
# normalize all data based on train_dataset only
data_dictionary = dk.normalize_data(data_dictionary)
dk.feature_pipeline = self.define_data_pipeline(threads=dk.thread_count)
# optional additional data cleaning/analysis
self.data_cleaning_train(dk)
(dd["train_features"],
dd["train_labels"],
dd["train_weights"]) = dk.feature_pipeline.fit_transform(dd["train_features"],
dd["train_labels"],
dd["train_weights"])
if self.freqai_info.get('data_split_parameters', {}).get('test_size', 0.1) != 0:
(dd["test_features"],
dd["test_labels"],
dd["test_weights"]) = dk.feature_pipeline.transform(dd["test_features"],
dd["test_labels"],
dd["test_weights"])
logger.info(
f"Training model on {len(dk.data_dictionary['train_features'].columns)} features"
)
logger.info(f"Training model on {len(data_dictionary['train_features'])} data points")
logger.info(f"Training model on {len(dd['train_features'])} data points")
model = self.fit(data_dictionary, dk)
model = self.fit(dd, dk)
end_time = time()
@@ -89,10 +98,11 @@ class BaseClassifierModel(IFreqaiModel):
filtered_df, _ = dk.filter_features(
unfiltered_df, dk.training_features_list, training_filter=False
)
filtered_df = dk.normalize_data_from_metadata(filtered_df)
dk.data_dictionary["prediction_features"] = filtered_df
self.data_cleaning_predict(dk)
dk.data_dictionary["prediction_features"], outliers, _ = dk.feature_pipeline.transform(
dk.data_dictionary["prediction_features"], outlier_check=True)
predictions = self.model.predict(dk.data_dictionary["prediction_features"])
if self.CONV_WIDTH == 1:
@@ -107,4 +117,10 @@ class BaseClassifierModel(IFreqaiModel):
pred_df = pd.concat([pred_df, pred_df_prob], axis=1)
if dk.feature_pipeline["di"]:
dk.DI_values = dk.feature_pipeline["di"].di_values
else:
dk.DI_values = np.zeros(outliers.shape[0])
dk.do_predict = outliers
return (pred_df, dk.do_predict)

View File

@@ -1,5 +1,6 @@
import logging
from typing import Dict, List, Tuple
from time import time
from typing import Any, Dict, List, Tuple
import numpy as np
import numpy.typing as npt
@@ -35,6 +36,7 @@ class BasePyTorchClassifier(BasePyTorchModel):
return dataframe
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.class_name_to_index = None
@@ -45,6 +47,7 @@ class BasePyTorchClassifier(BasePyTorchModel):
) -> Tuple[DataFrame, npt.NDArray[np.int_]]:
"""
Filter the prediction features data and predict with it.
:param dk: The datakitchen object
:param unfiltered_df: Full dataframe for the current backtest period.
:return:
:pred_df: dataframe containing the predictions
@@ -67,20 +70,33 @@ class BasePyTorchClassifier(BasePyTorchModel):
filtered_df, _ = dk.filter_features(
unfiltered_df, dk.training_features_list, training_filter=False
)
filtered_df = dk.normalize_data_from_metadata(filtered_df)
dk.data_dictionary["prediction_features"] = filtered_df
self.data_cleaning_predict(dk)
dk.data_dictionary["prediction_features"], outliers, _ = dk.feature_pipeline.transform(
dk.data_dictionary["prediction_features"], outlier_check=True)
x = self.data_convertor.convert_x(
dk.data_dictionary["prediction_features"],
device=self.device
)
self.model.model.eval()
logits = self.model.model(x)
probs = F.softmax(logits, dim=-1)
predicted_classes = torch.argmax(probs, dim=-1)
predicted_classes_str = self.decode_class_names(predicted_classes)
pred_df_prob = DataFrame(probs.detach().numpy(), columns=class_names)
# use .tolist() to convert probs into an iterable; this way, tensors are
# automatically moved to the CPU first if necessary.
pred_df_prob = DataFrame(probs.detach().tolist(), columns=class_names)
pred_df = DataFrame(predicted_classes_str, columns=[dk.label_list[0]])
pred_df = pd.concat([pred_df, pred_df_prob], axis=1)
if dk.feature_pipeline["di"]:
dk.DI_values = dk.feature_pipeline["di"].di_values
else:
dk.DI_values = np.zeros(outliers.shape[0])
dk.do_predict = outliers
return (pred_df, dk.do_predict)
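A standalone sketch of the softmax → argmax → decode path used above (toy logits and class names):

import torch
import torch.nn.functional as F

logits = torch.tensor([[2.0, 0.5, -1.0]])       # toy model output for one sample
probs = F.softmax(logits, dim=-1)
predicted = torch.argmax(probs, dim=-1)          # tensor([0])
class_names = ['up', 'down', 'flat']             # illustrative label names
decoded = [class_names[i] for i in predicted.tolist()]   # ['up']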
def encode_class_names(
@@ -145,3 +161,58 @@ class BasePyTorchClassifier(BasePyTorchModel):
)
return self.class_names
def train(
self, unfiltered_df: DataFrame, pair: str, dk: FreqaiDataKitchen, **kwargs
) -> Any:
"""
Filter the training data and train a model on it. Train makes heavy use of the datakitchen
for storing, saving, loading, and analyzing the data.
:param unfiltered_df: Full dataframe for the current training period
:return:
:model: Trained model which can be used for inference (self.predict)
"""
logger.info(f"-------------------- Starting training {pair} --------------------")
start_time = time()
features_filtered, labels_filtered = dk.filter_features(
unfiltered_df,
dk.training_features_list,
dk.label_list,
training_filter=True,
)
# split data into train/test data.
dd = dk.make_train_test_datasets(features_filtered, labels_filtered)
if not self.freqai_info.get("fit_live_predictions_candles", 0) or not self.live:
dk.fit_labels()
dk.feature_pipeline = self.define_data_pipeline(threads=dk.thread_count)
(dd["train_features"],
dd["train_labels"],
dd["train_weights"]) = dk.feature_pipeline.fit_transform(dd["train_features"],
dd["train_labels"],
dd["train_weights"])
if self.freqai_info.get('data_split_parameters', {}).get('test_size', 0.1) != 0:
(dd["test_features"],
dd["test_labels"],
dd["test_weights"]) = dk.feature_pipeline.transform(dd["test_features"],
dd["test_labels"],
dd["test_weights"])
logger.info(
f"Training model on {len(dk.data_dictionary['train_features'].columns)} features"
)
logger.info(f"Training model on {len(dd['train_features'])} data points")
model = self.fit(dd, dk)
end_time = time()
logger.info(f"-------------------- Done training {pair} "
f"({end_time - start_time:.2f} secs) --------------------")
return model

View File

@@ -1,12 +1,8 @@
import logging
from abc import ABC, abstractmethod
from time import time
from typing import Any
import torch
from pandas import DataFrame
from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
from freqtrade.freqai.freqai_interface import IFreqaiModel
from freqtrade.freqai.torch.PyTorchDataConvertor import PyTorchDataConvertor
@@ -27,51 +23,7 @@ class BasePyTorchModel(IFreqaiModel, ABC):
self.device = "cuda" if torch.cuda.is_available() else "cpu"
test_size = self.freqai_info.get('data_split_parameters', {}).get('test_size')
self.splits = ["train", "test"] if test_size != 0 else ["train"]
def train(
self, unfiltered_df: DataFrame, pair: str, dk: FreqaiDataKitchen, **kwargs
) -> Any:
"""
Filter the training data and train a model on it. Train makes heavy use of the datakitchen
for storing, saving, loading, and analyzing the data.
:param unfiltered_df: Full dataframe for the current training period
:return:
:model: Trained model which can be used for inference (self.predict)
"""
logger.info(f"-------------------- Starting training {pair} --------------------")
start_time = time()
features_filtered, labels_filtered = dk.filter_features(
unfiltered_df,
dk.training_features_list,
dk.label_list,
training_filter=True,
)
# split data into train/test data.
data_dictionary = dk.make_train_test_datasets(features_filtered, labels_filtered)
if not self.freqai_info.get("fit_live_predictions", 0) or not self.live:
dk.fit_labels()
# normalize all data based on train_dataset only
data_dictionary = dk.normalize_data(data_dictionary)
# optional additional data cleaning/analysis
self.data_cleaning_train(dk)
logger.info(
f"Training model on {len(dk.data_dictionary['train_features'].columns)} features"
)
logger.info(f"Training model on {len(data_dictionary['train_features'])} data points")
model = self.fit(data_dictionary, dk)
end_time = time()
logger.info(f"-------------------- Done training {pair} "
f"({end_time - start_time:.2f} secs) --------------------")
return model
self.window_size = self.freqai_info.get("conv_width", 1)
@property
@abstractmethod

View File

@@ -1,5 +1,6 @@
import logging
from typing import Tuple
from time import time
from typing import Any, Tuple
import numpy as np
import numpy.typing as npt
@@ -17,6 +18,7 @@ class BasePyTorchRegressor(BasePyTorchModel):
A PyTorch implementation of a regressor.
User must implement fit method
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
@@ -36,15 +38,83 @@ class BasePyTorchRegressor(BasePyTorchModel):
filtered_df, _ = dk.filter_features(
unfiltered_df, dk.training_features_list, training_filter=False
)
filtered_df = dk.normalize_data_from_metadata(filtered_df)
dk.data_dictionary["prediction_features"] = filtered_df
self.data_cleaning_predict(dk)
dk.data_dictionary["prediction_features"], outliers, _ = dk.feature_pipeline.transform(
dk.data_dictionary["prediction_features"], outlier_check=True)
x = self.data_convertor.convert_x(
dk.data_dictionary["prediction_features"],
device=self.device
)
self.model.model.eval()
y = self.model.model(x)
y = y.cpu()
pred_df = DataFrame(y.detach().numpy(), columns=[dk.label_list[0]])
pred_df = DataFrame(y.detach().tolist(), columns=[dk.label_list[0]])
pred_df, _, _ = dk.label_pipeline.inverse_transform(pred_df)
if dk.feature_pipeline["di"]:
dk.DI_values = dk.feature_pipeline["di"].di_values
else:
dk.DI_values = np.zeros(outliers.shape[0])
dk.do_predict = outliers
return (pred_df, dk.do_predict)
def train(
self, unfiltered_df: DataFrame, pair: str, dk: FreqaiDataKitchen, **kwargs
) -> Any:
"""
Filter the training data and train a model on it. Train makes heavy use of the datakitchen
for storing, saving, loading, and analyzing the data.
:param unfiltered_df: Full dataframe for the current training period
:return:
:model: Trained model which can be used for inference (self.predict)
"""
logger.info(f"-------------------- Starting training {pair} --------------------")
start_time = time()
features_filtered, labels_filtered = dk.filter_features(
unfiltered_df,
dk.training_features_list,
dk.label_list,
training_filter=True,
)
# split data into train/test data.
dd = dk.make_train_test_datasets(features_filtered, labels_filtered)
if not self.freqai_info.get("fit_live_predictions_candles", 0) or not self.live:
dk.fit_labels()
dk.feature_pipeline = self.define_data_pipeline(threads=dk.thread_count)
dk.label_pipeline = self.define_label_pipeline(threads=dk.thread_count)
dd["train_labels"], _, _ = dk.label_pipeline.fit_transform(dd["train_labels"])
dd["test_labels"], _, _ = dk.label_pipeline.transform(dd["test_labels"])
(dd["train_features"],
dd["train_labels"],
dd["train_weights"]) = dk.feature_pipeline.fit_transform(dd["train_features"],
dd["train_labels"],
dd["train_weights"])
dd["train_labels"], _, _ = dk.label_pipeline.fit_transform(dd["train_labels"])
if self.freqai_info.get('data_split_parameters', {}).get('test_size', 0.1) != 0:
(dd["test_features"],
dd["test_labels"],
dd["test_weights"]) = dk.feature_pipeline.transform(dd["test_features"],
dd["test_labels"],
dd["test_weights"])
dd["test_labels"], _, _ = dk.label_pipeline.transform(dd["test_labels"])
logger.info(
f"Training model on {len(dk.data_dictionary['train_features'].columns)} features"
)
logger.info(f"Training model on {len(dd['train_features'])} data points")
model = self.fit(dd, dk)
end_time = time()
logger.info(f"-------------------- Done training {pair} "
f"({end_time - start_time:.2f} secs) --------------------")
return model

View File

@@ -16,8 +16,8 @@ logger = logging.getLogger(__name__)
class BaseRegressionModel(IFreqaiModel):
"""
Base class for regression type models (e.g. Catboost, LightGBM, XGBoost etc.).
User *must* inherit from this class and set fit() and predict(). See example scripts
such as prediction_models/CatboostPredictionModel.py for guidance.
User *must* inherit from this class and set fit(). See example scripts
such as prediction_models/CatboostRegressor.py for guidance.
"""
def train(
@@ -49,21 +49,33 @@ class BaseRegressionModel(IFreqaiModel):
logger.info(f"-------------------- Training on data from {start_date} to "
f"{end_date} --------------------")
# split data into train/test data.
data_dictionary = dk.make_train_test_datasets(features_filtered, labels_filtered)
dd = dk.make_train_test_datasets(features_filtered, labels_filtered)
if not self.freqai_info.get("fit_live_predictions_candles", 0) or not self.live:
dk.fit_labels()
# normalize all data based on train_dataset only
data_dictionary = dk.normalize_data(data_dictionary)
dk.feature_pipeline = self.define_data_pipeline(threads=dk.thread_count)
dk.label_pipeline = self.define_label_pipeline(threads=dk.thread_count)
# optional additional data cleaning/analysis
self.data_cleaning_train(dk)
(dd["train_features"],
dd["train_labels"],
dd["train_weights"]) = dk.feature_pipeline.fit_transform(dd["train_features"],
dd["train_labels"],
dd["train_weights"])
dd["train_labels"], _, _ = dk.label_pipeline.fit_transform(dd["train_labels"])
if self.freqai_info.get('data_split_parameters', {}).get('test_size', 0.1) != 0:
(dd["test_features"],
dd["test_labels"],
dd["test_weights"]) = dk.feature_pipeline.transform(dd["test_features"],
dd["test_labels"],
dd["test_weights"])
dd["test_labels"], _, _ = dk.label_pipeline.transform(dd["test_labels"])
logger.info(
f"Training model on {len(dk.data_dictionary['train_features'].columns)} features"
)
logger.info(f"Training model on {len(data_dictionary['train_features'])} data points")
logger.info(f"Training model on {len(dd['train_features'])} data points")
model = self.fit(data_dictionary, dk)
model = self.fit(dd, dk)
end_time = time()
@@ -85,14 +97,12 @@ class BaseRegressionModel(IFreqaiModel):
"""
dk.find_features(unfiltered_df)
filtered_df, _ = dk.filter_features(
dk.data_dictionary["prediction_features"], _ = dk.filter_features(
unfiltered_df, dk.training_features_list, training_filter=False
)
filtered_df = dk.normalize_data_from_metadata(filtered_df)
dk.data_dictionary["prediction_features"] = filtered_df
# optional additional data cleaning/analysis
self.data_cleaning_predict(dk)
dk.data_dictionary["prediction_features"], outliers, _ = dk.feature_pipeline.transform(
dk.data_dictionary["prediction_features"], outlier_check=True)
predictions = self.model.predict(dk.data_dictionary["prediction_features"])
if self.CONV_WIDTH == 1:
@@ -100,6 +110,11 @@ class BaseRegressionModel(IFreqaiModel):
pred_df = DataFrame(predictions, columns=dk.label_list)
pred_df = dk.denormalize_labels_from_metadata(pred_df)
pred_df, _, _ = dk.label_pipeline.inverse_transform(pred_df)
if dk.feature_pipeline["di"]:
dk.DI_values = dk.feature_pipeline["di"].di_values
else:
dk.DI_values = np.zeros(outliers.shape[0])
dk.do_predict = outliers
return (pred_df, dk.do_predict)
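After the transform, the `outliers` array doubles as the do_predict mask (1 = trusted prediction, 0 = flagged outlier); a toy illustration of consuming it (values made up):

import numpy as np

do_predict = np.array([1, 1, 0, 1])            # e.g. dk.do_predict from above
predictions = np.array([0.20, -0.10, 5.00, 0.05])
trusted = predictions[do_predict == 1]          # drop predictions flagged as outliers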

View File

@@ -1,70 +0,0 @@
import logging
from time import time
from typing import Any
from pandas import DataFrame
from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
from freqtrade.freqai.freqai_interface import IFreqaiModel
logger = logging.getLogger(__name__)
class BaseTensorFlowModel(IFreqaiModel):
"""
Base class for TensorFlow type models.
User *must* inherit from this class and set fit() and predict().
"""
def train(
self, unfiltered_df: DataFrame, pair: str, dk: FreqaiDataKitchen, **kwargs
) -> Any:
"""
Filter the training data and train a model on it. Train makes heavy use of the datakitchen
for storing, saving, loading, and analyzing the data.
:param unfiltered_df: Full dataframe for the current training period
:param metadata: pair metadata from strategy.
:return:
:model: Trained model which can be used for inference (self.predict)
"""
logger.info(f"-------------------- Starting training {pair} --------------------")
start_time = time()
# filter the features requested by user in the configuration file and elegantly handle NaNs
features_filtered, labels_filtered = dk.filter_features(
unfiltered_df,
dk.training_features_list,
dk.label_list,
training_filter=True,
)
start_date = unfiltered_df["date"].iloc[0].strftime("%Y-%m-%d")
end_date = unfiltered_df["date"].iloc[-1].strftime("%Y-%m-%d")
logger.info(f"-------------------- Training on data from {start_date} to "
f"{end_date} --------------------")
# split data into train/test data.
data_dictionary = dk.make_train_test_datasets(features_filtered, labels_filtered)
if not self.freqai_info.get("fit_live_predictions_candles", 0) or not self.live:
dk.fit_labels()
# normalize all data based on train_dataset only
data_dictionary = dk.normalize_data(data_dictionary)
# optional additional data cleaning/analysis
self.data_cleaning_train(dk)
logger.info(
f"Training model on {len(dk.data_dictionary['train_features'].columns)} features"
)
logger.info(f"Training model on {len(data_dictionary['train_features'])} data points")
model = self.fit(data_dictionary, dk)
end_time = time()
logger.info(f"-------------------- Done training {pair} "
f"({end_time - start_time:.2f} secs) --------------------")
return model

View File

@@ -20,6 +20,7 @@ from pandas import DataFrame
from freqtrade.configuration import TimeRange
from freqtrade.constants import Config
from freqtrade.data.history import load_pair_history
from freqtrade.enums import CandleType
from freqtrade.exceptions import OperationalException
from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
from freqtrade.strategy.interface import IStrategy
@@ -27,6 +28,11 @@ from freqtrade.strategy.interface import IStrategy
logger = logging.getLogger(__name__)
FEATURE_PIPELINE = "feature_pipeline"
LABEL_PIPELINE = "label_pipeline"
TRAINDF = "trained_df"
METADATA = "metadata"
class pair_info(TypedDict):
model_filename: str
@@ -424,7 +430,7 @@ class FreqaiDataDrawer:
dk.data["training_features_list"] = list(dk.data_dictionary["train_features"].columns)
dk.data["label_list"] = dk.label_list
with (save_path / f"{dk.model_filename}_metadata.json").open("w") as fp:
with (save_path / f"{dk.model_filename}_{METADATA}.json").open("w") as fp:
rapidjson.dump(dk.data, fp, default=self.np_encoder, number_mode=rapidjson.NM_NATIVE)
return
@@ -449,39 +455,39 @@ class FreqaiDataDrawer:
elif self.model_type in ["stable_baselines3", "sb3_contrib", "pytorch"]:
model.save(save_path / f"{dk.model_filename}_model.zip")
if dk.svm_model is not None:
dump(dk.svm_model, save_path / f"{dk.model_filename}_svm_model.joblib")
dk.data["data_path"] = str(dk.data_path)
dk.data["model_filename"] = str(dk.model_filename)
dk.data["training_features_list"] = dk.training_features_list
dk.data["label_list"] = dk.label_list
# store the metadata
with (save_path / f"{dk.model_filename}_metadata.json").open("w") as fp:
with (save_path / f"{dk.model_filename}_{METADATA}.json").open("w") as fp:
rapidjson.dump(dk.data, fp, default=self.np_encoder, number_mode=rapidjson.NM_NATIVE)
# save the train data to file so we can check preds for area of applicability later
# save the pipelines to pickle files
with (save_path / f"{dk.model_filename}_{FEATURE_PIPELINE}.pkl").open("wb") as fp:
cloudpickle.dump(dk.feature_pipeline, fp)
with (save_path / f"{dk.model_filename}_{LABEL_PIPELINE}.pkl").open("wb") as fp:
cloudpickle.dump(dk.label_pipeline, fp)
# save the train data to file for post processing if desired
dk.data_dictionary["train_features"].to_pickle(
save_path / f"{dk.model_filename}_trained_df.pkl"
save_path / f"{dk.model_filename}_{TRAINDF}.pkl"
)
dk.data_dictionary["train_dates"].to_pickle(
save_path / f"{dk.model_filename}_trained_dates_df.pkl"
)
if self.freqai_info["feature_parameters"].get("principal_component_analysis"):
cloudpickle.dump(
dk.pca, (dk.data_path / f"{dk.model_filename}_pca_object.pkl").open("wb")
)
self.model_dictionary[coin] = model
self.pair_dict[coin]["model_filename"] = dk.model_filename
self.pair_dict[coin]["data_path"] = str(dk.data_path)
if coin not in self.meta_data_dictionary:
self.meta_data_dictionary[coin] = {}
self.meta_data_dictionary[coin]["train_df"] = dk.data_dictionary["train_features"]
self.meta_data_dictionary[coin]["meta_data"] = dk.data
self.meta_data_dictionary[coin][METADATA] = dk.data
self.meta_data_dictionary[coin][FEATURE_PIPELINE] = dk.feature_pipeline
self.meta_data_dictionary[coin][LABEL_PIPELINE] = dk.label_pipeline
self.save_drawer_to_disk()
return
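The save path persists the fitted pipelines with cloudpickle; a minimal standalone round-trip of that pattern (object and path are stand-ins):

from pathlib import Path

import cloudpickle

save_path = Path('user_data/models/example')      # illustrative path
save_path.mkdir(parents=True, exist_ok=True)
pipeline = {'steps': []}                          # stand-in for a fitted datasieve Pipeline
with (save_path / 'model_feature_pipeline.pkl').open('wb') as fp:
    cloudpickle.dump(pipeline, fp)
with (save_path / 'model_feature_pipeline.pkl').open('rb') as fp:
    restored = cloudpickle.load(fp)
assert restored == pipeline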
@@ -491,7 +497,7 @@ class FreqaiDataDrawer:
Load only metadata into datakitchen to increase performance during
presaved backtesting (prediction file loading).
"""
with (dk.data_path / f"{dk.model_filename}_metadata.json").open("r") as fp:
with (dk.data_path / f"{dk.model_filename}_{METADATA}.json").open("r") as fp:
dk.data = rapidjson.load(fp, number_mode=rapidjson.NM_NATIVE)
dk.training_features_list = dk.data["training_features_list"]
dk.label_list = dk.data["label_list"]
@@ -511,15 +517,17 @@ class FreqaiDataDrawer:
dk.data_path = Path(self.pair_dict[coin]["data_path"])
if coin in self.meta_data_dictionary:
dk.data = self.meta_data_dictionary[coin]["meta_data"]
dk.data_dictionary["train_features"] = self.meta_data_dictionary[coin]["train_df"]
dk.data = self.meta_data_dictionary[coin][METADATA]
dk.feature_pipeline = self.meta_data_dictionary[coin][FEATURE_PIPELINE]
dk.label_pipeline = self.meta_data_dictionary[coin][LABEL_PIPELINE]
else:
with (dk.data_path / f"{dk.model_filename}_metadata.json").open("r") as fp:
with (dk.data_path / f"{dk.model_filename}_{METADATA}.json").open("r") as fp:
dk.data = rapidjson.load(fp, number_mode=rapidjson.NM_NATIVE)
dk.data_dictionary["train_features"] = pd.read_pickle(
dk.data_path / f"{dk.model_filename}_trained_df.pkl"
)
with (dk.data_path / f"{dk.model_filename}_{FEATURE_PIPELINE}.pkl").open("rb") as fp:
dk.feature_pipeline = cloudpickle.load(fp)
with (dk.data_path / f"{dk.model_filename}_{LABEL_PIPELINE}.pkl").open("rb") as fp:
dk.label_pipeline = cloudpickle.load(fp)
dk.training_features_list = dk.data["training_features_list"]
dk.label_list = dk.data["label_list"]
@@ -529,9 +537,6 @@ class FreqaiDataDrawer:
model = self.model_dictionary[coin]
elif self.model_type == 'joblib':
model = load(dk.data_path / f"{dk.model_filename}_model.joblib")
elif self.model_type == 'keras':
from tensorflow import keras
model = keras.models.load_model(dk.data_path / f"{dk.model_filename}_model.h5")
elif 'stable_baselines' in self.model_type or 'sb3_contrib' == self.model_type:
mod = importlib.import_module(
self.model_type, self.freqai_info['rl_config']['model_type'])
@@ -543,9 +548,6 @@ class FreqaiDataDrawer:
model = zip["pytrainer"]
model = model.load_from_checkpoint(zip)
if Path(dk.data_path / f"{dk.model_filename}_svm_model.joblib").is_file():
dk.svm_model = load(dk.data_path / f"{dk.model_filename}_svm_model.joblib")
if not model:
raise OperationalException(
f"Unable to load model, ensure model exists at " f"{dk.data_path} "
@@ -555,11 +557,6 @@ class FreqaiDataDrawer:
if coin not in self.model_dictionary:
self.model_dictionary[coin] = model
if self.config["freqai"]["feature_parameters"]["principal_component_analysis"]:
dk.pca = cloudpickle.load(
(dk.data_path / f"{dk.model_filename}_pca_object.pkl").open("rb")
)
return model
def update_historic_data(self, strategy: IStrategy, dk: FreqaiDataKitchen) -> None:
@@ -639,7 +636,7 @@ class FreqaiDataDrawer:
pair=pair,
timerange=timerange,
data_format=self.config.get("dataformat_ohlcv", "json"),
candle_type=self.config.get("trading_mode", "spot"),
candle_type=self.config.get("candle_type_def", CandleType.SPOT),
)
def get_base_and_corr_dataframes(

View File

@@ -4,7 +4,6 @@ import logging
import random
import shutil
from datetime import datetime, timezone
from math import cos, sin
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple
@@ -12,16 +11,12 @@ import numpy as np
import numpy.typing as npt
import pandas as pd
import psutil
from datasieve.pipeline import Pipeline
from pandas import DataFrame
from scipy import stats
from sklearn import linear_model
from sklearn.cluster import DBSCAN
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.model_selection import train_test_split
from sklearn.neighbors import NearestNeighbors
from freqtrade.configuration import TimeRange
from freqtrade.constants import Config
from freqtrade.constants import DOCS_LINK, Config
from freqtrade.data.converter import reduce_dataframe_footprint
from freqtrade.exceptions import OperationalException
from freqtrade.exchange import timeframe_to_seconds
@@ -81,11 +76,12 @@ class FreqaiDataKitchen:
self.backtest_predictions_folder: str = "backtesting_predictions"
self.live = live
self.pair = pair
self.svm_model: linear_model.SGDOneClassSVM = None
self.keras: bool = self.freqai_config.get("keras", False)
self.set_all_pairs()
self.backtest_live_models = config.get("freqai_backtest_live_models", False)
self.feature_pipeline = Pipeline()
self.label_pipeline = Pipeline()
self.DI_values: npt.NDArray = np.array([])
if not self.live:
self.full_path = self.get_full_models_path(self.config)
@@ -227,13 +223,7 @@ class FreqaiDataKitchen:
drop_index = pd.isnull(filtered_df).any(axis=1) # get the rows that have NaNs,
drop_index = drop_index.replace(True, 1).replace(False, 0) # pep8 requirement.
if (training_filter):
const_cols = list((filtered_df.nunique() == 1).loc[lambda x: x].index)
if const_cols:
filtered_df = filtered_df.filter(filtered_df.columns.difference(const_cols))
self.data['constant_features_list'] = const_cols
logger.warning(f"Removed features {const_cols} with constant values.")
else:
self.data['constant_features_list'] = []
# we don't care about total row number (total no. datapoints) in training, we only care
# about removing any row with NaNs
# if labels have multiple columns (user wants to train multiple models), we detect here
@@ -264,8 +254,7 @@ class FreqaiDataKitchen:
self.data["filter_drop_index_training"] = drop_index
else:
if 'constant_features_list' in self.data and len(self.data['constant_features_list']):
filtered_df = self.check_pred_labels(filtered_df)
# we are backtesting so we need to preserve row number to send back to strategy,
# so now we use do_predict to avoid any prediction based on a NaN
drop_index = pd.isnull(filtered_df).any(axis=1)
@@ -307,107 +296,6 @@ class FreqaiDataKitchen:
return self.data_dictionary
def normalize_data(self, data_dictionary: Dict) -> Dict[Any, Any]:
"""
Normalize all data in the data_dictionary according to the training dataset
:param data_dictionary: dictionary containing the cleaned and
split training/test data/labels
:returns:
:data_dictionary: updated dictionary with standardized values.
"""
# standardize the data by training stats
train_max = data_dictionary["train_features"].max()
train_min = data_dictionary["train_features"].min()
data_dictionary["train_features"] = (
2 * (data_dictionary["train_features"] - train_min) / (train_max - train_min) - 1
)
data_dictionary["test_features"] = (
2 * (data_dictionary["test_features"] - train_min) / (train_max - train_min) - 1
)
for item in train_max.keys():
self.data[item + "_max"] = train_max[item]
self.data[item + "_min"] = train_min[item]
for item in data_dictionary["train_labels"].keys():
if data_dictionary["train_labels"][item].dtype == object:
continue
train_labels_max = data_dictionary["train_labels"][item].max()
train_labels_min = data_dictionary["train_labels"][item].min()
data_dictionary["train_labels"][item] = (
2
* (data_dictionary["train_labels"][item] - train_labels_min)
/ (train_labels_max - train_labels_min)
- 1
)
if self.freqai_config.get('data_split_parameters', {}).get('test_size', 0.1) != 0:
data_dictionary["test_labels"][item] = (
2
* (data_dictionary["test_labels"][item] - train_labels_min)
/ (train_labels_max - train_labels_min)
- 1
)
self.data[f"{item}_max"] = train_labels_max
self.data[f"{item}_min"] = train_labels_min
return data_dictionary
def normalize_single_dataframe(self, df: DataFrame) -> DataFrame:
train_max = df.max()
train_min = df.min()
df = (
2 * (df - train_min) / (train_max - train_min) - 1
)
for item in train_max.keys():
self.data[item + "_max"] = train_max[item]
self.data[item + "_min"] = train_min[item]
return df
def normalize_data_from_metadata(self, df: DataFrame) -> DataFrame:
"""
Normalize a set of data using the min and max values from
the associated training data.
:param df: Dataframe to be standardized
"""
train_max = [None] * len(df.keys())
train_min = [None] * len(df.keys())
for i, item in enumerate(df.keys()):
train_max[i] = self.data[f"{item}_max"]
train_min[i] = self.data[f"{item}_min"]
train_max_series = pd.Series(train_max, index=df.keys())
train_min_series = pd.Series(train_min, index=df.keys())
df = (
2 * (df - train_min_series) / (train_max_series - train_min_series) - 1
)
return df
def denormalize_labels_from_metadata(self, df: DataFrame) -> DataFrame:
"""
Denormalize a set of data using the min and max values from
the associated training data.
:param df: Dataframe of predictions to be denormalized
"""
for label in df.columns:
if df[label].dtype == object or label in self.unique_class_list:
continue
df[label] = (
(df[label] + 1)
* (self.data[f"{label}_max"] - self.data[f"{label}_min"])
/ 2
) + self.data[f"{label}_min"]
return df
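For the record, the retired scheme min-max scales features and labels into [-1, 1] using train statistics only; a worked sketch (illustrative values):

import numpy as np

x = np.array([0.0, 5.0, 7.5, 10.0])
train_min, train_max = 0.0, 10.0
scaled = 2 * (x - train_min) / (train_max - train_min) - 1    # [-1., 0., 0.5, 1.]
restored = (scaled + 1) * (train_max - train_min) / 2 + train_min
assert np.allclose(restored, x)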
def split_timerange(
self, tr: str, train_split: int = 28, bt_split: float = 7
) -> Tuple[list, list]:
@@ -452,9 +340,7 @@ class FreqaiDataKitchen:
tr_training_list_timerange.append(copy.deepcopy(timerange_train))
# associated backtest period
timerange_backtest.startts = timerange_train.stopts
timerange_backtest.stopts = timerange_backtest.startts + int(bt_period)
if timerange_backtest.stopts > config_timerange.stopts:
@@ -485,426 +371,6 @@ class FreqaiDataKitchen:
return df
def check_pred_labels(self, df_predictions: DataFrame) -> DataFrame:
"""
Check that prediction feature labels match training feature labels.
:param df_predictions: incoming predictions
"""
constant_labels = self.data['constant_features_list']
df_predictions = df_predictions.filter(
df_predictions.columns.difference(constant_labels)
)
logger.warning(
f"Removed {len(constant_labels)} features from prediction features, "
f"these were considered constant values during most recent training."
)
return df_predictions
def principal_component_analysis(self) -> None:
"""
Performs Principal Component Analysis on the data for dimensionality reduction
and outlier detection (see self.remove_outliers())
No parameters or returns, it acts on the data_dictionary held by the DataHandler.
"""
from sklearn.decomposition import PCA  # avoid importing if we don't need it
pca = PCA(0.999)
pca = pca.fit(self.data_dictionary["train_features"])
n_keep_components = pca.n_components_
self.data["n_kept_components"] = n_keep_components
n_components = self.data_dictionary["train_features"].shape[1]
logger.info("reduced feature dimension by %s", n_components - n_keep_components)
logger.info("explained variance %f", np.sum(pca.explained_variance_ratio_))
train_components = pca.transform(self.data_dictionary["train_features"])
self.data_dictionary["train_features"] = pd.DataFrame(
data=train_components,
columns=["PC" + str(i) for i in range(0, n_keep_components)],
index=self.data_dictionary["train_features"].index,
)
# normalising transformed training features
self.data_dictionary["train_features"] = self.normalize_single_dataframe(
self.data_dictionary["train_features"])
# keeping a copy of the non-transformed features so we can check for errors during
# model load from disk
self.data["training_features_list_raw"] = copy.deepcopy(self.training_features_list)
self.training_features_list = self.data_dictionary["train_features"].columns
if self.freqai_config.get('data_split_parameters', {}).get('test_size', 0.1) != 0:
test_components = pca.transform(self.data_dictionary["test_features"])
self.data_dictionary["test_features"] = pd.DataFrame(
data=test_components,
columns=["PC" + str(i) for i in range(0, n_keep_components)],
index=self.data_dictionary["test_features"].index,
)
# normalise transformed test features to transformed training features
self.data_dictionary["test_features"] = self.normalize_data_from_metadata(
self.data_dictionary["test_features"])
self.data["n_kept_components"] = n_keep_components
self.pca = pca
logger.info(f"PCA reduced total features from {n_components} to {n_keep_components}")
if not self.data_path.is_dir():
self.data_path.mkdir(parents=True, exist_ok=True)
return None
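# Minimal sketch of the variance-retaining PCA applied above, on random data
# (shapes and seed are assumptions for illustration):
import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
features = pd.DataFrame(np.random.default_rng(42).normal(size=(500, 20)))
pca = PCA(0.999).fit(features)        # keep enough PCs for 99.9% of the variance
components = pca.transform(features)  # shape (500, pca.n_components_)
print(pca.n_components_, np.sum(pca.explained_variance_ratio_))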
def pca_transform(self, filtered_dataframe: DataFrame) -> None:
"""
Use an existing pca transform to transform data into components
:param filtered_dataframe: DataFrame = the cleaned dataframe
"""
pca_components = self.pca.transform(filtered_dataframe)
self.data_dictionary["prediction_features"] = pd.DataFrame(
data=pca_components,
columns=["PC" + str(i) for i in range(0, self.data["n_kept_components"])],
index=filtered_dataframe.index,
)
# normalise transformed predictions to transformed training features
self.data_dictionary["prediction_features"] = self.normalize_data_from_metadata(
self.data_dictionary["prediction_features"])
def compute_distances(self) -> float:
"""
Compute distances between each training point and every other training
point. This metric defines the neighborhood of trained data and is used
for prediction confidence in the Dissimilarity Index
"""
# logger.info("computing average mean distance for all training points")
pairwise = pairwise_distances(
self.data_dictionary["train_features"], n_jobs=self.thread_count)
# remove the diagonal distances, which are self-distances (~0)
np.fill_diagonal(pairwise, np.NaN)
pairwise = pairwise.reshape(-1, 1)
avg_mean_dist = pairwise[~np.isnan(pairwise)].mean()
return avg_mean_dist
def get_outlier_percentage(self, dropped_pts: npt.NDArray) -> float:
"""
Check if more than X% of points were dropped during outlier detection.
"""
outlier_protection_pct = self.freqai_config["feature_parameters"].get(
"outlier_protection_percentage", 30)
outlier_pct = (dropped_pts.sum() / len(dropped_pts)) * 100
if outlier_pct >= outlier_protection_pct:
return outlier_pct
else:
return 0.0
def use_SVM_to_remove_outliers(self, predict: bool) -> None:
"""
Build or apply a Support Vector Machine to detect outliers
in training data and predictions
:param predict: bool = If True, run inference with an existing SVM model, else construct one
"""
if self.keras:
logger.warning(
"SVM outlier removal not currently supported for Keras based models. "
"Skipping user requested function."
)
if predict:
self.do_predict = np.ones(len(self.data_dictionary["prediction_features"]))
return
if predict:
if not self.svm_model:
logger.warning("No svm model available for outlier removal")
return
y_pred = self.svm_model.predict(self.data_dictionary["prediction_features"])
do_predict = np.where(y_pred == -1, 0, y_pred)
if (len(do_predict) - do_predict.sum()) > 0:
logger.info(f"SVM tossed {len(do_predict) - do_predict.sum()} predictions.")
self.do_predict += do_predict
self.do_predict -= 1
else:
# use SGDOneClassSVM to increase speed?
svm_params = self.freqai_config["feature_parameters"].get(
"svm_params", {"shuffle": False, "nu": 0.1})
self.svm_model = linear_model.SGDOneClassSVM(**svm_params).fit(
self.data_dictionary["train_features"]
)
y_pred = self.svm_model.predict(self.data_dictionary["train_features"])
kept_points = np.where(y_pred == -1, 0, y_pred)
# keep_index = np.where(y_pred == 1)
outlier_pct = self.get_outlier_percentage(1 - kept_points)
if outlier_pct:
logger.warning(
f"SVM detected {outlier_pct:.2f}% of the points as outliers. "
f"Keeping original dataset."
)
self.svm_model = None
return
self.data_dictionary["train_features"] = self.data_dictionary["train_features"][
(y_pred == 1)
]
self.data_dictionary["train_labels"] = self.data_dictionary["train_labels"][
(y_pred == 1)
]
self.data_dictionary["train_weights"] = self.data_dictionary["train_weights"][
(y_pred == 1)
]
logger.info(
f"SVM tossed {len(y_pred) - kept_points.sum()}"
f" train points from {len(y_pred)} total points."
)
# same for test data
# TODO: This (and the part above) could be refactored into a separate function
# to reduce code duplication
if self.freqai_config['data_split_parameters'].get('test_size', 0.1) != 0:
y_pred = self.svm_model.predict(self.data_dictionary["test_features"])
kept_points = np.where(y_pred == -1, 0, y_pred)
self.data_dictionary["test_features"] = self.data_dictionary["test_features"][
(y_pred == 1)
]
self.data_dictionary["test_labels"] = self.data_dictionary["test_labels"][(
y_pred == 1)]
self.data_dictionary["test_weights"] = self.data_dictionary["test_weights"][
(y_pred == 1)
]
logger.info(
f"{self.pair}: SVM tossed {len(y_pred) - kept_points.sum()}"
f" test points from {len(y_pred)} total points."
)
return
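# Minimal sketch of the SGDOneClassSVM step above (toy data; nu and shuffle
# mirror the defaults used in this method):
import numpy as np
from sklearn import linear_model
train_features = np.random.default_rng(0).normal(size=(1000, 5))
svm = linear_model.SGDOneClassSVM(shuffle=False, nu=0.1).fit(train_features)
y_pred = svm.predict(train_features)          # +1 for inliers, -1 for outliers
print(f"kept {(y_pred == 1).sum()} of {len(y_pred)} points")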
def use_DBSCAN_to_remove_outliers(self, predict: bool, eps=None) -> None:
"""
Use DBSCAN to cluster training data and remove "noisy" data (read: outliers).
User controls this via the config param `DBSCAN_outlier_pct` which indicates the
pct of training data that they want to be considered outliers.
:param predict: bool = If False (training), iterate to find the best hyperparameters
to match the user-requested outlier percent target.
If True (prediction), use the parameters determined from
the previous training to estimate if the current prediction point
is an outlier.
"""
if predict:
if not self.data['DBSCAN_eps']:
return
train_ft_df = self.data_dictionary['train_features']
pred_ft_df = self.data_dictionary['prediction_features']
num_preds = len(pred_ft_df)
df = pd.concat([train_ft_df, pred_ft_df], axis=0, ignore_index=True)
clustering = DBSCAN(eps=self.data['DBSCAN_eps'],
min_samples=self.data['DBSCAN_min_samples'],
n_jobs=self.thread_count
).fit(df)
do_predict = np.where(clustering.labels_[-num_preds:] == -1, 0, 1)
if (len(do_predict) - do_predict.sum()) > 0:
logger.info(f"DBSCAN tossed {len(do_predict) - do_predict.sum()} predictions")
self.do_predict += do_predict
self.do_predict -= 1
else:
def normalise_distances(distances):
normalised_distances = (distances - distances.min()) / \
(distances.max() - distances.min())
return normalised_distances
def rotate_point(origin, point, angle):
# rotate a point counterclockwise by a given angle (in radians)
# around a given origin
x = origin[0] + cos(angle) * (point[0] - origin[0]) - \
sin(angle) * (point[1] - origin[1])
y = origin[1] + sin(angle) * (point[0] - origin[0]) + \
cos(angle) * (point[1] - origin[1])
return (x, y)
MinPts = int(len(self.data_dictionary['train_features'].index) * 0.25)
# measure pairwise distances to nearest neighbours
neighbors = NearestNeighbors(
n_neighbors=MinPts, n_jobs=self.thread_count)
neighbors_fit = neighbors.fit(self.data_dictionary['train_features'])
distances, _ = neighbors_fit.kneighbors(self.data_dictionary['train_features'])
distances = np.sort(distances, axis=0).mean(axis=1)
normalised_distances = normalise_distances(distances)
x_range = np.linspace(0, 1, len(distances))
line = np.linspace(normalised_distances[0],
normalised_distances[-1], len(normalised_distances))
deflection = np.abs(normalised_distances - line)
max_deflection_loc = np.where(deflection == deflection.max())[0][0]
origin = x_range[max_deflection_loc], line[max_deflection_loc]
point = x_range[max_deflection_loc], normalised_distances[max_deflection_loc]
rot_angle = np.pi / 4
elbow_loc = rotate_point(origin, point, rot_angle)
epsilon = elbow_loc[1] * (distances[-1] - distances[0]) + distances[0]
clustering = DBSCAN(eps=epsilon, min_samples=MinPts,
n_jobs=int(self.thread_count)).fit(
self.data_dictionary['train_features']
)
logger.info(f'DBSCAN found eps of {epsilon:.2f}.')
self.data['DBSCAN_eps'] = epsilon
self.data['DBSCAN_min_samples'] = MinPts
dropped_points = np.where(clustering.labels_ == -1, 1, 0)
outlier_pct = self.get_outlier_percentage(dropped_points)
if outlier_pct:
logger.warning(
f"DBSCAN detected {outlier_pct:.2f}% of the points as outliers. "
f"Keeping original dataset."
)
self.data['DBSCAN_eps'] = 0
return
self.data_dictionary['train_features'] = self.data_dictionary['train_features'][
(clustering.labels_ != -1)
]
self.data_dictionary["train_labels"] = self.data_dictionary["train_labels"][
(clustering.labels_ != -1)
]
self.data_dictionary["train_weights"] = self.data_dictionary["train_weights"][
(clustering.labels_ != -1)
]
logger.info(
f"DBSCAN tossed {dropped_points.sum()}"
f" train points from {len(clustering.labels_)}"
)
return
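# Minimal sketch of the elbow search above: sort the mean k-NN distances, draw
# a line between the endpoints and take the point of maximum deflection. The
# 45-degree rotation refinement is omitted here for brevity (toy data):
import numpy as np
from sklearn.neighbors import NearestNeighbors
train = np.random.default_rng(1).normal(size=(400, 4))
min_pts = int(len(train) * 0.25)
distances, _ = NearestNeighbors(n_neighbors=min_pts).fit(train).kneighbors(train)
distances = np.sort(distances, axis=0).mean(axis=1)
norm = (distances - distances.min()) / (distances.max() - distances.min())
line = np.linspace(norm[0], norm[-1], len(norm))
eps_estimate = distances[np.argmax(np.abs(norm - line))]
print(f"estimated DBSCAN eps: {eps_estimate:.3f}")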
def compute_inlier_metric(self, set_='train') -> None:
"""
Compute inlier metric from backwards distance distributions.
This metric defines how well features from a timepoint fit
into previous timepoints.
"""
def normalise(dataframe: DataFrame, key: str) -> DataFrame:
if set_ == 'train':
min_value = dataframe.min()
max_value = dataframe.max()
self.data[f'{key}_min'] = min_value
self.data[f'{key}_max'] = max_value
else:
min_value = self.data[f'{key}_min']
max_value = self.data[f'{key}_max']
return (dataframe - min_value) / (max_value - min_value)
no_prev_pts = self.freqai_config["feature_parameters"]["inlier_metric_window"]
if set_ == 'train':
compute_df = copy.deepcopy(self.data_dictionary['train_features'])
elif set_ == 'test':
compute_df = copy.deepcopy(self.data_dictionary['test_features'])
else:
compute_df = copy.deepcopy(self.data_dictionary['prediction_features'])
compute_df_reindexed = compute_df.reindex(
index=np.flip(compute_df.index)
)
pairwise = pd.DataFrame(
np.triu(
pairwise_distances(compute_df_reindexed, n_jobs=self.thread_count)
),
columns=compute_df_reindexed.index,
index=compute_df_reindexed.index
)
pairwise = pairwise.round(5)
column_labels = [
'{}{}'.format('d', i) for i in range(1, no_prev_pts + 1)
]
distances = pd.DataFrame(
columns=column_labels, index=compute_df.index
)
for index in compute_df.index[no_prev_pts:]:
current_row = pairwise.loc[[index]]
current_row_no_zeros = current_row.loc[
:, (current_row != 0).any(axis=0)
]
distances.loc[[index]] = current_row_no_zeros.iloc[
:, :no_prev_pts
]
distances = distances.replace([np.inf, -np.inf], np.nan)
drop_index = pd.isnull(distances).any(axis=1)
distances = distances[drop_index == 0]
inliers = pd.DataFrame(index=distances.index)
for key in distances.keys():
current_distances = distances[key].dropna()
current_distances = normalise(current_distances, key)
if set_ == 'train':
fit_params = stats.weibull_min.fit(current_distances)
self.data[f'{key}_fit_params'] = fit_params
else:
fit_params = self.data[f'{key}_fit_params']
quantiles = stats.weibull_min.cdf(current_distances, *fit_params)
df_inlier = pd.DataFrame(
{key: quantiles}, index=distances.index
)
inliers = pd.concat(
[inliers, df_inlier], axis=1
)
inlier_metric = pd.DataFrame(
data=inliers.sum(axis=1) / no_prev_pts,
columns=['%-inlier_metric'],
index=compute_df.index
)
inlier_metric = (2 * (inlier_metric - inlier_metric.min()) /
(inlier_metric.max() - inlier_metric.min()) - 1)
if set_ in ('train', 'test'):
inlier_metric = inlier_metric.iloc[no_prev_pts:]
compute_df = compute_df.iloc[no_prev_pts:]
self.remove_beginning_points_from_data_dict(set_, no_prev_pts)
self.data_dictionary[f'{set_}_features'] = pd.concat(
[compute_df, inlier_metric], axis=1)
else:
self.data_dictionary['prediction_features'] = pd.concat(
[compute_df, inlier_metric], axis=1)
self.data_dictionary['prediction_features'].fillna(0, inplace=True)
logger.info('Inlier metric computed and added to features.')
return None
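# Minimal sketch of the Weibull step above: fit a Weibull distribution to one
# distance column and read off its CDF quantiles (the real method normalises
# the column first; the data here is a made-up stand-in):
import numpy as np
from scipy import stats
dist_col = np.random.default_rng(2).weibull(a=1.5, size=300)
fit_params = stats.weibull_min.fit(dist_col)       # stored as f"{key}_fit_params"
quantiles = stats.weibull_min.cdf(dist_col, *fit_params)   # values in [0, 1]
print(quantiles.mean())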
def remove_beginning_points_from_data_dict(self, set_='train', no_prev_pts: int = 10):
features = self.data_dictionary[f'{set_}_features']
weights = self.data_dictionary[f'{set_}_weights']
labels = self.data_dictionary[f'{set_}_labels']
self.data_dictionary[f'{set_}_weights'] = weights[no_prev_pts:]
self.data_dictionary[f'{set_}_features'] = features.iloc[no_prev_pts:]
self.data_dictionary[f'{set_}_labels'] = labels.iloc[no_prev_pts:]
def add_noise_to_training_features(self) -> None:
"""
Add noise to train features to reduce the risk of overfitting.
"""
mu = 0 # no shift
sigma = self.freqai_config["feature_parameters"]["noise_standard_deviation"]
compute_df = self.data_dictionary['train_features']
noise = np.random.normal(mu, sigma, [compute_df.shape[0], compute_df.shape[1]])
self.data_dictionary['train_features'] += noise
return
def find_features(self, dataframe: DataFrame) -> None:
"""
Find features in the strategy provided dataframe
@@ -925,37 +391,6 @@ class FreqaiDataKitchen:
labels = [c for c in column_names if "&" in c]
self.label_list = labels
def check_if_pred_in_training_spaces(self) -> None:
"""
Compares the distance from each prediction point to each training data
point. It uses this information to estimate a Dissimilarity Index (DI)
and avoid making predictions on any points that are too far away
from the training data set.
"""
distance = pairwise_distances(
self.data_dictionary["train_features"],
self.data_dictionary["prediction_features"],
n_jobs=self.thread_count,
)
self.DI_values = distance.min(axis=0) / self.data["avg_mean_dist"]
do_predict = np.where(
self.DI_values < self.freqai_config["feature_parameters"]["DI_threshold"],
1,
0,
)
if (len(do_predict) - do_predict.sum()) > 0:
logger.info(
f"{self.pair}: DI tossed {len(do_predict) - do_predict.sum()} predictions for "
"being too far from training data."
)
self.do_predict += do_predict
self.do_predict -= 1
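# Minimal sketch of the Dissimilarity Index above: the minimum distance from
# each prediction point to the training set, scaled by the average pairwise
# training distance from compute_distances() (toy data; threshold assumed):
import numpy as np
from sklearn.metrics import pairwise_distances
rng = np.random.default_rng(3)
train, preds = rng.normal(size=(200, 6)), rng.normal(size=(10, 6))
pairwise = pairwise_distances(train)
np.fill_diagonal(pairwise, np.nan)
avg_mean_dist = np.nanmean(pairwise)
di_values = pairwise_distances(train, preds).min(axis=0) / avg_mean_dist
do_predict = np.where(di_values < 0.9, 1, 0)       # 0.9 = hypothetical DI_threshold
print(do_predict)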
def set_weights_higher_recent(self, num_weights: int) -> npt.ArrayLike:
"""
Set weights so that recent data is more heavily weighted during
@@ -1325,9 +760,9 @@ class FreqaiDataKitchen:
" which was deprecated on March 1, 2023. Please refer "
"to the strategy migration guide to use the new "
"feature_engineering_* methods: \n"
"https://www.freqtrade.io/en/stable/strategy_migration/#freqai-strategy \n"
f"{DOCS_LINK}/strategy_migration/#freqai-strategy \n"
"And the feature_engineering_* documentation: \n"
"https://www.freqtrade.io/en/latest/freqai-feature-engineering/"
f"{DOCS_LINK}/freqai-feature-engineering/"
)
tfs: List[str] = self.freqai_config["feature_parameters"].get("include_timeframes")
@@ -1515,3 +950,32 @@ class FreqaiDataKitchen:
timerange.startts += buffer * timeframe_to_seconds(self.config["timeframe"])
return timerange
# deprecated functions
def normalize_data(self, data_dictionary: Dict) -> Dict[Any, Any]:
"""
Deprecation warning, migration assistance
"""
logger.warning(f"Your custom IFreqaiModel relies on the deprecated"
" data pipeline. Please update your model to use the new data pipeline."
" This can be achieved by following the migration guide at "
f"{DOCS_LINK}/strategy_migration/#freqai-new-data-pipeline "
"We added a basic pipeline for you, but this will be removed "
"in a future version.")
return data_dictionary
def denormalize_labels_from_metadata(self, df: DataFrame) -> DataFrame:
"""
Deprecation warning, migration assistance
"""
logger.warning(f"Your custom IFreqaiModel relies on the deprecated"
" data pipeline. Please update your model to use the new data pipeline."
" This can be achieved by following the migration guide at "
f"{DOCS_LINK}/strategy_migration/#freqai-new-data-pipeline "
"We added a basic pipeline for you, but this will be removed "
"in a future version.")
pred_df, _, _ = self.label_pipeline.inverse_transform(df)
return pred_df

View File

@@ -7,21 +7,25 @@ from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Dict, List, Literal, Optional, Tuple
import datasieve.transforms as ds
import numpy as np
import pandas as pd
import psutil
from datasieve.pipeline import Pipeline
from datasieve.transforms import SKLearnWrapper
from numpy.typing import NDArray
from pandas import DataFrame
from sklearn.preprocessing import MinMaxScaler
from freqtrade.configuration import TimeRange
from freqtrade.constants import Config
from freqtrade.constants import DOCS_LINK, Config
from freqtrade.data.dataprovider import DataProvider
from freqtrade.enums import RunMode
from freqtrade.exceptions import OperationalException
from freqtrade.exchange import timeframe_to_seconds
from freqtrade.freqai.data_drawer import FreqaiDataDrawer
from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
from freqtrade.freqai.utils import plot_feature_importance, record_params
from freqtrade.freqai.utils import get_tb_logger, plot_feature_importance, record_params
from freqtrade.strategy.interface import IStrategy
@@ -80,6 +84,7 @@ class IFreqaiModel(ABC):
if self.keras and self.ft_params.get("DI_threshold", 0):
self.ft_params["DI_threshold"] = 0
logger.warning("DI threshold is not configured for Keras models yet. Deactivating.")
self.CONV_WIDTH = self.freqai_info.get('conv_width', 1)
if self.ft_params.get("inlier_metric_window", 0):
self.CONV_WIDTH = self.ft_params.get("inlier_metric_window", 0) * 2
@@ -109,6 +114,7 @@ class IFreqaiModel(ABC):
if self.ft_params.get('principal_component_analysis', False) and self.continual_learning:
self.ft_params.update({'principal_component_analysis': False})
logger.warning('User tried to use PCA with continual learning. Deactivating PCA.')
self.activate_tensorboard: bool = self.freqai_info.get('activate_tensorboard', True)
record_params(config, self.full_path)
@@ -242,8 +248,8 @@ class IFreqaiModel(ABC):
new_trained_timerange, pair, strategy, dk, data_load_timerange
)
except Exception as msg:
logger.warning(f"Training {pair} raised exception {msg.__class__.__name__}. "
f"Message: {msg}, skipping.")
logger.exception(f"Training {pair} raised exception {msg.__class__.__name__}. "
f"Message: {msg}, skipping.")
self.train_timer('stop', pair)
@@ -306,10 +312,11 @@ class IFreqaiModel(ABC):
if dk.check_if_backtest_prediction_is_valid(len_backtest_df):
if check_features:
self.dd.load_metadata(dk)
dataframe_dummy_features = self.dk.use_strategy_to_populate_indicators(
df_fts = self.dk.use_strategy_to_populate_indicators(
strategy, prediction_dataframe=dataframe.tail(1), pair=pair
)
dk.find_features(dataframe_dummy_features)
df_fts = dk.remove_special_chars_from_feature_names(df_fts)
dk.find_features(df_fts)
self.check_if_feature_list_matches_strategy(dk)
check_features = False
append_df = dk.get_backtesting_prediction()
@@ -342,7 +349,10 @@ class IFreqaiModel(ABC):
dk.find_labels(dataframe_train)
try:
self.tb_logger = get_tb_logger(self.dd.model_type, dk.data_path,
self.activate_tensorboard)
self.model = self.train(dataframe_train, pair, dk)
self.tb_logger.close()
except Exception as msg:
logger.warning(
f"Training {pair} raised exception {msg.__class__.__name__}. "
@@ -489,76 +499,51 @@ class IFreqaiModel(ABC):
if dk.training_features_list != feature_list:
raise OperationalException(
"Trying to access pretrained model with `identifier` "
"but found different features furnished by current strategy."
"Change `identifier` to train from scratch, or ensure the"
"strategy is furnishing the same features as the pretrained"
"but found different features furnished by current strategy. "
"Change `identifier` to train from scratch, or ensure the "
"strategy is furnishing the same features as the pretrained "
"model. In case of --strategy-list, please be aware that FreqAI "
"requires all strategies to maintain identical "
"feature_engineering_* functions"
)
def data_cleaning_train(self, dk: FreqaiDataKitchen) -> None:
"""
Base data cleaning method for train.
Functions here improve/modify the input data by identifying outliers,
computing additional metrics, adding noise, reducing dimensionality etc.
"""
def define_data_pipeline(self, threads=-1) -> Pipeline:
ft_params = self.freqai_info["feature_parameters"]
pipe_steps = [
('const', ds.VarianceThreshold(threshold=0)),
('scaler', SKLearnWrapper(MinMaxScaler(feature_range=(-1, 1))))
]
if ft_params.get('inlier_metric_window', 0):
dk.compute_inlier_metric(set_='train')
if self.freqai_info["data_split_parameters"]["test_size"] > 0:
dk.compute_inlier_metric(set_='test')
if ft_params.get(
"principal_component_analysis", False
):
dk.principal_component_analysis()
if ft_params.get("principal_component_analysis", False):
pipe_steps.append(('pca', ds.PCA(n_components=0.999)))
pipe_steps.append(('post-pca-scaler',
SKLearnWrapper(MinMaxScaler(feature_range=(-1, 1)))))
if ft_params.get("use_SVM_to_remove_outliers", False):
dk.use_SVM_to_remove_outliers(predict=False)
svm_params = ft_params.get(
"svm_params", {"shuffle": False, "nu": 0.01})
pipe_steps.append(('svm', ds.SVMOutlierExtractor(**svm_params)))
if ft_params.get("DI_threshold", 0):
dk.data["avg_mean_dist"] = dk.compute_distances()
di = ft_params.get("DI_threshold", 0)
if di:
pipe_steps.append(('di', ds.DissimilarityIndex(di_threshold=di, n_jobs=threads)))
if ft_params.get("use_DBSCAN_to_remove_outliers", False):
if dk.pair in self.dd.old_DBSCAN_eps:
eps = self.dd.old_DBSCAN_eps[dk.pair]
else:
eps = None
dk.use_DBSCAN_to_remove_outliers(predict=False, eps=eps)
self.dd.old_DBSCAN_eps[dk.pair] = dk.data['DBSCAN_eps']
pipe_steps.append(('dbscan', ds.DBSCAN(n_jobs=threads)))
if self.freqai_info["feature_parameters"].get('noise_standard_deviation', 0):
dk.add_noise_to_training_features()
sigma = self.freqai_info["feature_parameters"].get('noise_standard_deviation', 0)
if sigma:
pipe_steps.append(('noise', ds.Noise(sigma=sigma)))
def data_cleaning_predict(self, dk: FreqaiDataKitchen) -> None:
"""
Base data cleaning method for predict.
Functions here are complementary to the functions of data_cleaning_train.
"""
ft_params = self.freqai_info["feature_parameters"]
return Pipeline(pipe_steps)
# ensure user is feeding the correct indicators to the model
self.check_if_feature_list_matches_strategy(dk)
def define_label_pipeline(self, threads=-1) -> Pipeline:
if ft_params.get('inlier_metric_window', 0):
dk.compute_inlier_metric(set_='predict')
label_pipeline = Pipeline([
('scaler', SKLearnWrapper(MinMaxScaler(feature_range=(-1, 1))))
])
if ft_params.get(
"principal_component_analysis", False
):
dk.pca_transform(dk.data_dictionary['prediction_features'])
if ft_params.get("use_SVM_to_remove_outliers", False):
dk.use_SVM_to_remove_outliers(predict=True)
if ft_params.get("DI_threshold", 0):
dk.check_if_pred_in_training_spaces()
if ft_params.get("use_DBSCAN_to_remove_outliers", False):
dk.use_DBSCAN_to_remove_outliers(predict=True)
return label_pipeline
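# Minimal sketch of fitting and applying a pipeline like the one built above
# (toy frames; step names mirror define_data_pipeline, and the transform call
# mirrors the outlier_check usage elsewhere in this diff):
import numpy as np
import pandas as pd
import datasieve.transforms as ds
from datasieve.pipeline import Pipeline
from datasieve.transforms import SKLearnWrapper
from sklearn.preprocessing import MinMaxScaler
pipeline = Pipeline([
    ('const', ds.VarianceThreshold(threshold=0)),
    ('scaler', SKLearnWrapper(MinMaxScaler(feature_range=(-1, 1)))),
])
X_train = pd.DataFrame(np.random.normal(size=(100, 8)))
X_train, _, _ = pipeline.fit_transform(X_train)
X_pred = pd.DataFrame(np.random.normal(size=(10, 8)))
X_pred, outliers, _ = pipeline.transform(X_pred, outlier_check=True)
# with no outlier-detecting steps in this toy pipeline, outliers should be all 1s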
def model_exists(self, dk: FreqaiDataKitchen) -> bool:
"""
@@ -570,8 +555,6 @@ class IFreqaiModel(ABC):
"""
if self.dd.model_type == 'joblib':
file_type = ".joblib"
elif self.dd.model_type == 'keras':
file_type = ".h5"
elif self.dd.model_type in ["stable_baselines3", "sb3_contrib", "pytorch"]:
file_type = ".zip"
@@ -620,18 +603,23 @@ class IFreqaiModel(ABC):
strategy, corr_dataframes, base_dataframes, pair
)
new_trained_timerange = dk.buffer_timerange(new_trained_timerange)
trained_timestamp = new_trained_timerange.stopts
unfiltered_dataframe = dk.slice_dataframe(new_trained_timerange, unfiltered_dataframe)
buffered_timerange = dk.buffer_timerange(new_trained_timerange)
unfiltered_dataframe = dk.slice_dataframe(buffered_timerange, unfiltered_dataframe)
# find the features indicated by strategy and store in datakitchen
dk.find_features(unfiltered_dataframe)
dk.find_labels(unfiltered_dataframe)
self.tb_logger = get_tb_logger(self.dd.model_type, dk.data_path,
self.activate_tensorboard)
model = self.train(unfiltered_dataframe, pair, dk)
self.tb_logger.close()
self.dd.pair_dict[pair]["trained_timestamp"] = new_trained_timerange.stopts
dk.set_new_model_names(pair, new_trained_timerange.stopts)
self.dd.pair_dict[pair]["trained_timestamp"] = trained_timestamp
dk.set_new_model_names(pair, trained_timestamp)
self.dd.save_data(model, pair, dk)
if self.plot_features:
@@ -690,7 +678,7 @@ class IFreqaiModel(ABC):
# # for keras type models, the conv_window needs to be prepended so
# # viewing is correct in frequi
if self.freqai_info.get('keras', False) or self.ft_params.get('inlier_metric_window', 0):
if self.ft_params.get('inlier_metric_window', 0):
n_lost_points = self.freqai_info.get('conv_width', 2)
zeros_df = DataFrame(np.zeros((n_lost_points, len(hist_preds_df.columns))),
columns=hist_preds_df.columns)
@@ -980,3 +968,50 @@ class IFreqaiModel(ABC):
:do_predict: np.array of 1s and 0s to indicate places where freqai needed to remove
data (NaNs) or felt uncertain about data (i.e. SVM and/or DI index)
"""
# deprecated functions
def data_cleaning_train(self, dk: FreqaiDataKitchen, pair: str):
"""
Throw a deprecation warning if this function is called
"""
logger.warning(f"Your model {self.__class__.__name__} relies on the deprecated"
" data pipeline. Please update your model to use the new data pipeline."
" This can be achieved by following the migration guide at "
f"{DOCS_LINK}/strategy_migration/#freqai-new-data-pipeline")
dk.feature_pipeline = self.define_data_pipeline(threads=dk.thread_count)
dd = dk.data_dictionary
(dd["train_features"],
dd["train_labels"],
dd["train_weights"]) = dk.feature_pipeline.fit_transform(dd["train_features"],
dd["train_labels"],
dd["train_weights"])
(dd["test_features"],
dd["test_labels"],
dd["test_weights"]) = dk.feature_pipeline.transform(dd["test_features"],
dd["test_labels"],
dd["test_weights"])
dk.label_pipeline = self.define_label_pipeline(threads=dk.thread_count)
dd["train_labels"], _, _ = dk.label_pipeline.fit_transform(dd["train_labels"])
dd["test_labels"], _, _ = dk.label_pipeline.transform(dd["test_labels"])
return
def data_cleaning_predict(self, dk: FreqaiDataKitchen, pair: str):
"""
Throw a deprecation warning if this function is called
"""
logger.warning(f"Your model {self.__class__.__name__} relies on the deprecated"
" data pipeline. Please update your model to use the new data pipeline."
" This can be achieved by following the migration guide at "
f"{DOCS_LINK}/strategy_migration/#freqai-new-data-pipeline")
dd = dk.data_dictionary
dd["predict_features"], outliers, _ = dk.feature_pipeline.transform(
dd["predict_features"], outlier_check=True)
if self.freqai_info.get("DI_threshold", 0) > 0:
dk.DI_values = dk.feature_pipeline["di"].di_values
else:
dk.DI_values = np.zeros(outliers.shape[0])
dk.do_predict = outliers
return

View File

@@ -74,16 +74,18 @@ class PyTorchMLPClassifier(BasePyTorchClassifier):
model.to(self.device)
optimizer = torch.optim.AdamW(model.parameters(), lr=self.learning_rate)
criterion = torch.nn.CrossEntropyLoss()
init_model = self.get_init_model(dk.pair)
trainer = PyTorchModelTrainer(
model=model,
optimizer=optimizer,
criterion=criterion,
model_meta_data={"class_names": class_names},
device=self.device,
init_model=init_model,
data_convertor=self.data_convertor,
**self.trainer_kwargs,
)
# check if continual_learning is activated, and retrieve the model to continue training
trainer = self.get_init_model(dk.pair)
if trainer is None:
trainer = PyTorchModelTrainer(
model=model,
optimizer=optimizer,
criterion=criterion,
model_meta_data={"class_names": class_names},
device=self.device,
data_convertor=self.data_convertor,
tb_logger=self.tb_logger,
**self.trainer_kwargs,
)
trainer.fit(data_dictionary, self.splits)
return trainer

View File

@@ -69,15 +69,17 @@ class PyTorchMLPRegressor(BasePyTorchRegressor):
model.to(self.device)
optimizer = torch.optim.AdamW(model.parameters(), lr=self.learning_rate)
criterion = torch.nn.MSELoss()
init_model = self.get_init_model(dk.pair)
trainer = PyTorchModelTrainer(
model=model,
optimizer=optimizer,
criterion=criterion,
device=self.device,
init_model=init_model,
data_convertor=self.data_convertor,
**self.trainer_kwargs,
)
# check if continual_learning is activated, and retrieve the model to continue training
trainer = self.get_init_model(dk.pair)
if trainer is None:
trainer = PyTorchModelTrainer(
model=model,
optimizer=optimizer,
criterion=criterion,
device=self.device,
data_convertor=self.data_convertor,
tb_logger=self.tb_logger,
**self.trainer_kwargs,
)
trainer.fit(data_dictionary, self.splits)
return trainer

View File

@@ -0,0 +1,146 @@
from typing import Any, Dict, Tuple
import numpy as np
import numpy.typing as npt
import pandas as pd
import torch
from freqtrade.freqai.base_models.BasePyTorchRegressor import BasePyTorchRegressor
from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
from freqtrade.freqai.torch.PyTorchDataConvertor import (DefaultPyTorchDataConvertor,
PyTorchDataConvertor)
from freqtrade.freqai.torch.PyTorchModelTrainer import PyTorchTransformerTrainer
from freqtrade.freqai.torch.PyTorchTransformerModel import PyTorchTransformerModel
class PyTorchTransformerRegressor(BasePyTorchRegressor):
"""
This class implements the fit method of IFreqaiModel.
In the fit method we initialize the model and trainer objects.
The only requirement from the model is to be aligned with the PyTorchRegressor
predict method, which expects the model to predict a tensor of type float.
The trainer defines the training loop.
Parameters are passed via `model_training_parameters` under the freqai
section in the config file, e.g.:
{
...
"freqai": {
...
"model_training_parameters" : {
"learning_rate": 3e-4,
"trainer_kwargs": {
"max_iters": 5000,
"batch_size": 64,
"max_n_eval_batches": null
},
"model_kwargs": {
"hidden_dim": 512,
"dropout_percent": 0.2,
"n_layer": 1,
},
}
}
}
"""
@property
def data_convertor(self) -> PyTorchDataConvertor:
return DefaultPyTorchDataConvertor(target_tensor_type=torch.float)
def __init__(self, **kwargs) -> None:
super().__init__(**kwargs)
config = self.freqai_info.get("model_training_parameters", {})
self.learning_rate: float = config.get("learning_rate", 3e-4)
self.model_kwargs: Dict[str, Any] = config.get("model_kwargs", {})
self.trainer_kwargs: Dict[str, Any] = config.get("trainer_kwargs", {})
def fit(self, data_dictionary: Dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
"""
User sets up the training and test data to fit their desired model here
:param data_dictionary: the dictionary holding all data for train, test,
labels, weights
:param dk: The datakitchen object for the current coin/model
"""
n_features = data_dictionary["train_features"].shape[-1]
n_labels = data_dictionary["train_labels"].shape[-1]
model = PyTorchTransformerModel(
input_dim=n_features,
output_dim=n_labels,
time_window=self.window_size,
**self.model_kwargs
)
model.to(self.device)
optimizer = torch.optim.AdamW(model.parameters(), lr=self.learning_rate)
criterion = torch.nn.MSELoss()
# check if continual_learning is activated, and retrieve the model to continue training
trainer = self.get_init_model(dk.pair)
if trainer is None:
trainer = PyTorchTransformerTrainer(
model=model,
optimizer=optimizer,
criterion=criterion,
device=self.device,
data_convertor=self.data_convertor,
window_size=self.window_size,
tb_logger=self.tb_logger,
**self.trainer_kwargs,
)
trainer.fit(data_dictionary, self.splits)
return trainer
def predict(
self, unfiltered_df: pd.DataFrame, dk: FreqaiDataKitchen, **kwargs
) -> Tuple[pd.DataFrame, npt.NDArray[np.int_]]:
"""
Filter the prediction features data and predict with it.
:param unfiltered_df: Full dataframe for the current backtest period.
:return:
:pred_df: dataframe containing the predictions
:do_predict: np.array of 1s and 0s to indicate places where freqai needed to remove
data (NaNs) or felt uncertain about data (PCA and DI index)
"""
dk.find_features(unfiltered_df)
dk.data_dictionary["prediction_features"], _ = dk.filter_features(
unfiltered_df, dk.training_features_list, training_filter=False
)
dk.data_dictionary["prediction_features"], outliers, _ = dk.feature_pipeline.transform(
dk.data_dictionary["prediction_features"], outlier_check=True)
x = self.data_convertor.convert_x(
dk.data_dictionary["prediction_features"],
device=self.device
)
# if user is asking for multiple predictions, slide the window
# along the tensor
x = x.unsqueeze(0)
# create empty torch tensor
self.model.model.eval()
yb = torch.empty(0).to(self.device)
if x.shape[1] > 1:
ws = self.window_size
for i in range(0, x.shape[1] - ws):
xb = x[:, i:i + ws, :].to(self.device)
y = self.model.model(xb)
yb = torch.cat((yb, y), dim=0)
else:
yb = self.model.model(x)
yb = yb.cpu().squeeze()
pred_df = pd.DataFrame(yb.detach().numpy(), columns=dk.label_list)
pred_df, _, _ = dk.label_pipeline.inverse_transform(pred_df)
if self.freqai_info.get("DI_threshold", 0) > 0:
dk.DI_values = dk.feature_pipeline["di"].di_values
else:
dk.DI_values = np.zeros(outliers.shape[0])
dk.do_predict = outliers
if x.shape[1] > 1:
zeros_df = pd.DataFrame(np.zeros((x.shape[1] - len(pred_df), len(pred_df.columns))),
columns=pred_df.columns)
pred_df = pd.concat([zeros_df, pred_df], axis=0, ignore_index=True)
return (pred_df, dk.do_predict)
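# Minimal sketch of the sliding-window loop above; a Linear layer stands in for
# self.model.model and shapes are illustrative assumptions:
import torch
ws, seq_len, n_feat, n_labels = 5, 20, 8, 1
x = torch.randn(1, seq_len, n_feat)                # batch-first prediction tensor
stand_in_model = torch.nn.Linear(n_feat, n_labels)
yb = torch.empty(0, n_labels)
for i in range(0, x.shape[1] - ws):
    xb = x[:, i:i + ws, :]                         # one window of ws candles
    y = stand_in_model(xb[:, -1, :])               # one prediction per window
    yb = torch.cat((yb, y), dim=0)
print(yb.shape)                                    # torch.Size([15, 1])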

View File

@@ -1,11 +1,12 @@
import logging
from pathlib import Path
from typing import Any, Dict
from typing import Any, Dict, Type
import torch as th
from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Base5ActionRLEnv, Positions
from freqtrade.freqai.RL.BaseEnvironment import BaseEnvironment
from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel
@@ -57,10 +58,14 @@ class ReinforcementLearner(BaseReinforcementLearningModel):
policy_kwargs = dict(activation_fn=th.nn.ReLU,
net_arch=self.net_arch)
if self.activate_tensorboard:
tb_path = Path(dk.full_path / "tensorboard" / dk.pair.split('/')[0])
else:
tb_path = None
if dk.pair not in self.dd.model_dictionary or not self.continual_learning:
model = self.MODELCLASS(self.policy_type, self.train_env, policy_kwargs=policy_kwargs,
tensorboard_log=Path(
dk.full_path / "tensorboard" / dk.pair.split('/')[0]),
tensorboard_log=tb_path,
**self.freqai_info.get('model_training_parameters', {})
)
else:
@@ -84,7 +89,9 @@ class ReinforcementLearner(BaseReinforcementLearningModel):
return model
class MyRLEnv(Base5ActionRLEnv):
MyRLEnv: Type[BaseEnvironment]
class MyRLEnv(Base5ActionRLEnv): # type: ignore[no-redef]
"""
User can override any function in BaseRLEnv and gym.Env. Here the user
sets a custom reward based on profit and trade duration.
@@ -94,6 +101,12 @@ class ReinforcementLearner(BaseReinforcementLearningModel):
"""
An example reward function. This is the one function that users will likely
wish to inject their own creativity into.
Warning!
This function is a showcase designed to demonstrate as many
environment control features as possible. It is also designed to run quickly
on small computers. This is a benchmark; it is *not* for live production.
:param action: int = The action made by the agent for the current candle.
:return:
float = the reward to give to the agent for current step (used for optimization

View File

@@ -2,13 +2,14 @@ import logging
from typing import Any, Dict
from pandas import DataFrame
from stable_baselines3.common.callbacks import EvalCallback
from stable_baselines3.common.vec_env import SubprocVecEnv
from sb3_contrib.common.maskable.callbacks import MaskableEvalCallback
from sb3_contrib.common.maskable.utils import is_masking_supported
from stable_baselines3.common.vec_env import SubprocVecEnv, VecMonitor
from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
from freqtrade.freqai.prediction_models.ReinforcementLearner import ReinforcementLearner
from freqtrade.freqai.RL.BaseReinforcementLearningModel import make_env
from freqtrade.freqai.RL.TensorboardCallback import TensorboardCallback
from freqtrade.freqai.tensorboard.TensorboardCallback import TensorboardCallback
logger = logging.getLogger(__name__)
@@ -41,22 +42,27 @@ class ReinforcementLearner_multiproc(ReinforcementLearner):
env_info = self.pack_env_dict(dk.pair)
eval_freq = len(train_df) // self.max_threads
env_id = "train_env"
self.train_env = SubprocVecEnv([make_env(self.MyRLEnv, env_id, i, 1,
train_df, prices_train,
monitor=True,
env_info=env_info) for i
in range(self.max_threads)])
self.train_env = VecMonitor(SubprocVecEnv([make_env(self.MyRLEnv, env_id, i, 1,
train_df, prices_train,
env_info=env_info) for i
in range(self.max_threads)]))
eval_env_id = 'eval_env'
self.eval_env = SubprocVecEnv([make_env(self.MyRLEnv, eval_env_id, i, 1,
test_df, prices_test,
monitor=True,
env_info=env_info) for i
in range(self.max_threads)])
self.eval_callback = EvalCallback(self.eval_env, deterministic=True,
render=False, eval_freq=len(train_df),
best_model_save_path=str(dk.data_path))
self.eval_env = VecMonitor(SubprocVecEnv([make_env(self.MyRLEnv, eval_env_id, i, 1,
test_df, prices_test,
env_info=env_info) for i
in range(self.max_threads)]))
self.eval_callback = MaskableEvalCallback(self.eval_env, deterministic=True,
render=False, eval_freq=eval_freq,
best_model_save_path=str(dk.data_path),
use_masking=(self.model_type == 'MaskablePPO' and
is_masking_supported(self.eval_env)))
# THE TENSORBOARD CALLBACK IS NOT RECOMMENDED WITH MULTIPLE ENVS:
# IT WILL RETURN FALSE INFORMATION, AND IS NOT THREAD SAFE WITH SB3!
actions = self.train_env.env_method("get_actions")[0]
self.tensorboard_callback = TensorboardCallback(verbose=1, actions=actions)

View File

@@ -5,6 +5,7 @@ from xgboost import XGBRFRegressor
from freqtrade.freqai.base_models.BaseRegressionModel import BaseRegressionModel
from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
from freqtrade.freqai.tensorboard import TBCallback
logger = logging.getLogger(__name__)
@@ -44,7 +45,10 @@ class XGBoostRFRegressor(BaseRegressionModel):
model = XGBRFRegressor(**self.model_training_parameters)
model.set_params(callbacks=[TBCallback(dk.data_path)], activate=self.activate_tensorboard)
model.fit(X=X, y=y, sample_weight=sample_weight, eval_set=eval_set,
sample_weight_eval_set=eval_weights, xgb_model=xgb_model)
# set the callbacks to empty so that we can serialize to disk later
model.set_params(callbacks=[])
return model

View File

@@ -5,6 +5,7 @@ from xgboost import XGBRegressor
from freqtrade.freqai.base_models.BaseRegressionModel import BaseRegressionModel
from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
from freqtrade.freqai.tensorboard import TBCallback
logger = logging.getLogger(__name__)
@@ -44,7 +45,10 @@ class XGBoostRegressor(BaseRegressionModel):
model = XGBRegressor(**self.model_training_parameters)
model.set_params(callbacks=[TBCallback(dk.data_path)], activate=self.activate_tensorboard)
model.fit(X=X, y=y, sample_weight=sample_weight, eval_set=eval_set,
sample_weight_eval_set=eval_weights, xgb_model=xgb_model)
# set the callbacks to empty so that we can serialize to disk later
model.set_params(callbacks=[])
return model

View File

@@ -3,8 +3,9 @@ from typing import Any, Dict, Type, Union
from stable_baselines3.common.callbacks import BaseCallback
from stable_baselines3.common.logger import HParam
from stable_baselines3.common.vec_env import VecEnv
from freqtrade.freqai.RL.BaseEnvironment import BaseActions, BaseEnvironment
from freqtrade.freqai.RL.BaseEnvironment import BaseActions
class TensorboardCallback(BaseCallback):
@@ -12,11 +13,13 @@ class TensorboardCallback(BaseCallback):
Custom callback for plotting additional values in tensorboard and
episodic summary reports.
"""
# Override training_env type to fix type errors
training_env: Union[VecEnv, None] = None
def __init__(self, verbose=1, actions: Type[Enum] = BaseActions):
super().__init__(verbose)
self.model: Any = None
self.logger = None # type: Any
self.training_env: BaseEnvironment = None # type: ignore
self.logger: Any = None
self.actions: Type[Enum] = actions
def _on_training_start(self) -> None:
@@ -44,6 +47,8 @@ class TensorboardCallback(BaseCallback):
def _on_step(self) -> bool:
local_info = self.locals["infos"][0]
if self.training_env is None:
return True
tensorboard_metrics = self.training_env.get_attr("tensorboard_metrics")[0]
for metric in local_info:

View File

@@ -0,0 +1,15 @@
# ensure users can still use a non-torch freqai version
try:
from freqtrade.freqai.tensorboard.tensorboard import TensorBoardCallback, TensorboardLogger
TBLogger = TensorboardLogger
TBCallback = TensorBoardCallback
except ModuleNotFoundError:
from freqtrade.freqai.tensorboard.base_tensorboard import (BaseTensorBoardCallback,
BaseTensorboardLogger)
TBLogger = BaseTensorboardLogger # type: ignore
TBCallback = BaseTensorBoardCallback # type: ignore
__all__ = (
"TBLogger",
"TBCallback"
)

View File

@@ -0,0 +1,33 @@
import logging
from pathlib import Path
from typing import Any
from xgboost.callback import TrainingCallback
logger = logging.getLogger(__name__)
class BaseTensorboardLogger:
def __init__(self, logdir: Path, activate: bool = True):
pass
def log_scalar(self, tag: str, scalar_value: Any, step: int):
return
def close(self):
return
class BaseTensorBoardCallback(TrainingCallback):
def __init__(self, logdir: Path, activate: bool = True):
pass
def after_iteration(
self, model, epoch: int, evals_log: TrainingCallback.EvalsLog
) -> bool:
return False
def after_training(self, model):
return model

View File

@@ -0,0 +1,62 @@
import logging
from pathlib import Path
from typing import Any
from torch.utils.tensorboard import SummaryWriter
from xgboost import callback
from freqtrade.freqai.tensorboard.base_tensorboard import (BaseTensorBoardCallback,
BaseTensorboardLogger)
logger = logging.getLogger(__name__)
class TensorboardLogger(BaseTensorboardLogger):
def __init__(self, logdir: Path, activate: bool = True):
self.activate = activate
if self.activate:
self.writer: SummaryWriter = SummaryWriter(f"{str(logdir)}/tensorboard")
def log_scalar(self, tag: str, scalar_value: Any, step: int):
if self.activate:
self.writer.add_scalar(tag, scalar_value, step)
def close(self):
if self.activate:
self.writer.flush()
self.writer.close()
class TensorBoardCallback(BaseTensorBoardCallback):
def __init__(self, logdir: Path, activate: bool = True):
self.activate = activate
if self.activate:
self.writer: SummaryWriter = SummaryWriter(f"{str(logdir)}/tensorboard")
def after_iteration(
self, model, epoch: int, evals_log: callback.TrainingCallback.EvalsLog
) -> bool:
if not self.activate:
return False
if not evals_log:
return False
for data, metric in evals_log.items():
for metric_name, log in metric.items():
score = log[-1][0] if isinstance(log[-1], tuple) else log[-1]
if data == "train":
self.writer.add_scalar("train_loss", score, epoch)
else:
self.writer.add_scalar("valid_loss", score, epoch)
return False
def after_training(self, model):
if not self.activate:
return model
self.writer.flush()
self.writer.close()
return model

View File

@@ -1,5 +1,5 @@
from abc import ABC, abstractmethod
from typing import List, Optional
from typing import Optional
import pandas as pd
import torch
@@ -12,14 +12,14 @@ class PyTorchDataConvertor(ABC):
"""
@abstractmethod
def convert_x(self, df: pd.DataFrame, device: Optional[str] = None) -> List[torch.Tensor]:
def convert_x(self, df: pd.DataFrame, device: Optional[str] = None) -> torch.Tensor:
"""
:param df: "*_features" dataframe.
:param device: The device to use for training (e.g. 'cpu', 'cuda').
"""
@abstractmethod
def convert_y(self, df: pd.DataFrame, device: Optional[str] = None) -> List[torch.Tensor]:
def convert_y(self, df: pd.DataFrame, device: Optional[str] = None) -> torch.Tensor:
"""
:param df: "*_labels" dataframe.
:param device: The device to use for training (e.g. 'cpu', 'cuda').
@@ -45,14 +45,14 @@ class DefaultPyTorchDataConvertor(PyTorchDataConvertor):
self._target_tensor_type = target_tensor_type
self._squeeze_target_tensor = squeeze_target_tensor
def convert_x(self, df: pd.DataFrame, device: Optional[str] = None) -> List[torch.Tensor]:
def convert_x(self, df: pd.DataFrame, device: Optional[str] = None) -> torch.Tensor:
x = torch.from_numpy(df.values).float()
if device:
x = x.to(device)
return [x]
return x
def convert_y(self, df: pd.DataFrame, device: Optional[str] = None) -> List[torch.Tensor]:
def convert_y(self, df: pd.DataFrame, device: Optional[str] = None) -> torch.Tensor:
y = torch.from_numpy(df.values)
if self._target_tensor_type:
@@ -64,4 +64,4 @@ class DefaultPyTorchDataConvertor(PyTorchDataConvertor):
if device:
y = y.to(device)
return [y]
return y

View File

@@ -1,5 +1,4 @@
import logging
from typing import List
import torch
from torch import nn
@@ -47,8 +46,8 @@ class PyTorchMLPModel(nn.Module):
self.relu = nn.ReLU()
self.dropout = nn.Dropout(p=dropout_percent)
def forward(self, tensors: List[torch.Tensor]) -> torch.Tensor:
x: torch.Tensor = tensors[0]
def forward(self, x: torch.Tensor) -> torch.Tensor:
# x: torch.Tensor = tensors[0]
x = self.relu(self.input_layer(x))
x = self.dropout(x)
x = self.blocks(x)

View File

@@ -12,6 +12,8 @@ from torch.utils.data import DataLoader, TensorDataset
from freqtrade.freqai.torch.PyTorchDataConvertor import PyTorchDataConvertor
from freqtrade.freqai.torch.PyTorchTrainerInterface import PyTorchTrainerInterface
from .datasets import WindowDataset
logger = logging.getLogger(__name__)
@@ -23,9 +25,10 @@ class PyTorchModelTrainer(PyTorchTrainerInterface):
optimizer: Optimizer,
criterion: nn.Module,
device: str,
init_model: Dict,
data_convertor: PyTorchDataConvertor,
model_meta_data: Dict[str, Any] = {},
window_size: int = 1,
tb_logger: Any = None,
**kwargs
):
"""
@@ -52,8 +55,8 @@ class PyTorchModelTrainer(PyTorchTrainerInterface):
self.batch_size: int = kwargs.get("batch_size", 64)
self.max_n_eval_batches: Optional[int] = kwargs.get("max_n_eval_batches", None)
self.data_convertor = data_convertor
if init_model:
self.load_from_checkpoint(init_model)
self.window_size: int = window_size
self.tb_logger = tb_logger
def fit(self, data_dictionary: Dict[str, pd.DataFrame], splits: List[str]):
"""
@@ -75,36 +78,28 @@ class PyTorchModelTrainer(PyTorchTrainerInterface):
batch_size=self.batch_size,
n_iters=self.max_iters
)
self.model.train()
for epoch in range(1, epochs + 1):
# training
losses = []
for i, batch_data in enumerate(data_loaders_dictionary["train"]):
for tensor in batch_data:
tensor.to(self.device)
xb = batch_data[:-1]
yb = batch_data[-1]
xb, yb = batch_data
xb.to(self.device)
yb.to(self.device)
yb_pred = self.model(xb)
loss = self.criterion(yb_pred, yb)
self.optimizer.zero_grad(set_to_none=True)
loss.backward()
self.optimizer.step()
losses.append(loss.item())
train_loss = sum(losses) / len(losses)
log_message = f"epoch {epoch}/{epochs}: train loss {train_loss:.4f}"
self.tb_logger.log_scalar("train_loss", loss.item(), i)
# evaluation
if "test" in splits:
test_loss = self.estimate_loss(
self.estimate_loss(
data_loaders_dictionary,
self.max_n_eval_batches,
"test"
)
log_message += f" ; test loss {test_loss:.4f}"
logger.info(log_message)
@torch.no_grad()
def estimate_loss(
@@ -112,26 +107,22 @@ class PyTorchModelTrainer(PyTorchTrainerInterface):
data_loader_dictionary: Dict[str, DataLoader],
max_n_eval_batches: Optional[int],
split: str,
) -> float:
) -> None:
self.model.eval()
n_batches = 0
losses = []
for i, batch_data in enumerate(data_loader_dictionary[split]):
if max_n_eval_batches and i > max_n_eval_batches:
n_batches += 1
break
xb, yb = batch_data
xb.to(self.device)
yb.to(self.device)
for tensor in batch_data:
tensor.to(self.device)
xb = batch_data[:-1]
yb = batch_data[-1]
yb_pred = self.model(xb)
loss = self.criterion(yb_pred, yb)
losses.append(loss.item())
self.tb_logger.log_scalar(f"{split}_loss", loss.item(), i)
self.model.train()
return sum(losses) / len(losses)
def create_data_loaders_dictionary(
self,
@@ -145,7 +136,7 @@ class PyTorchModelTrainer(PyTorchTrainerInterface):
for split in splits:
x = self.data_convertor.convert_x(data_dictionary[f"{split}_features"], self.device)
y = self.data_convertor.convert_y(data_dictionary[f"{split}_labels"], self.device)
dataset = TensorDataset(*x, *y)
dataset = TensorDataset(x, y)
data_loader = DataLoader(
dataset,
batch_size=self.batch_size,
@@ -206,3 +197,33 @@ class PyTorchModelTrainer(PyTorchTrainerInterface):
self.optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
self.model_meta_data = checkpoint["model_meta_data"]
return self
class PyTorchTransformerTrainer(PyTorchModelTrainer):
"""
Creating a trainer for the Transformer model.
"""
def create_data_loaders_dictionary(
self,
data_dictionary: Dict[str, pd.DataFrame],
splits: List[str]
) -> Dict[str, DataLoader]:
"""
Converts the input data to PyTorch tensors using a data loader.
"""
data_loader_dictionary = {}
for split in splits:
x = self.data_convertor.convert_x(data_dictionary[f"{split}_features"], self.device)
y = self.data_convertor.convert_y(data_dictionary[f"{split}_labels"], self.device)
dataset = WindowDataset(x, y, self.window_size)
data_loader = DataLoader(
dataset,
batch_size=self.batch_size,
shuffle=False,
drop_last=True,
num_workers=0,
)
data_loader_dictionary[split] = data_loader
return data_loader_dictionary

View File

@@ -0,0 +1,93 @@
import math
import torch
from torch import nn
"""
The architecture is based on the paper “Attention Is All You Need”.
Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez,
Lukasz Kaiser, and Illia Polosukhin. 2017.
"""
class PyTorchTransformerModel(nn.Module):
"""
A transformer approach to time series modeling using positional encoding.
The architecture is based on the paper “Attention Is All You Need”.
Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez,
Lukasz Kaiser, and Illia Polosukhin. 2017.
"""
def __init__(self, input_dim: int = 7, output_dim: int = 7, hidden_dim=1024,
n_layer=2, dropout_percent=0.1, time_window=10, nhead=8):
super().__init__()
self.time_window = time_window
# ensure the input dimension to the transformer is divisible by nhead
self.dim_val = input_dim - (input_dim % nhead)
self.input_net = nn.Sequential(
nn.Dropout(dropout_percent), nn.Linear(input_dim, self.dim_val)
)
# Encode the timeseries with Positional encoding
self.positional_encoding = PositionalEncoding(d_model=self.dim_val, max_len=self.dim_val)
# Define the encoder block of the Transformer
self.encoder_layer = nn.TransformerEncoderLayer(
d_model=self.dim_val, nhead=nhead, dropout=dropout_percent, batch_first=True)
self.transformer = nn.TransformerEncoder(self.encoder_layer, num_layers=n_layer)
# the pseudo decoding FC
self.output_net = nn.Sequential(
nn.Linear(self.dim_val * time_window, int(hidden_dim)),
nn.ReLU(),
nn.Dropout(dropout_percent),
nn.Linear(int(hidden_dim), int(hidden_dim / 2)),
nn.ReLU(),
nn.Dropout(dropout_percent),
nn.Linear(int(hidden_dim / 2), int(hidden_dim / 4)),
nn.ReLU(),
nn.Dropout(dropout_percent),
nn.Linear(int(hidden_dim / 4), output_dim)
)
def forward(self, x, mask=None, add_positional_encoding=True):
"""
Args:
x: Input features of shape [Batch, SeqLen, input_dim]
mask: Mask to apply on the attention outputs (optional)
add_positional_encoding: If True, we add the positional encoding to the input.
Might not be desired for some tasks.
"""
x = self.input_net(x)
if add_positional_encoding:
x = self.positional_encoding(x)
x = self.transformer(x, mask=mask)
x = x.reshape(-1, 1, self.time_window * x.shape[-1])
x = self.output_net(x)
return x
class PositionalEncoding(nn.Module):
def __init__(self, d_model, max_len=5000):
"""
Args:
d_model: Hidden dimensionality of the input.
max_len: Maximum length of a sequence to expect.
"""
super().__init__()
# Create matrix of [SeqLen, HiddenDim] representing the positional encoding
# for max_len inputs
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
self.register_buffer("pe", pe, persistent=False)
def forward(self, x):
x = x + self.pe[:, : x.size(1)]
return x
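# Minimal sketch of running the model above on dummy data; dimensions are
# illustrative and assume input_dim is divisible by nhead:
import torch
model = PyTorchTransformerModel(input_dim=16, output_dim=1, time_window=10, nhead=8)
x = torch.randn(4, 10, 16)        # (batch, time_window, input_dim)
out = model(x)                    # -> torch.Size([4, 1, 1])
print(out.shape)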

View File

@@ -0,0 +1,19 @@
import torch
class WindowDataset(torch.utils.data.Dataset):
def __init__(self, xs, ys, window_size):
self.xs = xs
self.ys = ys
self.window_size = window_size
def __len__(self):
return len(self.xs) - self.window_size
def __getitem__(self, index):
idx_rev = len(self.xs) - self.window_size - index - 1
window_x = self.xs[idx_rev:idx_rev + self.window_size, :]
# Beware of indexing: window_x and window_y must be aimed at the same row!
# Slicing with `:` excludes the end index, hence the `- 1` on the label row.
window_y = self.ys[idx_rev + self.window_size - 1, :].unsqueeze(0)
return window_x, window_y
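# Minimal sketch of the reversed-index windowing above (toy tensors):
import torch
xs = torch.arange(12, dtype=torch.float).reshape(6, 2)    # 6 rows, 2 features
ys = torch.arange(6, dtype=torch.float).reshape(6, 1)
dataset = WindowDataset(xs, ys, window_size=3)
window_x, window_y = dataset[0]    # index 0 -> the most recent window (rows 2..4)
print(window_x.shape, window_y)    # torch.Size([3, 2]) tensor([[4.]])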

View File

@@ -92,55 +92,6 @@ def get_required_data_timerange(config: Config) -> TimeRange:
return data_load_timerange
# Keep below for when we wish to download data of heterogeneous lengths for FreqAI.
# def download_all_data_for_training(dp: DataProvider, config: Config) -> None:
# """
# Called only once upon start of bot to download the necessary data for
# populating indicators and training a FreqAI model.
# :param timerange: TimeRange = The full data timerange for populating the indicators
# and training the model.
# :param dp: DataProvider instance attached to the strategy
# """
# if dp._exchange is not None:
# markets = [p for p, m in dp._exchange.markets.items() if market_is_active(m)
# or config.get('include_inactive')]
# else:
# # This should not occur:
# raise OperationalException('No exchange object found.')
# all_pairs = dynamic_expand_pairlist(config, markets)
# if not dp._exchange:
# # Not realistic - this is only called in live mode.
# raise OperationalException("Dataprovider did not have an exchange attached.")
# time = datetime.now(tz=timezone.utc).timestamp()
# for tf in config["freqai"]["feature_parameters"].get("include_timeframes"):
# timerange = TimeRange()
# timerange.startts = int(time)
# timerange.stopts = int(time)
# startup_candles = dp.get_required_startup(str(tf))
# tf_seconds = timeframe_to_seconds(str(tf))
# timerange.subtract_start(tf_seconds * startup_candles)
# new_pairs_days = int((timerange.stopts - timerange.startts) / 86400)
# # FIXME: now that we are looping on `refresh_backtest_ohlcv_data`, the function
# # redownloads the funding rate for each pair.
# refresh_backtest_ohlcv_data(
# dp._exchange,
# pairs=all_pairs,
# timeframes=[tf],
# datadir=config["datadir"],
# timerange=timerange,
# new_pairs_days=new_pairs_days,
# erase=False,
# data_format=config.get("dataformat_ohlcv", "json"),
# trading_mode=config.get("trading_mode", "spot"),
# prepend=config.get("prepend_data", False),
# )
def plot_feature_importance(model: Any, pair: str, dk: FreqaiDataKitchen,
count_max: int = 25) -> None:
"""
@@ -233,3 +184,13 @@ def get_timerange_backtest_live_models(config: Config) -> str:
dd = FreqaiDataDrawer(models_path, config)
timerange = dd.get_timerange_from_live_historic_predictions()
return timerange.timerange_str
def get_tb_logger(model_type: str, path: Path, activate: bool) -> Any:
if model_type == "pytorch" and activate:
from freqtrade.freqai.tensorboard import TBLogger
return TBLogger(path, activate)
else:
from freqtrade.freqai.tensorboard.base_tensorboard import BaseTensorboardLogger
return BaseTensorboardLogger(path, activate)

View File

@@ -1,9 +1,9 @@
"""
Freqtrade is the main module of this bot. It contains the class Freqtrade()
"""
import copy
import logging
import traceback
from copy import deepcopy
from datetime import datetime, time, timedelta, timezone
from math import isclose
from threading import Lock
@@ -13,7 +13,7 @@ from schedule import Scheduler
from freqtrade import constants
from freqtrade.configuration import validate_config_consistency
from freqtrade.constants import BuySell, Config, LongShort
from freqtrade.constants import BuySell, Config, ExchangeConfig, LongShort
from freqtrade.data.converter import order_book_to_dataframe
from freqtrade.data.dataprovider import DataProvider
from freqtrade.edge import Edge
@@ -23,6 +23,7 @@ from freqtrade.exceptions import (DependencyException, ExchangeError, Insufficie
InvalidOrderException, PricingError)
from freqtrade.exchange import (ROUND_DOWN, ROUND_UP, timeframe_to_minutes, timeframe_to_next_date,
timeframe_to_seconds)
from freqtrade.exchange.common import remove_exchange_credentials
from freqtrade.misc import safe_value_fallback, safe_value_fallback2
from freqtrade.mixins import LoggingMixin
from freqtrade.persistence import Order, PairLocks, Trade, init_db
@@ -63,6 +64,9 @@ class FreqtradeBot(LoggingMixin):
# Init objects
self.config = config
exchange_config: ExchangeConfig = deepcopy(config['exchange'])
# Remove credentials from the original exchange config to avoid accidental credential exposure
remove_exchange_credentials(config['exchange'], True)
self.strategy: IStrategy = StrategyResolver.load_strategy(self.config)
@@ -70,7 +74,7 @@ class FreqtradeBot(LoggingMixin):
validate_config_consistency(config)
self.exchange = ExchangeResolver.load_exchange(
self.config['exchange']['name'], self.config, load_leverage_tiers=True)
self.config, exchange_config=exchange_config, load_leverage_tiers=True)
init_db(self.config['db_url'])
@@ -227,7 +231,7 @@ class FreqtradeBot(LoggingMixin):
self.manage_open_orders()
# Protect from collisions with force_exit.
# Without this, freqtrade my try to recreate stoploss_on_exchange orders
# Without this, freqtrade may try to recreate stoploss_on_exchange orders
# while exiting is in progress, since telegram messages arrive in a different thread.
with self._exit_lock:
trades = Trade.get_open_trades()
@@ -420,7 +424,7 @@ class FreqtradeBot(LoggingMixin):
"""
Try refinding a lost trade.
Only used when InsufficientFunds appears on exit orders (stoploss or long sell/short buy).
Tries to walk the stored orders and sell them off eventually.
Walks the stored orders and updates the trade state if necessary.
"""
logger.info(f"Trying to refind lost order for {trade}")
for order in trade.orders:
@@ -451,6 +455,42 @@ class FreqtradeBot(LoggingMixin):
except ExchangeError:
logger.warning(f"Error updating {order.order_id}.")
def handle_onexchange_order(self, trade: Trade):
"""
Try refinding an order that is not in the database.
Only used when the balance disappeared, which would make exiting impossible.
"""
try:
orders = self.exchange.fetch_orders(trade.pair, trade.open_date_utc)
for order in orders:
trade_order = [o for o in trade.orders if o.order_id == order['id']]
if trade_order:
continue
logger.info(f"Found previously unknown order {order['id']} for {trade.pair}.")
order_obj = Order.parse_from_ccxt_object(order, trade.pair, order['side'])
order_obj.order_filled_date = datetime.fromtimestamp(
safe_value_fallback(order, 'lastTradeTimestamp', 'timestamp') // 1000,
tz=timezone.utc)
trade.orders.append(order_obj)
# TODO: how do we handle open_order_id ...
Trade.commit()
prev_exit_reason = trade.exit_reason
trade.exit_reason = ExitType.SOLD_ON_EXCHANGE.value
self.update_trade_state(trade, order['id'], order)
logger.info(f"handled order {order['id']}")
if not trade.is_open:
# Trade was just closed
trade.close_date = order_obj.order_filled_date
Trade.commit()
break
else:
trade.exit_reason = prev_exit_reason
Trade.commit()
except ExchangeError:
logger.warning("Error finding onexchange order")
#
# BUY / enter positions / open trades logic and methods
#
@@ -461,7 +501,7 @@ class FreqtradeBot(LoggingMixin):
"""
trades_created = 0
whitelist = copy.deepcopy(self.active_pair_whitelist)
whitelist = deepcopy(self.active_pair_whitelist)
if not whitelist:
self.log_once("Active pair whitelist is empty.", logger.info)
return trades_created
@@ -490,7 +530,8 @@ class FreqtradeBot(LoggingMixin):
# Create entity and execute trade for each pair from whitelist
for pair in whitelist:
try:
trades_created += self.create_trade(pair)
with self._exit_lock:
trades_created += self.create_trade(pair)
except DependencyException as exception:
logger.warning('Unable to create trade for %s: %s', pair, exception)
@@ -981,7 +1022,7 @@ class FreqtradeBot(LoggingMixin):
'base_currency': self.exchange.get_pair_base_currency(trade.pair),
'fiat_currency': self.config.get('fiat_display_currency', None),
'amount': order.safe_amount_after_fee if fill else (order.amount or trade.amount),
'open_date': trade.open_date or datetime.utcnow(),
'open_date': trade.open_date_utc or datetime.now(timezone.utc),
'current_rate': current_rate,
'sub_trade': sub_trade,
}
@@ -1033,6 +1074,13 @@ class FreqtradeBot(LoggingMixin):
"""
trades_closed = 0
for trade in trades:
if trade.open_order_id is None and not self.wallets.check_exit_amount(trade):
logger.warning(
f'Not enough {trade.safe_base_currency} in wallet to exit {trade}. '
'Trying to recover.')
self.handle_onexchange_order(trade)
try:
try:
if (self.strategy.order_types.get('stoploss_on_exchange') and
@@ -1254,6 +1302,10 @@ class FreqtradeBot(LoggingMixin):
f"(orderid:{order['id']}) in order to add another one ...")
self.cancel_stoploss_on_exchange(trade)
if not trade.is_open:
logger.warning(
f"Trade {trade} is closed, not creating trailing stoploss order.")
return
# Create new stoploss order
if not self.create_stoploss_order(trade=trade, stop_price=stoploss_norm):
@@ -1425,7 +1477,7 @@ class FreqtradeBot(LoggingMixin):
corder = order
reason = constants.CANCEL_REASON['CANCELLED_ON_EXCHANGE']
logger.info('%s order %s for %s.', side, reason, trade)
logger.info(f'{side} order {reason} for {trade}.')
# Using filled to determine the filled amount
filled_amount = safe_value_fallback2(corder, order, 'filled', 'filled')
@@ -1507,7 +1559,7 @@ class FreqtradeBot(LoggingMixin):
trade.exit_reason = None
trade.open_order_id = None
self.update_trade_state(trade, trade.open_order_id, order)
self.update_trade_state(trade, order['id'], order)
logger.info(f'{trade.exit_side.capitalize()} order {reason} for {trade}.')
trade.close_rate = None
@@ -1535,13 +1587,13 @@ class FreqtradeBot(LoggingMixin):
# Update wallets to ensure amounts tied up in a stoploss are now free!
self.wallets.update()
if self.trading_mode == TradingMode.FUTURES:
# A safe exit amount isn't needed for futures, you can just exit/close the position
return amount
trade_base_currency = self.exchange.get_pair_base_currency(pair)
wallet_amount = self.wallets.get_free(trade_base_currency)
logger.debug(f"{pair} - Wallet: {wallet_amount} - Trade-amount: {amount}")
if wallet_amount >= amount:
# A safe exit amount isn't needed for futures, you can just exit/close the position
return amount
elif wallet_amount > amount * 0.98:
logger.info(f"{pair} - Falling back to wallet-amount {wallet_amount} -> {amount}.")
@@ -1697,8 +1749,8 @@ class FreqtradeBot(LoggingMixin):
'enter_tag': trade.enter_tag,
'sell_reason': trade.exit_reason, # Deprecated
'exit_reason': trade.exit_reason,
'open_date': trade.open_date,
'close_date': trade.close_date or datetime.utcnow(),
'open_date': trade.open_date_utc,
'close_date': trade.close_date_utc or datetime.now(timezone.utc),
'stake_amount': trade.stake_amount,
'stake_currency': self.config['stake_currency'],
'base_currency': self.exchange.get_pair_base_currency(trade.pair),
@@ -1720,10 +1772,8 @@ class FreqtradeBot(LoggingMixin):
else:
trade.exit_order_status = reason
order = trade.select_order_by_order_id(order_id)
if not order:
raise DependencyException(
f"Order_obj not found for {order_id}. This should not have happened.")
order_or_none = trade.select_order_by_order_id(order_id)
order = self.order_obj_or_raise(order_id, order_or_none)
profit_rate: float = trade.safe_close_rate
profit_trade = trade.calc_profit(rate=profit_rate)
@@ -1764,6 +1814,12 @@ class FreqtradeBot(LoggingMixin):
# Send the message
self.rpc.send_msg(msg)
def order_obj_or_raise(self, order_id: str, order_obj: Optional[Order]) -> Order:
if not order_obj:
raise DependencyException(
f"Order_obj not found for {order_id}. This should not have happened.")
return order_obj
#
# Common update trade state methods
#
@@ -1802,10 +1858,8 @@ class FreqtradeBot(LoggingMixin):
# Handling of this will happen in check_handle_timedout.
return True
order_obj = trade.select_order_by_order_id(order_id)
if not order_obj:
raise DependencyException(
f"Order_obj not found for {order_id}. This should not have happened.")
order_obj_or_none = trade.select_order_by_order_id(order_id)
order_obj = self.order_obj_or_raise(order_id, order_obj_or_none)
self.handle_order_fee(trade, order_obj, order)
@@ -1823,16 +1877,18 @@ class FreqtradeBot(LoggingMixin):
# Must also run for partial exits
# TODO: Margin will need to use interest_rate as well.
# interest_rate = self.exchange.get_interest_rate()
trade.set_liquidation_price(self.exchange.get_liquidation_price(
pair=trade.pair,
open_rate=trade.open_rate,
is_short=trade.is_short,
amount=trade.amount,
stake_amount=trade.stake_amount,
leverage=trade.leverage,
wallet_balance=trade.stake_amount,
))
try:
trade.set_liquidation_price(self.exchange.get_liquidation_price(
pair=trade.pair,
open_rate=trade.open_rate,
is_short=trade.is_short,
amount=trade.amount,
stake_amount=trade.stake_amount,
leverage=trade.leverage,
wallet_balance=trade.stake_amount,
))
except DependencyException:
logger.warning('Unable to calculate liquidation price')
# Updating wallets when order is closed
self.wallets.update()
Trade.commit()
@@ -1883,6 +1939,7 @@ class FreqtradeBot(LoggingMixin):
"""
Applies the fee to amount (either from Order or from Trades).
Can eat into dust if more than the required asset is available.
In case of trade adjustment orders, trade.amount will not have been adjusted yet.
Can't happen in Futures mode - where Fees are always in settlement currency,
never in base currency.
"""
@@ -1892,6 +1949,10 @@ class FreqtradeBot(LoggingMixin):
# check against remaining amount!
amount_ = trade.amount - amount
if trade.nr_of_successful_entries >= 1 and order_obj.ft_order_side == trade.entry_side:
# In case of rebuys, trade.amount doesn't contain the amount of the last entry.
amount_ = trade.amount + amount
if fee_abs != 0 and self.wallets.get_free(trade_base_currency) >= amount_:
# Eat into dust if we own more than base currency
logger.info(f"Fee amount for {trade} was in base currency - "
@@ -1921,7 +1982,11 @@ class FreqtradeBot(LoggingMixin):
# Init variables
order_amount = safe_value_fallback(order, 'filled', 'amount')
# Only run for closed orders
if trade.fee_updated(order.get('side', '')) or order['status'] == 'open':
if (
trade.fee_updated(order.get('side', ''))
or order['status'] == 'open'
or order_obj.ft_fee_base
):
return None
trade_base_currency = self.exchange.get_pair_base_currency(trade.pair)
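
A minimal sketch (synthetic ccxt-style order dict; the plain 'or' stands in for safe_value_fallback) of the timestamp handling in handle_onexchange_order above: prefer lastTradeTimestamp, fall back to timestamp, and convert ccxt's milliseconds into a tz-aware datetime.

from datetime import datetime, timezone

order = {'lastTradeTimestamp': None, 'timestamp': 1689430568000}  # ccxt timestamps are in ms
ts_ms = order['lastTradeTimestamp'] or order['timestamp']
filled_date = datetime.fromtimestamp(ts_ms // 1000, tz=timezone.utc)
print(filled_date)  # 2023-07-15 14:16:08+00:00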

View File

@@ -5,6 +5,7 @@ from logging.handlers import RotatingFileHandler, SysLogHandler
from freqtrade.constants import Config
from freqtrade.exceptions import OperationalException
from freqtrade.loggers.buffering_handler import FTBufferingHandler
from freqtrade.loggers.set_log_levels import set_loggers
from freqtrade.loggers.std_err_stream_handler import FTStdErrStreamHandler
@@ -16,28 +17,6 @@ bufferHandler = FTBufferingHandler(1000)
bufferHandler.setFormatter(Formatter(LOGFORMAT))
def _set_loggers(verbosity: int = 0, api_verbosity: str = 'info') -> None:
"""
Set the logging level for third party libraries
:return: None
"""
logging.getLogger('requests').setLevel(
logging.INFO if verbosity <= 1 else logging.DEBUG
)
logging.getLogger("urllib3").setLevel(
logging.INFO if verbosity <= 1 else logging.DEBUG
)
logging.getLogger('ccxt.base.exchange').setLevel(
logging.INFO if verbosity <= 2 else logging.DEBUG
)
logging.getLogger('telegram').setLevel(logging.INFO)
logging.getLogger('werkzeug').setLevel(
logging.ERROR if api_verbosity == 'error' else logging.INFO
)
def get_existing_handlers(handlertype):
"""
Returns the existing handler, or None if the handler has not yet been added to the root handlers.
@@ -114,6 +93,6 @@ def setup_logging(config: Config) -> None:
logging.root.addHandler(handler_rf)
logging.root.setLevel(logging.INFO if verbosity < 1 else logging.DEBUG)
_set_loggers(verbosity, config.get('api_server', {}).get('verbosity', 'info'))
set_loggers(verbosity, config.get('api_server', {}).get('verbosity', 'info'))
logger.info('Verbosity set to %s', verbosity)

View File

@@ -0,0 +1,55 @@
import logging
logger = logging.getLogger(__name__)
def set_loggers(verbosity: int = 0, api_verbosity: str = 'info') -> None:
"""
Set the logging level for third party libraries
:return: None
"""
logging.getLogger('requests').setLevel(
logging.INFO if verbosity <= 1 else logging.DEBUG
)
logging.getLogger("urllib3").setLevel(
logging.INFO if verbosity <= 1 else logging.DEBUG
)
logging.getLogger('ccxt.base.exchange').setLevel(
logging.INFO if verbosity <= 2 else logging.DEBUG
)
logging.getLogger('telegram').setLevel(logging.INFO)
logging.getLogger('httpx').setLevel(logging.WARNING)
logging.getLogger('werkzeug').setLevel(
logging.ERROR if api_verbosity == 'error' else logging.INFO
)
__BIAS_TESTER_LOGGERS = [
'freqtrade.resolvers',
'freqtrade.strategy.hyper',
'freqtrade.configuration.config_validation',
]
def reduce_verbosity_for_bias_tester() -> None:
"""
Reduce verbosity for the bias tester.
It loads the same strategy several times, which would spam the log.
"""
logger.info("Reducing verbosity for bias tester.")
for logger_name in __BIAS_TESTER_LOGGERS:
logging.getLogger(logger_name).setLevel(logging.WARNING)
def restore_verbosity_for_bias_tester() -> None:
"""
Restore verbosity after the bias tester has run.
"""
logger.info("Restoring log verbosity.")
log_level = logging.NOTSET
for logger_name in __BIAS_TESTER_LOGGERS:
logging.getLogger(logger_name).setLevel(log_level)
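
A hypothetical caller (the bias tester itself is not part of this diff): pairing the two calls in try/finally keeps the verbosity change scoped even if the checks raise.

from freqtrade.loggers.set_log_levels import (reduce_verbosity_for_bias_tester,
                                              restore_verbosity_for_bias_tester)

reduce_verbosity_for_bias_tester()
try:
    pass  # load the strategy repeatedly / run the lookahead checks here
finally:
    restore_verbosity_for_bias_tester()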

Some files were not shown because too many files have changed in this diff.