Merge branch 'develop' into feature/proceed-exit-while-open-order

This commit is contained in:
Axel-CH
2024-12-01 10:51:41 -04:00
219 changed files with 18735 additions and 7501 deletions

View File

@@ -46,7 +46,7 @@ jobs:
uses: actions/cache@v4
with:
path: ~/.cache/pip
key: test-${{ matrix.os }}-${{ matrix.python-version }}-pip
key: pip-${{ matrix.python-version }}-ubuntu
- name: TA binary *nix
if: steps.cache.outputs.cache-hit != 'true'
@@ -68,6 +68,12 @@ jobs:
python build_helpers/freqtrade_client_version_align.py
- name: Tests
if: (!(runner.os == 'Linux' && matrix.python-version == '3.12' && matrix.os == 'ubuntu-22.04'))
run: |
pytest --random-order
- name: Tests with Coveralls
if: (runner.os == 'Linux' && matrix.python-version == '3.12' && matrix.os == 'ubuntu-22.04')
run: |
pytest --random-order --cov=freqtrade --cov=freqtrade_client --cov-config=.coveragerc
@@ -138,7 +144,7 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ "macos-12", "macos-13", "macos-14" ]
os: [ "macos-13", "macos-14", "macos-15" ]
python-version: ["3.10", "3.11", "3.12"]
steps:
@@ -161,7 +167,7 @@ jobs:
uses: actions/cache@v4
with:
path: ~/Library/Caches/pip
key: ${{ matrix.os }}-${{ matrix.python-version }}-pip
key: pip-${{ matrix.os }}-${{ matrix.python-version }}
- name: TA binary *nix
if: steps.cache.outputs.cache-hit != 'true'
@@ -190,7 +196,7 @@ jobs:
rm /usr/local/bin/python3.11-config || true
rm /usr/local/bin/python3.12-config || true
brew install hdf5 c-blosc libomp
brew install libomp
- name: Installation (python)
run: |
@@ -274,7 +280,7 @@ jobs:
uses: actions/cache@v4
with:
path: ~\AppData\Local\pip\Cache
key: ${{ matrix.os }}-${{ matrix.python-version }}-pip
key: pip-${{ matrix.os }}-${{ matrix.python-version }}
- name: Installation
run: |
@@ -414,7 +420,7 @@ jobs:
uses: actions/cache@v4
with:
path: ~/.cache/pip
key: test-${{ matrix.os }}-${{ matrix.python-version }}-pip
key: pip-3.12-ubuntu
- name: TA binary *nix
if: steps.cache.outputs.cache-hit != 'true'
@@ -534,12 +540,12 @@ jobs:
- name: Publish to PyPI (Test)
uses: pypa/gh-action-pypi-publish@v1.10.3
uses: pypa/gh-action-pypi-publish@v1.12.2
with:
repository-url: https://test.pypi.org/legacy/
- name: Publish to PyPI
uses: pypa/gh-action-pypi-publish@v1.10.3
uses: pypa/gh-action-pypi-publish@v1.12.2
deploy-docker:

View File

@@ -9,17 +9,17 @@ repos:
# stages: [push]
- repo: https://github.com/pre-commit/mirrors-mypy
rev: "v1.11.2"
rev: "v1.13.0"
hooks:
- id: mypy
exclude: build_helpers
additional_dependencies:
- types-cachetools==5.5.0.20240820
- types-filelock==3.2.7
- types-requests==2.32.0.20240914
- types-requests==2.32.0.20241016
- types-tabulate==0.9.0.20240106
- types-python-dateutil==2.9.0.20241003
- SQLAlchemy==2.0.35
- SQLAlchemy==2.0.36
# stages: [push]
- repo: https://github.com/pycqa/isort
@@ -31,7 +31,7 @@ repos:
- repo: https://github.com/charliermarsh/ruff-pre-commit
# Ruff version.
rev: 'v0.6.9'
rev: 'v0.8.0'
hooks:
- id: ruff
- id: ruff-format

View File

@@ -33,6 +33,7 @@ Please read the [exchange specific notes](docs/exchanges.md) to learn about even
- [X] [Bybit](https://bybit.com/)
- [X] [Gate.io](https://www.gate.io/ref/6266643)
- [X] [HTX](https://www.htx.com/) (Former Huobi)
- [X] [Hyperliquid](https://hyperliquid.xyz/) (A decentralized exchange, or DEX)
- [X] [Kraken](https://kraken.com/)
- [X] [OKX](https://okx.com/) (Former OKEX)
- [ ] [potentially many others](https://github.com/ccxt/ccxt/). _(We cannot guarantee they will work)_
@@ -41,6 +42,7 @@ Please read the [exchange specific notes](docs/exchanges.md) to learn about even
- [X] [Binance](https://www.binance.com/)
- [X] [Gate.io](https://www.gate.io/ref/6266643)
- [X] [Hyperliquid](https://hyperliquid.xyz/) (A decentralized exchange, or DEX)
- [X] [OKX](https://okx.com/)
- [X] [Bybit](https://bybit.com/)

View File

@@ -682,12 +682,18 @@
},
"exit_fill": {
"description": "Telegram setting for exit fill signals.",
"type": "string",
"enum": [
"on",
"off",
"silent"
"type": [
"string",
"object"
],
"additionalProperties": {
"type": "string",
"enum": [
"on",
"off",
"silent"
]
},
"default": "on"
},
"exit_cancel": {
@@ -1383,6 +1389,11 @@
"type": "string",
"default": "example"
},
"wait_for_training_iteration_on_reload": {
"description": "Wait for the next training iteration to complete after /reload or ctrl+c.",
"type": "boolean",
"default": true
},
"feature_parameters": {
"description": "The parameters used to engineer the feature set",
"type": "object",

View File

@@ -37,8 +37,8 @@ class SuperDuperHyperOptLoss(IHyperOptLoss):
min_date: datetime,
max_date: datetime,
config: Config,
processed: Dict[str, DataFrame],
backtest_stats: Dict[str, Any],
processed: dict[str, DataFrame],
backtest_stats: dict[str, Any],
**kwargs,
) -> float:
"""
@@ -103,7 +103,7 @@ class MyAwesomeStrategy(IStrategy):
SKDecimal(0.01, 0.20, decimals=3, name='roi_p3'),
]
def generate_roi_table(params: Dict) -> Dict[int, float]:
def generate_roi_table(params: Dict) -> dict[int, float]:
roi_table = {}
roi_table[0] = params['roi_p1'] + params['roi_p2'] + params['roi_p3']

View File

@@ -10,12 +10,14 @@ To learn how to get data for the pairs and exchange you're interested in, head o
```
usage: freqtrade backtesting [-h] [-v] [--logfile FILE] [-V] [-c PATH]
[-d PATH] [--userdir PATH] [-s NAME]
[--strategy-path PATH] [-i TIMEFRAME]
[--timerange TIMERANGE]
[--data-format-ohlcv {json,jsongz,hdf5}]
[--strategy-path PATH]
[--recursive-strategy-search]
[--freqaimodel NAME] [--freqaimodel-path PATH]
[-i TIMEFRAME] [--timerange TIMERANGE]
[--data-format-ohlcv {json,jsongz,hdf5,feather,parquet}]
[--max-open-trades INT]
[--stake-amount STAKE_AMOUNT] [--fee FLOAT]
[-p PAIRS [PAIRS ...]] [--eps] [--dmmp]
[-p PAIRS [PAIRS ...]] [--eps]
[--enable-protections]
[--dry-run-wallet DRY_RUN_WALLET]
[--timeframe-detail TIMEFRAME_DETAIL]
@@ -24,8 +26,9 @@ usage: freqtrade backtesting [-h] [-v] [--logfile FILE] [-V] [-c PATH]
[--export-filename PATH]
[--breakdown {day,week,month} [{day,week,month} ...]]
[--cache {none,day,week,month}]
[--freqai-backtest-live-models]
optional arguments:
options:
-h, --help show this help message and exit
-i TIMEFRAME, --timeframe TIMEFRAME
Specify timeframe (`1m`, `5m`, `30m`, `1h`, `1d`).
@@ -48,10 +51,6 @@ optional arguments:
--eps, --enable-position-stacking
Allow buying the same pair multiple times (position
stacking).
--dmmp, --disable-max-market-positions
Disable applying `max_open_trades` during backtest
(same as setting `max_open_trades` to a very high
number).
--enable-protections, --enableprotections
Enable protections for backtesting.Will slow
backtesting down by a considerable amount, but will
@@ -80,10 +79,13 @@ optional arguments:
--cache {none,day,week,month}
Load a cached backtest result no older than specified
age (default: day).
--freqai-backtest-live-models
Run backtest with ready models.
Common arguments:
-v, --verbose Verbose mode (-vv for more, -vvv to get all messages).
--logfile FILE Log to the file specified. Special values are:
--logfile FILE, --log-file FILE
Log to the file specified. Special values are:
'syslog', 'journald'. See the documentation for more
details.
-V, --version show program's version number and exit
@@ -92,7 +94,7 @@ Common arguments:
`userdir/config.json` or `config.json` whichever
exists). Multiple --config options may be used. Can be
set to `-` to read config from stdin.
-d PATH, --datadir PATH
-d PATH, --datadir PATH, --data-dir PATH
Path to directory with historical backtesting data.
--userdir PATH, --user-data-dir PATH
Path to userdata directory.
@@ -102,6 +104,12 @@ Strategy arguments:
Specify strategy class name which will be used by the
bot.
--strategy-path PATH Specify additional strategy lookup path.
--recursive-strategy-search
Recursively search for a strategy in the strategies
folder.
--freqaimodel NAME Specify a custom freqaimodels.
--freqaimodel-path PATH
Specify additional lookup path for freqaimodels.
```
@@ -558,6 +566,7 @@ Since backtesting lacks some detailed information about what happens within a ca
- Stoploss
- ROI
- Trailing stoploss
- Position reversals (futures only) happen if an entry signal in the opposite direction of the closing trade triggers on the candle where the existing trade closes.
Taking these assumptions, backtesting tries to mirror real trading as closely as possible. However, backtesting will **never** replace running a strategy in dry-run mode.
Also, keep in mind that past results don't guarantee future success.
@@ -572,7 +581,7 @@ These limits are usually listed in the exchange documentation as "trading rules"
Backtesting (as well as live and dry-run) does honor these limits, and will ensure that a stoploss can be placed below this value - so the value will be slightly higher than what the exchange specifies.
Freqtrade has however no information about historic limits.
This can lead to situations where trading-limits are inflated by using a historic price, resulting in minimum amounts > 50$.
This can lead to situations where trading-limits are inflated by using a historic price, resulting in minimum amounts > 50\$.
For example:

View File

@@ -146,10 +146,10 @@ Freqtrade can also load many options via command line (CLI) arguments (check out
The order of precedence for all options is as follows:
- CLI arguments override any other option
- [Environment Variables](#environment-variables)
- Configuration files are used in sequence (the last file wins) and override Strategy configurations.
- Strategy configurations are only used if they are not set via configuration or command-line arguments. These options are marked with [Strategy Override](#parameters-in-the-strategy) in the below table.
* CLI arguments override any other option
* [Environment Variables](#environment-variables)
* Configuration files are used in sequence (the last file wins) and override Strategy configurations.
* Strategy configurations are only used if they are not set via configuration or command-line arguments. These options are marked with [Strategy Override](#parameters-in-the-strategy) in the below table.
### Parameters table
@@ -183,7 +183,7 @@ Mandatory parameters are marked as **Required**, which means that they are requi
| `margin_mode` | When trading with leverage, this determines if the collateral owned by the trader will be shared or isolated to each trading pair [leverage documentation](leverage.md). <br> **Datatype:** String
| `liquidation_buffer` | A ratio specifying how large of a safety net to place between the liquidation price and the stoploss to prevent a position from reaching the liquidation price [leverage documentation](leverage.md). <br>*Defaults to `0.05`.* <br> **Datatype:** Float
| | **Unfilled timeout**
| `unfilledtimeout.entry` | **Required.** How long (in minutes or seconds) the bot will wait for an unfilled entry order to complete, after which the order will be cancelled and repeated at current (new) price, as long as there is a signal. [Strategy Override](#parameters-in-the-strategy).<br> **Datatype:** Integer
| `unfilledtimeout.entry` | **Required.** How long (in minutes or seconds) the bot will wait for an unfilled entry order to complete, after which the order will be cancelled. [Strategy Override](#parameters-in-the-strategy).<br> **Datatype:** Integer
| `unfilledtimeout.exit` | **Required.** How long (in minutes or seconds) the bot will wait for an unfilled exit order to complete, after which the order will be cancelled and repeated at current (new) price, as long as there is a signal. [Strategy Override](#parameters-in-the-strategy).<br> **Datatype:** Integer
| `unfilledtimeout.unit` | Unit to use in unfilledtimeout setting. Note: If you set unfilledtimeout.unit to "seconds", "internals.process_throttle_secs" must be less than or equal to the timeout [Strategy Override](#parameters-in-the-strategy). <br> *Defaults to `"minutes"`.* <br> **Datatype:** String
| `unfilledtimeout.exit_timeout_count` | How many times can exit orders time out. Once this number of timeouts is reached, an emergency exit is triggered. 0 to disable and allow unlimited order cancels. [Strategy Override](#parameters-in-the-strategy).<br>*Defaults to `0`.* <br> **Datatype:** Integer
@@ -295,10 +295,10 @@ Values set in the configuration file always overwrite values set in the strategy
* `order_time_in_force`
* `unfilledtimeout`
* `disable_dataframe_checks`
- `use_exit_signal`
* `use_exit_signal`
* `exit_profit_only`
- `exit_profit_offset`
- `ignore_roi_if_entry_signal`
* `exit_profit_offset`
* `ignore_roi_if_entry_signal`
* `ignore_buying_expired_candle_after`
* `position_adjustment_enable`
* `max_entry_position_adjustment`
@@ -311,12 +311,12 @@ There are several methods to configure how much of the stake currency the bot wi
The minimum stake amount will depend on exchange and pair and is usually listed in the exchange support pages.
Assuming the minimum tradable amount for XRP/USD is 20 XRP (given by the exchange), and the price is 0.6$, the minimum stake amount to buy this pair is `20 * 0.6 ~= 12`.
This exchange has also a limit on USD - where all orders must be > 10$ - which however does not apply in this case.
Assuming the minimum tradable amount for XRP/USD is 20 XRP (given by the exchange), and the price is 0.6\$, the minimum stake amount to buy this pair is `20 * 0.6 ~= 12`.
This exchange has also a limit on USD - where all orders must be > 10\$ - which however does not apply in this case.
To guarantee safe execution, freqtrade will not allow buying with a stake-amount of 10.1$, instead, it'll make sure that there's enough space to place a stoploss below the pair (+ an offset, defined by `amount_reserve_percent`, which defaults to 5%).
To guarantee safe execution, freqtrade will not allow buying with a stake-amount of 10.1\$, instead, it'll make sure that there's enough space to place a stoploss below the pair (+ an offset, defined by `amount_reserve_percent`, which defaults to 5%).
With a reserve of 5%, the minimum stake amount would be ~12.6$ (`12 * (1 + 0.05)`). If we take into account a stoploss of 10% on top of that - we'd end up with a value of ~14$ (`12.6 / (1 - 0.1)`).
With a reserve of 5%, the minimum stake amount would be ~12.6\$ (`12 * (1 + 0.05)`). If we take into account a stoploss of 10% on top of that - we'd end up with a value of ~14\$ (`12.6 / (1 - 0.1)`).
To limit this calculation in case of large stoploss values, the calculated minimum stake-limit will never be more than 50% above the real limit.
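As a rough sketch of the arithmetic above (plain Python, values taken from the hypothetical example; this is not the actual freqtrade implementation):

```python
# Hypothetical numbers from the example above
min_amount = 20                # minimum tradable amount given by the exchange (XRP)
price = 0.6                    # current price in the stake currency
amount_reserve_percent = 0.05  # default reserve
stoploss = 0.10                # strategy stoploss

min_stake = min_amount * price               # 12.0 - raw exchange limit
min_stake *= 1 + amount_reserve_percent      # ~12.6 - add the reserve
min_stake /= 1 - abs(stoploss)               # ~14.0 - leave room for the stoploss
# The calculated limit is capped at 50% above the real exchange limit
min_stake = min(min_stake, min_amount * price * 1.5)
print(round(min_stake, 2))                   # 14.0
```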
@@ -362,9 +362,9 @@ To overcome this, the option `amend_last_stake_amount` can be set to `True`, whi
In the example above this would mean:
- Trade1: 400 USDT
- Trade2: 400 USDT
- Trade3: 200 USDT
* Trade1: 400 USDT
* Trade2: 400 USDT
* Trade3: 200 USDT
!!! Note
This option only applies with [Static stake amount](#static-stake-amount) - since [Dynamic stake amount](#dynamic-stake-amount) divides the balances evenly.

View File

@@ -11,9 +11,8 @@ Without provided configuration, `--exchange` becomes mandatory.
You can use a relative timerange (`--days 20`) or an absolute starting point (`--timerange 20200101-`). For incremental downloads, the relative approach should be used.
!!! Tip "Tip: Updating existing data"
If you already have backtesting data available in your data-directory and would like to refresh this data up to today, freqtrade will automatically calculate the data missing for the existing pairs and the download will occur from the latest available point until "now", neither --days or --timerange parameters are required. Freqtrade will keep the available data and only download the missing data.
If you are updating existing data after inserting new pairs that you have no data for, use `--new-pairs-days xx` parameter. Specified number of days will be downloaded for new pairs while old pairs will be updated with missing data only.
If you use `--days xx` parameter alone - data for specified number of days will be downloaded for _all_ pairs. Be careful, if specified number of days is smaller than gap between now and last downloaded candle - freqtrade will delete all existing data to avoid gaps in candle data.
If you already have backtesting data available in your data-directory and would like to refresh this data up to today, freqtrade will automatically calculate the missing timerange for the existing pairs and the download will occur from the latest available point until "now"; neither `--days` nor `--timerange` parameters are required. Freqtrade will keep the available data and only download the missing data.
If you are updating existing data after inserting new pairs that you have no data for, use the `--new-pairs-days xx` parameter. Specified number of days will be downloaded for new pairs while old pairs will be updated with missing data only.
### Usage
@@ -90,7 +89,7 @@ Common arguments:
!!! Tip "Downloading all data for one quote currency"
Often, you'll want to download data for all pairs of a specific quote-currency. In such cases, you can use the following shorthand:
`freqtrade download-data --exchange binance --pairs .*/USDT <...>`. The provided "pairs" string will be expanded to contain all active pairs on the exchange.
`freqtrade download-data --exchange binance --pairs ".*/USDT" <...>`. The provided "pairs" string will be expanded to contain all active pairs on the exchange.
To also download data for inactive (delisted) pairs, add `--include-inactive-pairs` to the command.
!!! Note "Startup period"
@@ -117,16 +116,17 @@ freqtrade download-data --exchange binance --pairs ETH/USDT XRP/USDT BTC/USDT
or as regex (in this case, to download all active USDT pairs)
```bash
freqtrade download-data --exchange binance --pairs .*/USDT
freqtrade download-data --exchange binance --pairs ".*/USDT"
```
### Other Notes
* To use a different directory than the exchange specific default, use `--datadir user_data/data/some_directory`.
* To change the exchange used to download the historical data from, please use a different configuration file (you'll probably need to adjust rate limits etc.)
* To change the exchange used to download the historical data from, either use `--exchange <exchange>` - or specify a different configuration file.
* To use `pairs.json` from some other directory, use `--pairs-file some_other_dir/pairs.json`.
* To download historical candle (OHLCV) data for only 10 days, use `--days 10` (defaults to 30 days).
* To download historical candle (OHLCV) data from a fixed starting point, use `--timerange 20200101-` - which will download all data from January 1st, 2020.
* Given starting points are ignored if data is already available, downloading only missing data up to today.
* Use `--timeframes` to specify which timeframes to download historical candle (OHLCV) data for. Default is `--timeframes 1m 5m` which will download 1-minute and 5-minute data.
* To use exchange, timeframe and list of pairs as defined in your configuration file, use the `-c/--config` option. With this, the script uses the whitelist defined in the config as the list of currency pairs to download data for and does not require the pairs.json file. You can combine `-c/--config` with most other options.

View File

@@ -116,7 +116,7 @@ A similar setup can also be taken for Pycharm - using `freqtrade` as module name
![Pycharm debug configuration](assets/pycharm_debug.png)
!!! Note "Startup directory"
This assumes that you have the repository checked out, and the editor is started at the repository root level (so setup.py is at the top level of your repository).
This assumes that you have the repository checked out, and the editor is started at the repository root level (so pyproject.toml is at the top level of your repository).
## ErrorHandling
@@ -162,7 +162,7 @@ Hopefully you also want to contribute this back upstream.
Whatever your motivations are - This should get you off the ground in trying to develop a new Pairlist Handler.
First of all, have a look at the [VolumePairList](https://github.com/freqtrade/freqtrade/blob/develop/freqtrade/pairlist/VolumePairList.py) Handler, and best copy this file with a name of your new Pairlist Handler.
First of all, have a look at the [VolumePairList](https://github.com/freqtrade/freqtrade/blob/develop/freqtrade/plugins/pairlist/VolumePairList.py) Handler, and best copy this file with a name of your new Pairlist Handler.
This is a simple Handler, which however serves as a good example on how to start developing.
@@ -226,7 +226,7 @@ In `VolumePairList`, this implements different methods of sorting, does early va
##### sample
``` python
def filter_pairlist(self, pairlist: List[str], tickers: Dict) -> List[str]:
def filter_pairlist(self, pairlist: list[str], tickers: dict) -> List[str]:
# Generate dynamic whitelist
pairs = self._calculate_pairlist(pairlist, tickers)
return pairs

View File

@@ -252,6 +252,14 @@ OKX requires a passphrase for each api key, you will therefore need to add this
Gate.io allows the use of `POINT` to pay for fees. As this is not a tradable currency (no regular market available), automatic fee calculations will fail (and default to a fee of 0).
The configuration parameter `exchange.unknown_fee_rate` can be used to specify the exchange rate between Point and the stake currency. Obviously, changing the stake-currency will also require changes to this value.
Gate API keys require the following permissions on top of the market type you want to trade:
* "Spot Trade" _or_ "Perpetual Futures" (Read and Write) (either select both, or the one matching the market you want to trade)
* "Wallet" (read only)
* "Account" (read only)
Without these permissions, the bot will not start correctly and show errors like "permission missing".
## Bybit
Futures trading on bybit is currently supported for USDT markets, and will use isolated futures mode.
@@ -261,6 +269,7 @@ On startup, freqtrade will set the position mode to "One-way Mode" for the whole
As bybit doesn't provide funding rate history, the dry-run calculation is used for live trades as well.
API Keys for live futures trading must have the following permissions:
* Read-write
* Contract - Orders
* Contract - Positions
@@ -295,6 +304,41 @@ It's therefore required to pass the UID as well.
!!! Warning "Necessary Verification"
Bitmart requires Verification Lvl2 to successfully trade on the spot market through the API - even though trading via UI works just fine with just Lvl1 verification.
## Hyperliquid
!!! Tip "Stoploss on Exchange"
Hyperliquid supports `stoploss_on_exchange` and uses `stop-loss-limit` orders. It provides significant advantages, so we recommend using it.
Hyperliquid is a Decentralized Exchange (DEX). Decentralized exchanges work a bit differently from regular exchanges. Instead of authenticating private API calls with an API key, private API calls need to be signed with the private key of your wallet (we recommend using an API wallet for this, generated either on Hyperliquid or in your wallet of choice).
This needs to be configured like this:
```json
"exchange": {
"name": "hyperliquid",
"walletAddress": "your_eth_wallet_address",
"privateKey": "your_api_private_key",
// ...
}
```
* walletAddress in hex format: `0x<40 hex characters>` - Can be easily copied from your wallet - and should be your wallet address, not your API Wallet Address.
* privateKey in hex format: `0x<64 hex characters>` - Use the key the API Wallet shows on creation.
Hyperliquid handles deposits and withdrawals on the Arbitrum One chain, a Layer 2 scaling solution built on top of Ethereum. Hyperliquid uses USDC as quote / collateral. The process of depositing USDC on Hyperliquid requires a couple of steps, see [how to start trading](https://hyperliquid.gitbook.io/hyperliquid-docs/onboarding/how-to-start-trading) for details on what steps are needed.
!!! Note "Hyperliquid general usage Notes"
Hyperliquid does not support market orders, however ccxt will simulate market orders by placing limit orders with a maximum slippage of 5%.
Unfortunately, hyperliquid only offers 5000 historic candles, so backtesting will either need to build candles historically (by waiting and downloading the data incrementally over time) - or will be limited to the last 5000 candles.
!!! Info "Some general best practices (non exhaustive)"
* Beware of supply chain attacks, like pip package poisoning etcetera. Whenever you use your private key, make sure your environment is safe.
* Don't use your actual wallet private key for trading. Use the Hyperliquid [API generator](https://app.hyperliquid.xyz/API) to create a separate API wallet.
* Don't store your actual wallet private key on the server you use for freqtrade. Use the API wallet private key instead. This key won't allow withdrawals, only trading.
* Always keep your mnemonic phrase and private key private.
* Don't use the same mnemonic as the one you had to back up when initializing a hardware wallet; reusing that mnemonic effectively negates the security of your hardware wallet.
* Create a different software wallet, only transfer the funds you want to trade with to that wallet, and use that wallet to trade on Hyperliquid.
* If you have funds you don't want to use for trading (after making a profit for example), transfer them back to your hardware wallet.
## All exchanges
Should you experience constant errors with Nonce (like `InvalidNonce`), it is best to regenerate the API keys. Resetting Nonce is difficult and it's usually easier to regenerate the API keys.
@@ -304,7 +348,7 @@ Should you experience constant errors with Nonce (like `InvalidNonce`), it is be
* The Ocean (exchange id: `theocean`) exchange uses Web3 functionality and requires `web3` python package to be installed:
```shell
$ pip3 install web3
pip3 install web3
```
### Getting latest price / Incomplete candles

View File

@@ -100,6 +100,19 @@ You can use the `/stopentry` command in Telegram to prevent future trade entry,
Please look at the [advanced setup documentation Page](advanced-setup.md#running-multiple-instances-of-freqtrade).
### I'm getting "Impossible to load Strategy" when starting the bot
This error message is shown when the bot cannot load the strategy.
Usually, you can use `freqtrade list-strategies` to list all available strategies.
The output of this command will also include a status column, showing if the strategy can be loaded.
Please check the following:
* Are you using the correct strategy name? The strategy name is case-sensitive and must correspond to the Strategy class name (not the filename!).
* Is the strategy in the `user_data/strategies` directory, and has the file-ending `.py`?
* Does the bot show other warnings before this error? Maybe you're missing some dependencies for the strategy - which would be highlighted in the log.
* In case of docker - is the strategy directory mounted correctly (check the volumes part of the docker-compose file)?
### I'm getting "Missing data fillup" messages in the log
This message is just a warning that the most recent candle data contained gaps (missing candles).
@@ -146,9 +159,9 @@ The same fix should be applied in the configuration file, if order types are def
### I'm trying to start the bot live, but get an API permission error
Errors like `Invalid API-key, IP, or permissions for action` mean exactly what they actually say.
Your API key is either invalid (copy/paste error? check for leading/trailing spaces in the config), expired, or the IP you're running the bot from is not enabled in the Exchange's API console.
Usually, the permission "Spot Trading" (or the equivalent in the exchange you use) will be necessary.
Errors like `Invalid API-key, IP, or permissions for action` mean exactly what they actually say.
Your API key is either invalid (copy/paste error? check for leading/trailing spaces in the config), expired, or the IP you're running the bot from is not enabled in the Exchange's API console.
Usually, the permission "Spot Trading" (or the equivalent in the exchange you use) will be necessary.
Futures will usually have to be enabled specifically.
### How do I search the bot logs for something?

View File

@@ -293,10 +293,10 @@ class MyCoolPyTorchClassifier(BasePyTorchClassifier):
super().__init__(**kwargs)
config = self.freqai_info.get("model_training_parameters", {})
self.learning_rate: float = config.get("learning_rate", 3e-4)
self.model_kwargs: Dict[str, Any] = config.get("model_kwargs", {})
self.trainer_kwargs: Dict[str, Any] = config.get("trainer_kwargs", {})
self.model_kwargs: dict[str, Any] = config.get("model_kwargs", {})
self.trainer_kwargs: dict[str, Any] = config.get("trainer_kwargs", {})
def fit(self, data_dictionary: Dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
def fit(self, data_dictionary: dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
"""
User sets up the training and test data to fit their desired model here
:param data_dictionary: the dictionary holding all data for train, test,
@@ -359,10 +359,10 @@ class PyTorchMLPRegressor(BasePyTorchRegressor):
super().__init__(**kwargs)
config = self.freqai_info.get("model_training_parameters", {})
self.learning_rate: float = config.get("learning_rate", 3e-4)
self.model_kwargs: Dict[str, Any] = config.get("model_kwargs", {})
self.trainer_kwargs: Dict[str, Any] = config.get("trainer_kwargs", {})
self.model_kwargs: dict[str, Any] = config.get("model_kwargs", {})
self.trainer_kwargs: dict[str, Any] = config.get("trainer_kwargs", {})
def fit(self, data_dictionary: Dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
def fit(self, data_dictionary: dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
n_features = data_dictionary["train_features"].shape[-1]
model = PyTorchMLPModel(
input_dim=n_features,
@@ -393,7 +393,7 @@ Here we create a `PyTorchMLPRegressor` class that implements the `fit` method. T
For example, if you are using a binary classifier to predict price movements as up or down, you can set the class names as follows:
```python
def set_freqai_targets(self, dataframe: DataFrame, metadata: Dict, **kwargs) -> DataFrame:
def set_freqai_targets(self, dataframe: DataFrame, metadata: dict, **kwargs) -> DataFrame:
self.freqai.class_names = ["down", "up"]
dataframe['&s-up_or_down'] = np.where(dataframe["close"].shift(-100) >
dataframe["close"], 'up', 'down')

View File

@@ -22,6 +22,7 @@ Mandatory parameters are marked as **Required** and have to be set in one of the
| `write_metrics_to_disk` | Collect train timings, inference timings and cpu usage in json file. <br> **Datatype:** Boolean. <br> Default: `False`
| `data_kitchen_thread_count` | <br> Designate the number of threads you want to use for data processing (outlier methods, normalization, etc.). This has no impact on the number of threads used for training. If user does not set it (default), FreqAI will use max number of threads - 2 (leaving 1 physical core available for Freqtrade bot and FreqUI) <br> **Datatype:** Positive integer.
| `activate_tensorboard` | <br> Indicate whether or not to activate tensorboard for the tensorboard enabled modules (currently Reinforcement Learning, XGBoost, Catboost, and PyTorch). Tensorboard needs Torch installed, which means you will need the torch/RL docker image or you need to answer "yes" to the install question about whether or not you wish to install Torch. <br> **Datatype:** Boolean. <br> Default: `True`.
| `wait_for_training_iteration_on_reload` | <br> When using /reload or ctrl-c, wait for the current training iteration to finish before completing a graceful shutdown. If set to `False`, FreqAI will break the current training iteration, allowing you to shut down gracefully more quickly, but you will lose your current training iteration. <br> **Datatype:** Boolean. <br> Default: `True`.
### Feature parameters

View File

@@ -42,11 +42,11 @@ usage: freqtrade hyperopt [-h] [-v] [--logfile FILE] [-V] [-c PATH] [-d PATH]
[--recursive-strategy-search] [--freqaimodel NAME]
[--freqaimodel-path PATH] [-i TIMEFRAME]
[--timerange TIMERANGE]
[--data-format-ohlcv {json,jsongz,hdf5}]
[--data-format-ohlcv {json,jsongz,hdf5,feather,parquet}]
[--max-open-trades INT]
[--stake-amount STAKE_AMOUNT] [--fee FLOAT]
[-p PAIRS [PAIRS ...]] [--hyperopt-path PATH]
[--eps] [--dmmp] [--enable-protections]
[--eps] [--enable-protections]
[--dry-run-wallet DRY_RUN_WALLET]
[--timeframe-detail TIMEFRAME_DETAIL] [-e INT]
[--spaces {all,buy,sell,roi,stoploss,trailing,protection,trades,default} [{all,buy,sell,roi,stoploss,trailing,protection,trades,default} ...]]
@@ -55,15 +55,15 @@ usage: freqtrade hyperopt [-h] [-v] [--logfile FILE] [-V] [-c PATH] [-d PATH]
[--hyperopt-loss NAME] [--disable-param-export]
[--ignore-missing-spaces] [--analyze-per-epoch]
optional arguments:
options:
-h, --help show this help message and exit
-i TIMEFRAME, --timeframe TIMEFRAME
Specify timeframe (`1m`, `5m`, `30m`, `1h`, `1d`).
--timerange TIMERANGE
Specify what timerange of data to use.
--data-format-ohlcv {json,jsongz,hdf5}
--data-format-ohlcv {json,jsongz,hdf5,feather,parquet}
Storage format for downloaded candle (OHLCV) data.
(default: `json`).
(default: `feather`).
--max-open-trades INT
Override the value of the `max_open_trades`
configuration setting.
@@ -80,10 +80,6 @@ optional arguments:
--eps, --enable-position-stacking
Allow buying the same pair multiple times (position
stacking).
--dmmp, --disable-max-market-positions
Disable applying `max_open_trades` during backtest
(same as setting `max_open_trades` to a very high
number).
--enable-protections, --enableprotections
Enable protections for backtesting.Will slow
backtesting down by a considerable amount, but will
@@ -133,7 +129,8 @@ optional arguments:
Common arguments:
-v, --verbose Verbose mode (-vv for more, -vvv to get all messages).
--logfile FILE Log to the file specified. Special values are:
--logfile FILE, --log-file FILE
Log to the file specified. Special values are:
'syslog', 'journald'. See the documentation for more
details.
-V, --version show program's version number and exit
@@ -142,7 +139,7 @@ Common arguments:
`userdir/config.json` or `config.json` whichever
exists). Multiple --config options may be used. Can be
set to `-` to read config from stdin.
-d PATH, --datadir PATH
-d PATH, --datadir PATH, --data-dir PATH
Path to directory with historical backtesting data.
--userdir PATH, --user-data-dir PATH
Path to userdata directory.
@@ -588,14 +585,15 @@ Currently, the following loss functions are builtin:
* `ShortTradeDurHyperOptLoss` - (default legacy Freqtrade hyperoptimization loss function) - Mostly for short trade duration and avoiding losses.
* `OnlyProfitHyperOptLoss` - takes only amount of profit into consideration.
* `SharpeHyperOptLoss` - optimizes Sharpe Ratio calculated on trade returns relative to standard deviation.
* `SharpeHyperOptLossDaily` - optimizes Sharpe Ratio calculated on **daily** trade returns relative to standard deviation.
* `SortinoHyperOptLoss` - optimizes Sortino Ratio calculated on trade returns relative to **downside** standard deviation.
* `SharpeHyperOptLoss` - Optimizes Sharpe Ratio calculated on trade returns relative to standard deviation.
* `SharpeHyperOptLossDaily` - Optimizes Sharpe Ratio calculated on **daily** trade returns relative to standard deviation.
* `SortinoHyperOptLoss` - Optimizes Sortino Ratio calculated on trade returns relative to **downside** standard deviation.
* `SortinoHyperOptLossDaily` - optimizes Sortino Ratio calculated on **daily** trade returns relative to **downside** standard deviation.
* `MaxDrawDownHyperOptLoss` - Optimizes Maximum absolute drawdown.
* `MaxDrawDownRelativeHyperOptLoss` - Optimizes both maximum absolute drawdown while also adjusting for maximum relative drawdown.
* `CalmarHyperOptLoss` - Optimizes Calmar Ratio calculated on trade returns relative to max drawdown.
* `ProfitDrawDownHyperOptLoss` - Optimizes by max Profit & min Drawdown objective. `DRAWDOWN_MULT` variable within the hyperoptloss file can be adjusted to be stricter or more flexible on drawdown purposes.
* `MultiMetricHyperOptLoss` - Optimizes by several key metrics to achieve balanced performance. The primary focus is on maximizing Profit and minimizing Drawdown, while also considering additional metrics such as Profit Factor, Expectancy Ratio and Winrate. Moreover, it applies a penalty for epochs with a low number of trades, encouraging strategies with adequate trade frequency.
Creation of a custom loss function is covered in the [Advanced Hyperopt](advanced-hyperopt.md) part of the documentation.
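For orientation, a minimal custom loss function might look like the sketch below. The interface follows the `IHyperOptLoss` example earlier in this changeset; the `profit_abs` column name and the import path are assumptions based on the documented examples.

```python
from datetime import datetime

from pandas import DataFrame

from freqtrade.optimize.hyperopt import IHyperOptLoss


class NegativeProfitHyperOptLoss(IHyperOptLoss):
    """Hypothetical loss function - smaller return values are considered better."""

    @staticmethod
    def hyperopt_loss_function(
        results: DataFrame,
        trade_count: int,
        min_date: datetime,
        max_date: datetime,
        *args,
        **kwargs,
    ) -> float:
        # Maximize absolute profit by minimizing its negative
        return -results["profit_abs"].sum()
```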
@@ -866,18 +864,15 @@ You can use the `--print-all` command line option if you would like to see all r
## Position stacking and disabling max market positions
In some situations, you may need to run Hyperopt (and Backtesting) with the
`--eps`/`--enable-position-staking` and `--dmmp`/`--disable-max-market-positions` arguments.
In some situations, you may need to run Hyperopt (and Backtesting) with the `--eps`/`--enable-position-stacking` argument, or you may need to set `max_open_trades` to a very high number to disable the limit on the number of open trades.
By default, hyperopt emulates the behavior of the Freqtrade Live Run/Dry Run, where only one
open trade is allowed for every traded pair. The total number of trades open for all pairs
open trade per pair is allowed. The total number of trades open for all pairs
is also limited by the `max_open_trades` setting. During Hyperopt/Backtesting this may lead to
some potential trades to be hidden (or masked) by previously open trades.
potential trades being hidden (or masked) by already open trades.
The `--eps`/`--enable-position-stacking` argument allows emulation of buying the same pair multiple times,
while `--dmmp`/`--disable-max-market-positions` disables applying `max_open_trades`
during Hyperopt/Backtesting (which is equal to setting `max_open_trades` to a very high
number).
The `--eps`/`--enable-position-stacking` argument allows emulation of buying the same pair multiple times.
Using `--max-open-trades` with a very high number will disable the limit on the number of open trades.
!!! Note
Dry/live runs will **NOT** use position stacking - therefore it does make sense to also validate the strategy without this as it's closer to reality.
@@ -918,13 +913,39 @@ Your epochs should therefore be aligned to the possible values - or you should b
After you run Hyperopt for the desired amount of epochs, you can later list all results for analysis, select only the best or profitable ones, and show the details for any of the epochs previously evaluated. This can be done with the `hyperopt-list` and `hyperopt-show` sub-commands. The usage of these sub-commands is described in the [Utils](utils.md#list-hyperopt-results) chapter.
## Output debug messages from your strategy
If you want to output debug messages from your strategy, you can use the `logging` module. By default, Freqtrade will output all messages with a level of `INFO` or higher.
``` python
import logging
logger = logging.getLogger(__name__)
class MyAwesomeStrategy(IStrategy):
...
def populate_entry_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
logger.info("This is a debug message")
...
```
!!! Note "using print"
Messages printed via `print()` will not be shown in the hyperopt output unless parallelism is disabled (`-j 1`).
It is recommended to use the `logging` module instead.
## Validate backtesting results
Once the optimized strategy has been implemented into your strategy, you should backtest this strategy to make sure everything is working as expected.
To achieve same the results (number of trades, their durations, profit, etc.) as during Hyperopt, please use the same configuration and parameters (timerange, timeframe, ...) used for hyperopt `--dmmp`/`--disable-max-market-positions` and `--eps`/`--enable-position-stacking` for Backtesting.
To achieve the same results (number of trades, their durations, profit, etc.) as during Hyperopt, please use the same configuration and parameters (timerange, timeframe, ...) for Backtesting as were used for Hyperopt.
### Why do my backtest results not match my hyperopt results?
Should results not match, check the following factors:
* You may have added parameters to hyperopt in `populate_indicators()` where they will be calculated only once **for all epochs**. If you are, for example, trying to optimise multiple SMA timeperiod values, the hyperoptable timeperiod parameter should be placed in `populate_entry_trend()` which is calculated every epoch. See [Optimizing an indicator parameter](https://www.freqtrade.io/en/stable/hyperopt/#optimizing-an-indicator-parameter).
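A rough sketch of the recommended placement, assuming the `IntParameter` usage shown in the linked documentation (strategy name, periods and thresholds are purely illustrative):

```python
import talib.abstract as ta
from pandas import DataFrame

from freqtrade.strategy import IntParameter, IStrategy


class SmaPeriodExample(IStrategy):
    timeframe = "5m"
    stoploss = -0.10
    minimal_roi = {"0": 0.05}

    # Hyperoptable SMA period
    buy_sma_period = IntParameter(10, 50, default=20, space="buy")

    def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        # Pre-compute one column per candidate period - this runs only once for all epochs
        for period in self.buy_sma_period.range:
            dataframe[f"sma_{period}"] = ta.SMA(dataframe, timeperiod=period)
        return dataframe

    def populate_entry_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        # The parameter value is read here, so every epoch evaluates a different column
        dataframe.loc[
            dataframe["close"] > dataframe[f"sma_{self.buy_sma_period.value}"],
            "enter_long",
        ] = 1
        return dataframe

    def populate_exit_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        dataframe.loc[
            dataframe["close"] < dataframe[f"sma_{self.buy_sma_period.value}"],
            "exit_long",
        ] = 1
        return dataframe
```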

View File

@@ -352,7 +352,7 @@ The optional `bearer_token` will be included in the requests Authorization Heade
#### MarketCapPairList
`MarketCapPairList` employs sorting/filtering of pairs by their marketcap rank based of CoinGecko. It will only recognize coins up to the coin placed at rank 250. The returned pairlist will be sorted based of their marketcap ranks.
`MarketCapPairList` employs sorting/filtering of pairs by their marketcap rank based on CoinGecko data. The returned pairlist will be sorted based on their marketcap ranks.
```json
"pairlists": [
@@ -366,7 +366,8 @@ The optional `bearer_token` will be included in the requests Authorization Heade
]
```
`number_assets` defines the maximum number of pairs returned by the pairlist. `max_rank` will determine the maximum rank used in creating/filtering the pairlist. It's expected that some coins within the top `max_rank` marketcap will not be included in the resulting pairlist since not all pairs will have active trading pairs in your preferred market/stake/exchange combination.
`number_assets` defines the maximum number of pairs returned by the pairlist. `max_rank` will determine the maximum rank used in creating/filtering the pairlist. It's expected that some coins within the top `max_rank` marketcap will not be included in the resulting pairlist since not all pairs will have active trading pairs in your preferred market/stake/exchange combination.
While using a `max_rank` bigger than 250 is supported, it's not recommended, as it'll cause multiple API calls to CoinGecko, which can lead to rate limit issues.
The `refresh_period` setting defines the interval (in seconds) at which the marketcap rank data will be refreshed. The default is 86,400 seconds (1 day). The pairlist cache (`refresh_period`) applies to both generating pairlists (when in the first position in the list) and filtering instances (when not in the first position in the list).

View File

@@ -28,7 +28,7 @@ Freqtrade is a free and open source crypto trading bot written in Python. It is
- Develop your Strategy: Write your strategy in python, using [pandas](https://pandas.pydata.org/). Example strategies to inspire you are available in the [strategy repository](https://github.com/freqtrade/freqtrade-strategies).
- Download market data: Download historical data of the exchange and the markets you may want to trade with.
- Backtest: Test your strategy on downloaded historical data.
- Optimize: Find the best parameters for your strategy using hyperoptimization which employs machining learning methods. You can optimize buy, sell, take profit (ROI), stop-loss and trailing stop-loss parameters for your strategy.
- Optimize: Find the best parameters for your strategy using hyperoptimization which employs machine learning methods. You can optimize buy, sell, take profit (ROI), stop-loss and trailing stop-loss parameters for your strategy.
- Select markets: Create your static list or use an automatic one based on top traded volumes and/or prices (not available during backtesting). You can also explicitly blacklist markets you don't want to trade.
- Run: Test your strategy with simulated money (Dry-Run mode) or deploy it with real money (Live-Trade mode).
- Run using Edge (optional module): The concept is to find the best historical [trade expectancy](edge.md#expectancy) by markets based on variation of the stop-loss and then allow/reject markets to trade. The sizing of the trade is based on a risk of a percentage of your capital.
@@ -40,11 +40,12 @@ Freqtrade is a free and open source crypto trading bot written in Python. It is
Please read the [exchange specific notes](exchanges.md) to learn about eventual, special configurations needed for each exchange.
- [X] [Binance](https://www.binance.com/)
- [X] [Bitmart](https://bitmart.com/)
- [X] [BingX](https://bingx.com/invite/0EM9RX)
- [X] [Bitmart](https://bitmart.com/)
- [X] [Bybit](https://bybit.com/)
- [X] [Gate.io](https://www.gate.io/ref/6266643)
- [X] [HTX](https://www.htx.com/) (Former Huobi)
- [X] [Hyperliquid](https://hyperliquid.xyz/) (A decentralized exchange, or DEX)
- [X] [Kraken](https://kraken.com/)
- [X] [OKX](https://okx.com/) (Former OKEX)
- [ ] [potentially many others through <img alt="ccxt" width="30px" src="assets/ccxt-logo.svg" />](https://github.com/ccxt/ccxt/). _(We cannot guarantee they will work)_
@@ -52,9 +53,10 @@ Please read the [exchange specific notes](exchanges.md) to learn about eventual,
### Supported Futures Exchanges (experimental)
- [X] [Binance](https://www.binance.com/)
- [X] [Gate.io](https://www.gate.io/ref/6266643)
- [X] [OKX](https://okx.com/)
- [X] [Bybit](https://bybit.com/)
- [X] [Gate.io](https://www.gate.io/ref/6266643)
- [X] [Hyperliquid](https://hyperliquid.xyz/) (A decentralized exchange, or DEX)
- [X] [OKX](https://okx.com/)
Please make sure to read the [exchange specific notes](exchanges.md), as well as the [trading with leverage](leverage.md) documentation before diving in.

View File

@@ -67,6 +67,18 @@ OS Specific steps are listed first, the common section below is necessary for al
sudo apt install -y python3-pip python3-venv python3-dev python3-pandas git curl
```
=== "MacOS"
#### Install necessary dependencies
Install [Homebrew](https://brew.sh/) if you don't have it already.
```bash
# install packages
brew install gettext libomp
```
!!! Note
The `setup.sh` script will install these dependencies for you - assuming brew is installed on your system.
=== "RaspberryPi/Raspbian"
The following assumes the latest [Raspbian Buster lite image](https://www.raspberrypi.org/downloads/raspbian/).
This image comes with python3.11 preinstalled, making it easy to get freqtrade up and running.
@@ -76,7 +88,7 @@ OS Specific steps are listed first, the common section below is necessary for al
```bash
sudo apt-get install python3-venv libatlas-base-dev cmake curl
# Use pywheels.org to speed up installation
# Use piwheels.org to speed up installation
echo -e "[global]\nextra-index-url=https://www.piwheels.org/simple" | sudo tee /etc/pip.conf
git clone https://github.com/freqtrade/freqtrade.git
@@ -150,9 +162,7 @@ Each time you open a new terminal, you must run `source .venv/bin/activate` to a
source ./.venv/bin/activate
```
### Congratulations
[You are ready](#you-are-ready), and run the bot
[You are now ready](#you-are-ready) to run the bot.
### Other options of /setup.sh script
@@ -220,7 +230,7 @@ cd ..
rm -rf ./ta-lib*
```
#### Setup Python virtual environment (virtualenv)
### Setup Python virtual environment (virtualenv)
You will run freqtrade in separated `virtual environment`
@@ -232,19 +242,18 @@ python3 -m venv .venv
source .venv/bin/activate
```
#### Install python dependencies
### Install python dependencies
```bash
python3 -m pip install --upgrade pip
python3 -m pip install -r requirements.txt
# install freqtrade
python3 -m pip install -e .
```
### Congratulations
[You are now ready](#you-are-ready) to run the bot.
[You are ready](#you-are-ready), and run the bot
#### (Optional) Post-installation Tasks
### (Optional) Post-installation Tasks
!!! Note
If you run the bot on a server, you should consider using [Docker](docker_quickstart.md) or a terminal multiplexer like `screen` or [`tmux`](https://en.wikipedia.org/wiki/Tmux) to avoid that the bot is stopped on logout.
@@ -333,9 +342,7 @@ cd build_helpers
bash install_ta-lib.sh ${CONDA_PREFIX} nosudo
```
### Congratulations
[You are ready](#you-are-ready), and run the bot
[You are now ready](#you-are-ready) to run the bot.
### Important shortcuts

View File

@@ -1,7 +1,7 @@
markdown==3.7
mkdocs==1.6.1
mkdocs-material==9.5.39
mkdocs-material==9.5.45
mdx_truly_sane_lists==1.3
pymdown-extensions==10.11.2
pymdown-extensions==10.12
jinja2==3.1.4
mike==2.1.3

View File

@@ -36,6 +36,7 @@ The Order-type will be ignored if only one mode is available.
| Gate | limit |
| Okx | limit |
| Kucoin | stop-limit, stop-market|
| Hyperliquid (futures only) | limit |
!!! Note "Tight stoploss"
<ins>Do not set too low/tight stoploss value when using stop loss on exchange!</ins>

docs/strategy-101.md (new file, 196 lines)
View File

@@ -0,0 +1,196 @@
# Freqtrade Strategies 101: A Quick Start for Strategy Development
For the purposes of this quick start, we are assuming you are familiar with the basics of trading, and have read the
[Freqtrade basics](bot-basics.md) page.
## Required Knowledge
A strategy in Freqtrade is a Python class that defines the logic for buying and selling cryptocurrency `assets`.
Assets are defined as `pairs`, which represent the `coin` and the `stake`. The coin is the asset you are trading using another currency as the stake.
Data is supplied by the exchange in the form of `candles`, which are made up of six values: `date`, `open`, `high`, `low`, `close` and `volume`.
`Technical analysis` functions analyse the candle data using various computational and statistical formulae, and produce secondary values called `indicators`.
Indicators are analysed on the asset pair candles to generate `signals`.
Signals are turned into `orders` on a cryptocurrency `exchange`, i.e. `trades`.
We use the terms `entry` and `exit` instead of `buying` and `selling` because Freqtrade supports both `long` and `short` trades.
- **long**: You buy the coin based on a stake, e.g. buying the coin BTC using USDT as your stake, and you make a profit by selling the coin at a higher rate than you paid for. In long trades, profits are made by the coin value going up versus the stake.
- **short**: You borrow capital from the exchange in the form of the coin, and you pay back the stake value of the coin later. In short trades profits are made by the coin value going down versus the stake (you pay the loan off at a lower rate).
Whilst Freqtrade supports spot and futures markets for certain exchanges, for simplicity we will focus on spot (long) trades only.
## Structure of a Basic Strategy
### Main dataframe
Freqtrade strategies use a tabular data structure with rows and columns known as a `dataframe` to generate signals to enter and exit trades.
Each pair in your configured pairlist has its own dataframe. Dataframes are indexed by the `date` column, e.g. `2024-06-30 12:00`.
The next 5 columns represent the `open`, `high`, `low`, `close` and `volume` (OHLCV) data.
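As a rough illustration (all values are hypothetical), such a dataframe could be built like this:

```python
import pandas as pd

# Hypothetical excerpt of a 5m candle dataframe - real data comes from the exchange
candles = pd.DataFrame(
    {
        "date": pd.to_datetime(["2024-06-30 12:00", "2024-06-30 12:05"]),
        "open": [61000.0, 61100.0],
        "high": [61250.0, 61300.0],
        "low": [60800.0, 61050.0],
        "close": [61100.0, 61280.0],
        "volume": [153.2, 98.7],
    }
)
print(candles)
```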
### Populate indicator values
The `populate_indicators` function adds columns to the dataframe that represent the technical analysis indicator values.
Examples of common indicators include Relative Strength Index, Bollinger Bands, Money Flow Index, Moving Average, and Average True Range.
Columns are added to the dataframe by calling technical analysis functions, e.g. ta-lib's RSI function `ta.RSI()`, and assigning them to a column name, e.g. `rsi`
```python
dataframe['rsi'] = ta.RSI(dataframe)
```
??? Hint "Technical Analysis libraries"
Different libraries work in different ways to generate indicator values. Please check the documentation of each library to understand
how to integrate it into your strategy. You can also check the [Freqtrade example strategies](https://github.com/freqtrade/freqtrade-strategies) to give you ideas.
### Populate entry signals
The `populate_entry_trend` function defines conditions for an entry signal.
The dataframe column `enter_long` is added to the dataframe, and when a value of `1` is in this column, Freqtrade sees an entry signal.
??? Hint "Shorting"
To enter short trades, use the `enter_short` column.
### Populate exit signals
The `populate_exit_trend` function defines conditions for an exit signal.
The dataframe column `exit_long` is added to the dataframe, and when a value of `1` is in this column, Freqtrade sees an exit signal.
??? Hint "Shorting"
To exit short trades, use the `exit_short` column.
## A simple strategy
Here is a minimal example of a Freqtrade strategy:
```python
from freqtrade.strategy import IStrategy
from pandas import DataFrame
import talib.abstract as ta
class MyStrategy(IStrategy):
# set the initial stoploss to -10%
stoploss = -0.10
# exit profitable positions at any time when the profit is greater than 1%
minimal_roi = {"0": 0.01}
def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
# generate values for technical analysis indicators
dataframe['rsi'] = ta.RSI(dataframe, timeperiod=14)
return dataframe
def populate_entry_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
# generate entry signals based on indicator values
dataframe.loc[
(dataframe['rsi'] < 30),
'enter_long'] = 1
return dataframe
def populate_exit_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
# generate exit signals based on indicator values
dataframe.loc[
(dataframe['rsi'] > 70),
'exit_long'] = 1
return dataframe
```
## Making trades
When a signal is found (a `1` in an entry or exit column), Freqtrade will attempt to make an order, i.e. a `trade` or `position`.
Each new trade position takes up a `slot`. Slots represent the maximum number of concurrent new trades that can be opened.
The number of slots is defined by the `max_open_trades` [configuration](configuration.md) option.
However, there can be a range of scenarios where generating a signal does not always create a trade order. These include:
- not enough remaining stake to buy an asset, or funds in your wallet to sell an asset (including any fees)
- not enough remaining free slots for a new trade to be opened (the number of positions you have open equals the `max_open_trades` option)
- there is already an open trade for a pair (Freqtrade cannot stack positions - however it can [adjust existing positions](strategy-callbacks.md#adjust-trade-position))
- if an entry and exit signal is present on the same candle, they are considered as [colliding](strategy-customization.md#colliding-signals), and no order will be raised
- the strategy actively rejects the trade order due to logic you specify by using one of the relevant [entry](strategy-callbacks.md#trade-entry-buy-order-confirmation) or [exit](strategy-callbacks.md#trade-exit-sell-order-confirmation) callbacks
Read through the [strategy customization](strategy-customization.md) documentation for more details.
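As a rough sketch of the last point above, an entry-confirmation callback could look like the following (callback name and signature as documented on the strategy callbacks page; the rejection rule itself is purely illustrative):

```python
from datetime import datetime
from typing import Optional

from freqtrade.strategy import IStrategy


class MyStrategy(IStrategy):
    # ... populate_indicators / populate_entry_trend / populate_exit_trend as above ...

    def confirm_trade_entry(
        self,
        pair: str,
        order_type: str,
        amount: float,
        rate: float,
        time_in_force: str,
        current_time: datetime,
        entry_tag: Optional[str],
        side: str,
        **kwargs,
    ) -> bool:
        # Illustrative rule: reject entries during the first hour of the day (UTC)
        if current_time.hour == 0:
            return False
        return True
```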
## Backtesting and forward testing
Strategy development can be a long and frustrating process, as turning our human "gut instincts" into a working computer-controlled
("algo") strategy is not always straightforward.
Therefore a strategy should be tested to verify that it is going to work as intended.
Freqtrade has two testing modes:
- **backtesting**: using historical data that you [download from an exchange](data-download.md), backtesting is a quick way to assess performance of a strategy. However, it can be very easy to distort results so a strategy will look a lot more profitable than it really is. Check the [backtesting documentation](backtesting.md) for more information.
- **dry run**: often referred to as _forward testing_, dry runs use real time data from the exchange. However, any signals that would result in trades are tracked as normal by Freqtrade, but do not have any trades opened on the exchange itself. Forward testing runs in real time, so whilst it takes longer to get results it is a much more reliable indicator of **potential** performance than backtesting.
Dry runs are enabled by setting `dry_run` to true in your [configuration](configuration.md#using-dry-run-mode).
!!! Warning "Backtests can be very inaccurate"
There are many reasons why backtest results may not match reality. Please check the [backtesting assumptions](backtesting.md#assumptions-made-by-backtesting) and [common strategy mistakes](strategy-customization.md#common-mistakes-when-developing-strategies) documentation.
Some websites that list and rank Freqtrade strategies show impressive backtest results. Do not assume these results are achievable or realistic.
??? Hint "Useful commands"
Freqtrade includes two useful commands to check for basic flaws in strategies: [lookahead-analysis](lookahead-analysis.md) and [recursive-analysis](recursive-analysis.md).
### Assessing backtesting and dry run results
Always dry run your strategy after backtesting it to see if backtesting and dry run results are sufficiently similar.
If there is any significant difference, verify that your entry and exit signals are consistent and appear on the same candles between the two modes. However, there will always be differences between dry runs and backtests:
- Backtesting assumes all orders fill. In dry runs this might not be the case if using limit orders or there is no volume on the exchange.
- Following an entry signal on candle close, backtesting assumes trades enter at the next candle's open price (unless you have custom pricing callbacks in your strategy). In dry runs, there is often a delay between signals and trades opening.
This is because when new candles come in on your main timeframe, e.g. every 5 minutes, it takes time for Freqtrade to analyse all pair dataframes. Therefore, Freqtrade will attempt to open trades a few seconds (ideally with as small a delay as possible)
after candle open.
- As entry rates in dry runs might not match backtesting, this means profit calculations will also differ. Therefore, it is normal if ROI, stoploss, trailing stoploss and callback exits are not identical.
- The greater the computational "lag" between new candles coming in, signals being raised, and trades being opened, the greater the price unpredictability. Make sure your computer is powerful enough to process the data for the number
of pairs you have in your pairlist within a reasonable time. Freqtrade will warn you in the logs if there are significant data processing delays.
## Controlling or monitoring a running bot
Once your bot is running in dry or live mode, Freqtrade provides five mechanisms to control or monitor it:
- **[FreqUI](freq-ui.md)**: The easiest to get started with, FreqUI is a web interface to see and control current activity of your bot.
- **[Telegram](telegram-usage.md)**: On mobile devices, Telegram integration is available to get alerts about your bot activity and to control certain aspects.
- **[FTUI](https://github.com/freqtrade/ftui)**: FTUI is a terminal (command line) interface to Freqtrade, and allows monitoring of a running bot only.
- **[REST API](rest-api.md)**: The REST API allows programmers to develop their own tools to interact with a Freqtrade bot.
- **[Webhooks](webhook-config.md)**: Freqtrade can send information to other services, e.g. Discord, via webhooks.
### Logs
Freqtrade generates extensive debugging logs to help you understand what's happening. Please familiarise yourself with the information and error messages you might see in your bot logs.
## Final Thoughts
Algo trading is difficult, and most public strategies are not good performers due to the time and effort required to make a strategy work profitably in multiple scenarios.
Therefore, taking public strategies and using backtests as a way to assess performance is often problematic. However, Freqtrade provides useful ways to help you make decisions and do your due diligence.
There are many different ways to achieve profitability, and there is no one single tip, trick or config option that will fix a poorly performing strategy.
Freqtrade is an open source platform with a large and helpful community - make sure to visit our [discord channel](https://discord.gg/p7nuUNVfP7) to discuss your strategy with others!
As always, only invest what you are willing to lose.
## Conclusion
Developing a strategy in Freqtrade involves defining entry and exit signals based on technical indicators. By following the structure and methods outlined above, you can create and test your own trading strategies.
Common questions and answers are available on our [FAQ](faq.md).
To continue, refer to the more in-depth [Freqtrade strategy customization documentation](strategy-customization.md).

View File

@@ -38,9 +38,9 @@ class AwesomeStrategy(IStrategy):
trade.set_custom_data(key='entry_type', value=trade_entry_type)
return super().bot_loop_start(**kwargs)
def adjust_entry_price(self, trade: Trade, order: Optional[Order], pair: str,
def adjust_entry_price(self, trade: Trade, order: Order | None, pair: str,
current_time: datetime, proposed_rate: float, current_order_rate: float,
entry_tag: Optional[str], side: str, **kwargs) -> float:
entry_tag: str | None, side: str, **kwargs) -> float:
# Limit orders to use and follow SMA200 as price target for the first 10 minutes since entry trigger for BTC/USDT pair.
if (
pair == 'BTC/USDT'

View File

@@ -90,8 +90,8 @@ Called before entering a trade, makes it possible to manage your position size w
class AwesomeStrategy(IStrategy):
def custom_stake_amount(self, pair: str, current_time: datetime, current_rate: float,
proposed_stake: float, min_stake: Optional[float], max_stake: float,
leverage: float, entry_tag: Optional[str], side: str,
proposed_stake: float, min_stake: float | None, max_stake: float,
leverage: float, entry_tag: str | None, side: str,
**kwargs) -> float:
dataframe, _ = self.dp.get_analyzed_dataframe(pair=pair, timeframe=self.timeframe)
@@ -165,7 +165,8 @@ Called for open trade every iteration (roughly every 5 seconds) until a trade is
The usage of the custom stoploss method must be enabled by setting `use_custom_stoploss=True` on the strategy object.
The stoploss price can only ever move upwards - if the stoploss value returned from `custom_stoploss` would result in a lower stoploss price than was previously set, it will be ignored. The traditional `stoploss` value serves as an absolute lower level and will be instated as the initial stoploss (before this method is called for the first time for a trade), and is still mandatory.
The stoploss price can only ever move upwards - if the stoploss value returned from `custom_stoploss` would result in a lower stoploss price than was previously set, it will be ignored. The traditional `stoploss` value serves as an absolute lower level and will be instated as the initial stoploss (before this method is called for the first time for a trade), and is still mandatory.
As custom stoploss acts as regular, changing stoploss, it will behave similar to `trailing_stop` - and trades exiting due to this will have the exit_reason of `"trailing_stop_loss"`.
The method must return a stoploss value (float / number) as a percentage of the current price.
E.g. If the `current_rate` is 200 USD, then returning `0.02` will set the stoploss price 2% lower, at 196 USD.
@@ -212,7 +213,7 @@ class AwesomeStrategy(IStrategy):
def custom_stoploss(self, pair: str, trade: Trade, current_time: datetime,
current_rate: float, current_profit: float, after_fill: bool,
**kwargs) -> Optional[float]:
**kwargs) -> float | None:
"""
Custom stoploss logic, returning the new distance relative to current_rate (as ratio).
e.g. returning -0.05 would create a stoploss 5% below current_rate.
@@ -250,7 +251,7 @@ class AwesomeStrategy(IStrategy):
def custom_stoploss(self, pair: str, trade: Trade, current_time: datetime,
current_rate: float, current_profit: float, after_fill: bool,
**kwargs) -> Optional[float]:
**kwargs) -> float | None:
# Make sure you have the longest interval first - these conditions are evaluated from top to bottom.
if current_time - timedelta(minutes=120) > trade.open_date_utc:
@@ -276,7 +277,7 @@ class AwesomeStrategy(IStrategy):
def custom_stoploss(self, pair: str, trade: Trade, current_time: datetime,
current_rate: float, current_profit: float, after_fill: bool,
**kwargs) -> Optional[float]:
**kwargs) -> float | None:
if after_fill:
# After an additional order, start with a stoploss of 10% below the new open rate
@@ -305,7 +306,7 @@ class AwesomeStrategy(IStrategy):
def custom_stoploss(self, pair: str, trade: Trade, current_time: datetime,
current_rate: float, current_profit: float, after_fill: bool,
**kwargs) -> Optional[float]:
**kwargs) -> float | None:
if pair in ("ETH/BTC", "XRP/BTC"):
return -0.10
@@ -331,7 +332,7 @@ class AwesomeStrategy(IStrategy):
def custom_stoploss(self, pair: str, trade: Trade, current_time: datetime,
current_rate: float, current_profit: float, after_fill: bool,
**kwargs) -> Optional[float]:
**kwargs) -> float | None:
if current_profit < 0.04:
return None # return None to keep using the initial stoploss
@@ -363,7 +364,7 @@ class AwesomeStrategy(IStrategy):
def custom_stoploss(self, pair: str, trade: Trade, current_time: datetime,
current_rate: float, current_profit: float, after_fill: bool,
**kwargs) -> Optional[float]:
**kwargs) -> float | None:
# evaluate highest to lowest, so that highest possible stop is used
if current_profit > 0.40:
@@ -394,7 +395,7 @@ class AwesomeStrategy(IStrategy):
def custom_stoploss(self, pair: str, trade: Trade, current_time: datetime,
current_rate: float, current_profit: float, after_fill: bool,
**kwargs) -> Optional[float]:
**kwargs) -> float | None:
dataframe, _ = self.dp.get_analyzed_dataframe(pair, self.timeframe)
last_candle = dataframe.iloc[-1].squeeze()
@@ -439,7 +440,7 @@ Stoploss values returned from `custom_stoploss()` must specify a percentage rela
def custom_stoploss(self, pair: str, trade: Trade, current_time: datetime,
current_rate: float, current_profit: float, after_fill: bool,
**kwargs) -> Optional[float]:
**kwargs) -> float | None:
# once the profit has risen above 10%, keep the stoploss at 7% above the open price
if current_profit > 0.10:
@@ -482,7 +483,7 @@ The helper function `stoploss_from_absolute()` can be used to convert from an ab
def custom_stoploss(self, pair: str, trade: Trade, current_time: datetime,
current_rate: float, current_profit: float, after_fill: bool,
**kwargs) -> Optional[float]:
**kwargs) -> float | None:
dataframe, _ = self.dp.get_analyzed_dataframe(pair, self.timeframe)
trade_date = timeframe_to_prev_date(self.timeframe, trade.open_date_utc)
candle = dataframe.iloc[-1].squeeze()
@@ -519,8 +520,8 @@ class AwesomeStrategy(IStrategy):
# ... populate_* methods
def custom_entry_price(self, pair: str, trade: Optional[Trade], current_time: datetime, proposed_rate: float,
entry_tag: Optional[str], side: str, **kwargs) -> float:
def custom_entry_price(self, pair: str, trade: Trade | None, current_time: datetime, proposed_rate: float,
entry_tag: str | None, side: str, **kwargs) -> float:
dataframe, last_updated = self.dp.get_analyzed_dataframe(pair=pair,
timeframe=self.timeframe)
@@ -530,7 +531,7 @@ class AwesomeStrategy(IStrategy):
def custom_exit_price(self, pair: str, trade: Trade,
current_time: datetime, proposed_rate: float,
current_profit: float, exit_tag: Optional[str], **kwargs) -> float:
current_profit: float, exit_tag: str | None, **kwargs) -> float:
dataframe, last_updated = self.dp.get_analyzed_dataframe(pair=pair,
timeframe=self.timeframe)
@@ -662,7 +663,7 @@ class AwesomeStrategy(IStrategy):
# ... populate_* methods
def confirm_trade_entry(self, pair: str, order_type: str, amount: float, rate: float,
time_in_force: str, current_time: datetime, entry_tag: Optional[str],
time_in_force: str, current_time: datetime, entry_tag: str | None,
side: str, **kwargs) -> bool:
"""
Called right before placing a entry order.
@@ -820,8 +821,8 @@ class DigDeeperStrategy(IStrategy):
# This is called when placing the initial order (opening trade)
def custom_stake_amount(self, pair: str, current_time: datetime, current_rate: float,
proposed_stake: float, min_stake: Optional[float], max_stake: float,
leverage: float, entry_tag: Optional[str], side: str,
proposed_stake: float, min_stake: float | None, max_stake: float,
leverage: float, entry_tag: str | None, side: str,
**kwargs) -> float:
# We need to leave most of the funds for possible further DCA orders
@@ -830,11 +831,11 @@ class DigDeeperStrategy(IStrategy):
def adjust_trade_position(self, trade: Trade, current_time: datetime,
current_rate: float, current_profit: float,
min_stake: Optional[float], max_stake: float,
min_stake: float | None, max_stake: float,
current_entry_rate: float, current_exit_rate: float,
current_entry_profit: float, current_exit_profit: float,
**kwargs
) -> Union[Optional[float], Tuple[Optional[float], Optional[str]]]:
) -> float | None | tuple[float | None, str | None]:
"""
Custom trade adjustment logic, returning the stake amount that a trade should be
increased or decreased.
@@ -890,7 +891,7 @@ class DigDeeperStrategy(IStrategy):
# Hope you have a deep wallet!
try:
# This returns first order stake size
stake_amount = filled_entries[0].stake_amount
stake_amount = filled_entries[0].stake_amount_filled
# This then calculates current safety order size
stake_amount = stake_amount * (1 + (count_of_entries * 0.25))
return stake_amount, "1/3rd_increase"
@@ -946,9 +947,9 @@ class AwesomeStrategy(IStrategy):
# ... populate_* methods
def adjust_entry_price(self, trade: Trade, order: Optional[Order], pair: str,
def adjust_entry_price(self, trade: Trade, order: Order | None, pair: str,
current_time: datetime, proposed_rate: float, current_order_rate: float,
entry_tag: Optional[str], side: str, **kwargs) -> float:
entry_tag: str | None, side: str, **kwargs) -> float:
"""
Entry price re-adjustment logic, returning the user desired limit price.
This only executes when a order was already placed, still open (unfilled fully or partially)
@@ -975,7 +976,7 @@ class AwesomeStrategy(IStrategy):
pair == "BTC/USDT"
and entry_tag == "long_sma200"
and side == "long"
and (current_time - timedelta(minutes=10)) > trade.open_date_utc
and (current_time - timedelta(minutes=10)) <= trade.open_date_utc
):
# just cancel the order if it has been filled more than half of the amount
if order.filled > order.remaining:
@@ -1003,7 +1004,7 @@ For markets / exchanges that don't support leverage, this method is ignored.
class AwesomeStrategy(IStrategy):
def leverage(self, pair: str, current_time: datetime, current_rate: float,
proposed_leverage: float, max_leverage: float, entry_tag: Optional[str], side: str,
proposed_leverage: float, max_leverage: float, entry_tag: str | None, side: str,
**kwargs) -> float:
"""
Customize leverage for each new trade. This method is only called in futures mode.

View File

@@ -2,52 +2,93 @@
This page explains how to customize your strategies, add new indicators and set up trading rules.
Please familiarize yourself with [Freqtrade basics](bot-basics.md) first, which provides overall info on how the bot operates.
If you haven't already, please familiarize yourself with:
- the [Freqtrade strategy 101](strategy-101.md), which provides a quick start to strategy development
- the [Freqtrade bot basics](bot-basics.md), which provides overall info on how the bot operates
## Develop your own strategy
The bot includes a default strategy file.
Also, several other strategies are available in the [strategy repository](https://github.com/freqtrade/freqtrade-strategies).
You will however most likely have your own idea for a strategy.
This document intends to help you convert your strategy idea into your own strategy.
To get started, use `freqtrade new-strategy --strategy AwesomeStrategy` (you can obviously use your own naming for your strategy).
This will create a new strategy file from a template, which will be located under `user_data/strategies/AwesomeStrategy.py`.
This document intends to help you convert your ideas into a working strategy.
### Generating a strategy template
To get started, you can use the command:
```bash
freqtrade new-strategy --strategy AwesomeStrategy
```
This will create a new strategy called `AwesomeStrategy` from a template, which will be located using the filename `user_data/strategies/AwesomeStrategy.py`.
!!! Note
This is just a template file, which will most likely not be profitable out of the box.
There is a difference between the *name* of the strategy and the filename. In most commands, Freqtrade uses the *name* of the strategy, *not the filename*.
!!! Note
The `new-strategy` command generates starting examples which will not be profitable out of the box.
??? Hint "Different template levels"
`freqtrade new-strategy` has an additional parameter, `--template`, which controls the amount of pre-build information you get in the created strategy. Use `--template minimal` to get an empty strategy without any indicator examples, or `--template advanced` to get a template with most callbacks defined.
`freqtrade new-strategy` has an additional parameter, `--template`, which controls the amount of pre-built information you get in the created strategy. Use `--template minimal` to get an empty strategy without any indicator examples, or `--template advanced` to get a template with more complicated features defined.
### Anatomy of a strategy
A strategy file contains all the information needed to build a good strategy:
A strategy file contains all the information needed to build the strategy logic:
- Candle data in OHLCV format
- Indicators
- Entry strategy rules
- Exit strategy rules
- Minimal ROI recommended
- Stoploss strongly recommended
- Entry logic
- Signals
- Exit logic
- Signals
- Minimal ROI
- Callbacks ("custom functions")
- Stoploss
- Fixed/absolute
- Trailing
- Callbacks ("custom functions")
- Pricing [optional]
- Position adjustment [optional]
The bot also include a sample strategy called `SampleStrategy` you can update: `user_data/strategies/sample_strategy.py`.
You can test it with the parameter: `--strategy SampleStrategy`
The bot includes a sample strategy called `SampleStrategy` that you can use as a basis: `user_data/strategies/sample_strategy.py`.
You can test it with the parameter: `--strategy SampleStrategy`. Remember that you use the strategy class name, not the filename.
Additionally, there is an attribute called `INTERFACE_VERSION`, which defines the version of the strategy interface the bot should use.
The current version is 3 - which is also the default when it's not set explicitly in the strategy.
Future versions will require this to be set.
You may see older strategies set to interface version 2, and these will need to be updated to v3 terminology as future versions will require this to be set.
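For example, declaring the interface version explicitly is a one-line class attribute (a minimal sketch):

```python
class AwesomeStrategy(IStrategy):
    # explicitly declare the current strategy interface version
    INTERFACE_VERSION = 3
```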
Starting the bot in dry or live mode is accomplished using the `trade` command:
```bash
freqtrade trade --strategy AwesomeStrategy
```
### Bot modes
Freqtrade strategies can be processed by the Freqtrade bot in 5 main modes:
- backtesting
- hyperopting
- dry ("forward testing")
- live
- FreqAI (not covered here)
Check the [configuration documentation](configuration.md) about how to set the bot to dry or live mode.
**Always use dry mode when testing as this gives you an idea of how your strategy will work in reality without risking capital.**
## Diving in deeper
**For the following section we will use the [user_data/strategies/sample_strategy.py](https://github.com/freqtrade/freqtrade/blob/develop/freqtrade/templates/sample_strategy.py)
file as reference.**
!!! Note "Strategies and Backtesting"
To avoid problems and unexpected differences between Backtesting and dry/live modes, please be aware
To avoid problems and unexpected differences between backtesting and dry/live modes, please be aware
that during backtesting the full time range is passed to the `populate_*()` methods at once.
It is therefore best to use vectorized operations (across the whole dataframe, not loops) and
avoid index referencing (`df.iloc[-1]`), but instead use `df.shift()` to get to the previous candle.
@@ -57,14 +98,22 @@ file as reference.**
needs to take care to avoid having the strategy utilize data from the future.
Some common patterns for this are listed in the [Common Mistakes](#common-mistakes-when-developing-strategies) section of this document.
??? Hint "Lookahead and recursive analysis"
Freqtrade includes two helpful commands to help assess common lookahead (using future data) and
recursive bias (variance in indicator values) issues. Before running a strategy in dry or live mode,
you should always use these commands first. Please check the relevant documentation for
[lookahead](lookahead-analysis.md) and [recursive](recursive-analysis.md) analysis.
### Dataframe
Freqtrade uses [pandas](https://pandas.pydata.org/) to store/provide the candlestick (OHLCV) data.
Pandas is a great library developed for processing large amounts of data.
Pandas is a great library developed for processing large amounts of data in tabular format.
Each row in a dataframe corresponds to one candle on a chart, with the latest candle always being the last in the dataframe (sorted by date).
Each row in a dataframe corresponds to one candle on a chart, with the latest complete candle always being the last in the dataframe (sorted by date).
``` output
If we were to look at the first few rows of the main dataframe using the pandas `head()` function, we would see:
```output
> dataframe.head()
date open high low close volume
0 2021-11-09 23:25:00+00:00 67279.67 67321.84 67255.01 67300.97 44.62253
@@ -74,20 +123,16 @@ Each row in a dataframe corresponds to one candle on a chart, with the latest ca
4 2021-11-09 23:45:00+00:00 67160.48 67160.48 66901.26 66943.37 111.39292
```
Pandas provides fast ways to calculate metrics. To benefit from this speed, it's advised to not use loops, but use vectorized methods instead.
Vectorized operations perform calculations across the whole range of data and are therefore, compared to looping through each row, a lot faster when calculating indicators.
As a dataframe is a table, simple python comparisons like the following will not work
A dataframe is a table where columns are not single values, but a series of data values. As such, simple python comparisons like the following will not work:
``` python
if dataframe['rsi'] > 30:
dataframe['enter_long'] = 1
```
The above section will fail with `The truth value of a Series is ambiguous. [...]`.
The above section will fail with `The truth value of a Series is ambiguous [...]`.
This must instead be written in a pandas-compatible way, so the operation is performed across the whole dataframe.
This must instead be written in a pandas-compatible way, so the operation is performed across the whole dataframe, i.e. `vectorisation`.
``` python
dataframe.loc[
@@ -97,13 +142,38 @@ This must instead be written in a pandas-compatible way, so the operation is per
With this section, you have a new column in your dataframe, which has `1` assigned whenever RSI is above 30.
Freqtrade uses this new column as an entry signal, where it is assumed that a trade will subsequently open on the next open candle.
Pandas provides fast ways to calculate metrics, i.e. "vectorisation". To benefit from this speed, it is advised to not use loops, but use vectorized methods instead.
Vectorized operations perform calculations across the whole range of data and are therefore, compared to looping through each row, a lot faster when calculating indicators.
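For reference, the vectorised equivalent of the loop-style comparison above could be written as follows (a minimal sketch of the pattern described in this section):

```python
dataframe.loc[
    (dataframe['rsi'] > 30) &
    (dataframe['volume'] > 0),
    'enter_long'] = 1
```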
??? Hint "Signals vs Trades"
- Signals are generated from indicators at candle close, and are intentions to enter a trade.
- Trades are orders that are executed (on the exchange in live mode) where a trade will then open as close to next candle open as possible.
!!! Warning "Trade order assumptions"
In backtesting, signals are generated on candle close. Trades are then initiated immediately at the next candle open.
In dry and live modes, this may be delayed because all pair dataframes need to be analysed first, before trade processing happens for each of those pairs. This means that in dry/live you need to be mindful of keeping the computation
delay as low as possible, usually by running a low number of pairs and having a CPU with a good clock speed.
#### Why can't I see "real time" candle data?
Freqtrade does not store incomplete/unfinished candles in the dataframe.
The use of incomplete data for making strategy decisions is called "repainting" and you might see other platforms allow this.
Freqtrade does not. Only complete/finished candle data is available in the dataframe.
### Customize Indicators
Buy and sell signals need indicators. You can add more indicators by extending the list contained in the method `populate_indicators()` from your strategy file.
Entry and exit signals need indicators. You can add more indicators by extending the list contained in the method `populate_indicators()` from your strategy file.
You should only add the indicators used in either `populate_entry_trend()`, `populate_exit_trend()`, or to populate another indicator, otherwise performance may suffer.
It's important to always return the dataframe without removing/modifying the columns `"open", "high", "low", "close", "volume"`, otherwise these fields would contain something unexpected.
It's important to always return the dataframe from these three functions without removing/modifying the columns `"open", "high", "low", "close", "volume"`, otherwise these fields would contain something unexpected.
Sample:
@@ -124,7 +194,7 @@ def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame
stoch = ta.STOCHF(dataframe)
dataframe['fastd'] = stoch['fastd']
dataframe['fastk'] = stoch['fastk']
dataframe['blower'] = ta.BBANDS(dataframe, nbdevup=2, nbdevdn=2)['lowerband']
dataframe['bb_lower'] = ta.BBANDS(dataframe, nbdevup=2, nbdevdn=2)['lowerband']
dataframe['sma'] = ta.SMA(dataframe, timeperiod=40)
dataframe['tema'] = ta.TEMA(dataframe, timeperiod=9)
dataframe['mfi'] = ta.MFI(dataframe)
@@ -145,6 +215,8 @@ def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame
dataframe['plus_di'] = ta.PLUS_DI(dataframe)
dataframe['minus_dm'] = ta.MINUS_DM(dataframe)
dataframe['minus_di'] = ta.MINUS_DI(dataframe)
# remember to always return the dataframe
return dataframe
```
@@ -164,11 +236,13 @@ Additional technical libraries can be installed as necessary, or custom indicato
### Strategy startup period
Most indicators have an instable startup period, in which they are either not available (NaN), or the calculation is incorrect. This can lead to inconsistencies, since Freqtrade does not know how long this instable period should be.
Some indicators have an unstable startup period in which there isn't enough candle data to calculate any values (NaN), or the calculation is incorrect. This can lead to inconsistencies, since Freqtrade does not know how long this unstable period is and uses whatever indicator values are in the dataframe.
To account for this, the strategy can be assigned the `startup_candle_count` attribute.
This should be set to the maximum number of candles that the strategy requires to calculate stable indicators. In the case where a user includes higher timeframes with informative pairs, the `startup_candle_count` does not necessarily change. The value is the maximum period (in candles) that any of the informative timeframes need to compute stable indicators.
You can use [recursive-analysis](recursive-analysis.md) to check and find the correct `startup_candle_count` to be used.
You can use [recursive-analysis](recursive-analysis.md) to check and find the correct `startup_candle_count` to be used. When recursive analysis shows a variance of 0%, then you can be sure that you have enough startup candle data.
In this example strategy, this should be set to 400 (`startup_candle_count = 400`), since the minimum needed history for ema100 calculation to make sure the value is correct is 400 candles.
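In strategy code, this is a simple class attribute (a sketch, using the value from the example above):

```python
class AwesomeStrategy(IStrategy):
    # ema100 needs a generous history buffer before its values stabilise
    startup_candle_count: int = 400
```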
@@ -195,19 +269,22 @@ Let's try to backtest 1 month (January 2019) of 5m candles using an example stra
freqtrade backtesting --timerange 20190101-20190201 --timeframe 5m
```
Assuming `startup_candle_count` is set to 400, backtesting knows it needs 400 candles to generate valid buy signals. It will load data from `20190101 - (400 * 5m)` - which is ~2018-12-30 11:40:00.
If this data is available, indicators will be calculated with this extended timerange. The instable startup period (up to 2019-01-01 00:00:00) will then be removed before starting backtesting.
Assuming `startup_candle_count` is set to 400, backtesting knows it needs 400 candles to generate valid entry signals. It will load data from `20190101 - (400 * 5m)` - which is ~2018-12-30 11:40:00.
!!! Note
If data for the startup period is not available, then the timerange will be adjusted to account for this startup period - so Backtesting would start at 2019-01-02 09:20:00.
If this data is available, indicators will be calculated with this extended timerange. The unstable startup period (up to 2019-01-01 00:00:00) will then be removed before backtesting is carried out.
!!! Note "Unavailable startup candle data"
If data for the startup period is not available, then the timerange will be adjusted to account for this startup period. In our example, backtesting would then start from 2019-01-02 09:20:00.
### Entry signal rules
Edit the method `populate_entry_trend()` in your strategy file to update your entry strategy.
It's important to always return the dataframe without removing/modifying the columns `"open", "high", "low", "close", "volume"`, otherwise these fields would contain something unexpected.
It's important to always return the dataframe without removing/modifying the columns `"open", "high", "low", "close", "volume"`, otherwise these fields would contain something unexpected. The strategy may then produce invalid values, or cease to work entirely.
This method will also define a new column, `"enter_long"` (`"enter_short"` for shorts), which needs to contain 1 for entries, and 0 for "no action". `enter_long` is a mandatory column that must be set even if the strategy is shorting only.
This method will also define a new column, `"enter_long"` (`"enter_short"` for shorts), which needs to contain `1` for entries, and `0` for "no action". `enter_long` is a mandatory column that must be set even if the strategy is shorting only.
You can name your entry signals by using the `"enter_tag"` column, which can help debug and assess your strategy later.
Sample from `user_data/strategies/sample_strategy.py`:
@@ -232,12 +309,15 @@ def populate_entry_trend(self, dataframe: DataFrame, metadata: dict) -> DataFram
```
??? Note "Enter short trades"
Short-entries can be created by setting `enter_short` (corresponds to `enter_long` for long trades).
Short entries can be created by setting `enter_short` (corresponds to `enter_long` for long trades).
The `enter_tag` column remains identical.
Short-trades need to be supported by your exchange and market configuration!
Please make sure to set [`can_short`]() appropriately on your strategy if you intend to short.
Shorting needs to be supported by your exchange and market configuration!
Also, make sure you set [`can_short`](#can-short) appropriately on your strategy if you intend to short.
```python
# allow both long and short trades
can_short = True
def populate_entry_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
dataframe.loc[
(
@@ -261,17 +341,21 @@ def populate_entry_trend(self, dataframe: DataFrame, metadata: dict) -> DataFram
```
!!! Note
Buying requires sellers to buy from - therefore volume needs to be > 0 (`dataframe['volume'] > 0`) to make sure that the bot does not buy/sell in no-activity periods.
Buying requires sellers to buy from. Therefore volume needs to be > 0 (`dataframe['volume'] > 0`) to make sure that the bot does not buy/sell in no-activity periods.
### Exit signal rules
Edit the method `populate_exit_trend()` into your strategy file to update your exit strategy.
The exit-signal can be suppressed by setting `use_exit_signal` to false in the configuration or strategy.
`use_exit_signal` will not influence [signal collision rules](#colliding-signals) - which will still apply and can prevent entries.
It's important to always return the dataframe without removing/modifying the columns `"open", "high", "low", "close", "volume"`, otherwise these fields would contain something unexpected.
It's important to always return the dataframe without removing/modifying the columns `"open", "high", "low", "close", "volume"`, otherwise these fields would contain something unexpected. The strategy may then produce invalid values, or cease to work entirely.
This method will also define a new column, `"exit_long"` (`"exit_short"` for shorts), which needs to contain 1 for exits, and 0 for "no action".
This method will also define a new column, `"exit_long"` (`"exit_short"` for shorts), which needs to contain `1` for exits, and `0` for "no action".
You can name your exit signals by using the `"exit_tag"` column, which can help debug and assess your strategy later.
Sample from `user_data/strategies/sample_strategy.py`:
@@ -295,11 +379,15 @@ def populate_exit_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame
```
??? Note "Exit short trades"
Short-exits can be created by setting `exit_short` (corresponds to `exit_long`).
Short exits can be created by setting `exit_short` (corresponds to `exit_long`).
The `exit_tag` column remains identical.
Short-trades need to be supported by your exchange and market configuration!
Shorting needs to be supported by your exchange and market configuration!
Also, make sure you set [`can_short`](#can-short) appropriately on your strategy if you intend to short.
```python
# allow both long and short trades
can_short = True
def populate_exit_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
dataframe.loc[
(
@@ -322,9 +410,9 @@ def populate_exit_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame
### Minimal ROI
This dict defines the minimal Return On Investment (ROI) a trade should reach before exiting, independent from the exit signal.
The `minimal_roi` strategy variable defines the minimal Return On Investment (ROI) a trade should reach before exiting, independent from the exit signal.
It is of the following format, with the dict key (left side of the colon) being the minutes passed since the trade opened, and the value (right side of the colon) being the percentage.
It is of the following format, i.e. a python `dict`, with the dict key (left side of the colon) being the minutes passed since the trade opened, and the value (right side of the colon) being the percentage.
```python
minimal_roi = {
@@ -344,14 +432,19 @@ The above configuration would therefore mean:
The calculation does include fees.
#### Disabling minimal ROI
To disable ROI completely, set it to an empty dictionary:
```python
minimal_roi = {}
```
#### Using calculations in minimal ROI
To use times based on candle duration (timeframe), the following snippet can be handy.
This will allow you to change the timeframe for the strategy, and ROI times will still be set as candles (e.g. after 3 candles ...)
This will allow you to change the timeframe for the strategy, but the minimal ROI times will still be set as candles, e.g. after 3 candles.
``` python
from freqtrade.exchange import timeframe_to_minutes
@@ -368,9 +461,9 @@ class AwesomeStrategy(IStrategy):
```
??? info "Orders that don't fill immediately"
`minimal_roi` will take the `trade.open_date` as reference, which is the time the trade was initialized / the first order for this trade was placed.
This will also hold true for limit orders that don't fill immediately (usually in combination with "off-spot" prices through `custom_entry_price()`), as well as for cases where the initial order is replaced through `adjust_entry_price()`.
The time used will still be from the initial `trade.open_date` (when the initial order was first placed), not from the newly placed order date.
`minimal_roi` will take the `trade.open_date` as reference, which is the time the trade was initialized, i.e. when the first order for this trade was placed.
This will also hold true for limit orders that don't fill immediately (usually in combination with "off-spot" prices through `custom_entry_price()`), as well as for cases where the initial order price is replaced through `adjust_entry_price()`.
The time used will still be from the initial `trade.open_date` (when the initial order was first placed), not from the newly placed or adjusted order date.
### Stoploss
@@ -386,35 +479,44 @@ For the full documentation on stoploss features, look at the dedicated [stoploss
### Timeframe
This is the set of candles the bot should download and use for the analysis.
This is the periodicity of candles the bot should use in the strategy.
Common values are `"1m"`, `"5m"`, `"15m"`, `"1h"`, however all values supported by your exchange should work.
Please note that the same entry/exit signals may work well with one timeframe, but not with the others.
Please note that the same entry/exit signals may work well with one timeframe, but not with others.
This setting is accessible within the strategy methods as the `self.timeframe` attribute.
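In the strategy, the timeframe is set as a class attribute (a minimal sketch; the value is illustrative):

```python
class AwesomeStrategy(IStrategy):
    # candle periodicity used for the main dataframe
    timeframe = '5m'
```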
### Can short
To use short signals in futures markets, you will have to let us know to do so by setting `can_short=True`.
To use short signals in futures markets, you will have to set `can_short = True`.
Strategies which enable this will fail to load on spot markets.
Disabling of this will have short signals ignored (also in futures markets).
If you have `1` values in the `enter_short` column to raise short signals, setting `can_short = False` (which is the default) will mean that these short signals are ignored, even if you have specified futures markets in your configuration.
### Metadata dict
The metadata-dict (available for `populate_entry_trend`, `populate_exit_trend`, `populate_indicators`) contains additional information.
Currently this is `pair`, which can be accessed using `metadata['pair']` - and will return a pair in the format `XRP/BTC`.
The `metadata` dict (available for `populate_entry_trend`, `populate_exit_trend`, `populate_indicators`) contains additional information.
Currently this is `pair`, which can be accessed using `metadata['pair']`, and will return a pair in the format `XRP/BTC` (or `XRP/BTC:BTC` for futures markets).
The Metadata-dict should not be modified and does not persist information across multiple calls.
Instead, have a look at the [Storing information](strategy-advanced.md#storing-information-persistent) section.
The metadata dict should not be modified and does not persist information across multiple functions in your strategy.
Instead, please check the [Storing information](strategy-advanced.md#storing-information-persistent) section.
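A typical read-only use of the metadata dict might look like this (illustrative only - the pair check and `sma_fast` column are hypothetical):

```python
import talib.abstract as ta
from pandas import DataFrame


def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
    # metadata['pair'] identifies which pair the current dataframe belongs to
    if metadata['pair'] == 'BTC/USDT':
        # purely illustrative: apply a pair-specific indicator
        dataframe['sma_fast'] = ta.SMA(dataframe, timeperiod=20)
    return dataframe
```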
--8<-- "includes/strategy-imports.md"
## Strategy file loading
By default, freqtrade will attempt to load strategies from all `.py` files within `user_data/strategies`.
By default, freqtrade will attempt to load strategies from all `.py` files within the `userdir` (default `user_data/strategies`).
Assuming your strategy is called `AwesomeStrategy`, stored in the file `user_data/strategies/AwesomeStrategy.py`, then you can start freqtrade with `freqtrade trade --strategy AwesomeStrategy`.
Note that we're using the class-name, not the file name.
Assuming your strategy is called `AwesomeStrategy`, stored in the file `user_data/strategies/AwesomeStrategy.py`, then you can start freqtrade in dry (or live, depending on your configuration) mode with:
```bash
freqtrade trade --strategy AwesomeStrategy
```
Note that we're using the class name, not the file name.
You can use `freqtrade list-strategies` to see a list of all strategies Freqtrade is able to load (all strategies in the correct folder).
It will also include a "status" field, highlighting potential problems.
@@ -426,9 +528,11 @@ It will also include a "status" field, highlighting potential problems.
### Get data for non-tradeable pairs
Data for additional, informative pairs (reference pairs) can be beneficial for some strategies.
Data for additional, informative pairs (reference pairs) can be beneficial for some strategies to see data on a wider timeframe.
OHLCV data for these pairs will be downloaded as part of the regular whitelist refresh process and is available via `DataProvider` just as other pairs (see below).
These parts will **not** be traded unless they are also specified in the pair whitelist, or have been selected by Dynamic Whitelisting.
These pairs will **not** be traded unless they are also specified in the pair whitelist, or have been selected by Dynamic Whitelisting, e.g. `VolumePairlist`.
The pairs need to be specified as tuples in the format `("pair", "timeframe")`, with pair as the first and timeframe as the second argument.
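A minimal `informative_pairs()` sketch using this tuple format (the pairs and timeframes below are purely illustrative):

```python
def informative_pairs(self):
    # ("pair", "timeframe") tuples - these pairs are downloaded but not traded
    return [
        ("BTC/USDT", "1d"),
        ("ETH/USDT", "1h"),
    ]
```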
@@ -468,18 +572,24 @@ A full sample can be found [in the DataProvider section](#complete-data-provider
### Informative pairs decorator (`@informative()`)
In most common case it is possible to easily define informative pairs by using a decorator. All decorated `populate_indicators_*` methods run in isolation,
not having access to data from other informative pairs, in the end all informative dataframes are merged and passed to main `populate_indicators()` method.
When hyperopting, use of hyperoptable parameter `.value` attribute is not supported. Please use `.range` attribute. See [optimizing an indicator parameter](hyperopt.md#optimizing-an-indicator-parameter)
for more information.
To easily define informative pairs, use the `@informative` decorator. All decorated `populate_indicators_*` methods run in isolation,
and do not have access to data from other informative pairs. However, all informative dataframes for each pair are merged and passed to main `populate_indicators()` method.
!!! Note
Do not use the `@informative` decorator if you need to use data from one informative pair when generating another informative pair. Instead, define informative pairs manually as described [in the DataProvider section](#complete-data-provider-sample).
When hyperopting, use of the hyperoptable parameter `.value` attribute is not supported. Please use the `.range` attribute. See [optimizing an indicator parameter](hyperopt.md#optimizing-an-indicator-parameter) for more information.
??? info "Full documentation"
``` python
def informative(timeframe: str, asset: str = '',
fmt: Optional[Union[str, Callable[[KwArg(str)], str]]] = None,
*,
candle_type: Optional[CandleType] = None,
ffill: bool = True) -> Callable[[PopulateIndicators], PopulateIndicators]:
def informative(
timeframe: str,
asset: str = "",
fmt: str | Callable[[Any], str] | None = None,
*,
candle_type: CandleType | str | None = None,
ffill: bool = True,
) -> Callable[[PopulateIndicators], PopulateIndicators]:
"""
A decorator for populate_indicators_Nn(self, dataframe, metadata), allowing these functions to
define informative indicators.
@@ -568,10 +678,6 @@ for more information.
```
!!! Note
Do not use `@informative` decorator if you need to use data of one informative pair when generating another informative pair. Instead, define informative pairs
manually as described [in the DataProvider section](#complete-data-provider-sample).
!!! Note
Use string formatting when accessing informative dataframes of other pairs. This will allow easily changing stake currency in config without having to adjust strategy code.
@@ -592,18 +698,15 @@ for more information.
Alternatively column renaming may be used to remove stake currency from column names: `@informative('1h', 'BTC/{stake}', fmt='{base}_{column}_{timeframe}')`.
!!! Warning "Duplicate method names"
Methods tagged with `@informative()` decorator must always have unique names! Re-using same name (for example when copy-pasting already defined informative method)
will overwrite previously defined method and not produce any errors due to limitations of Python programming language. In such cases you will find that indicators
created in earlier-defined methods are not available in the dataframe. Carefully review method names and make sure they are unique!
Methods tagged with the `@informative()` decorator must always have unique names! Reusing the same name (for example when copy-pasting already defined informative methods) will overwrite previously defined methods and not produce any errors due to limitations of the Python programming language. In such cases you will find that indicators created in methods higher up in the strategy file are not available in the dataframe. Carefully review method names and make sure they are unique!
### *merge_informative_pair()*
This method helps you merge an informative pair to a regular dataframe without lookahead bias.
It's there to help you merge the dataframe in a safe and consistent way.
This method helps you merge an informative pair to the regular main dataframe safely and consistently, without lookahead bias.
Options:
- Rename the columns for you to create unique columns
- Rename the columns to create unique columns
- Merge the dataframe without lookahead bias
- Forward-fill (optional)
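A minimal usage sketch, assuming a 1h informative dataframe for the same pair has already been built inside `populate_indicators()`:

```python
from freqtrade.strategy import merge_informative_pair

# `informative` is assumed to be a 1h dataframe with indicators already populated
dataframe = merge_informative_pair(dataframe, informative, self.timeframe, '1h', ffill=True)
```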
@@ -654,20 +757,20 @@ All columns of the informative dataframe will be available on the returning data
```
!!! Warning "Informative timeframe < timeframe"
Using informative timeframes smaller than the dataframe timeframe is not recommended with this method, as it will not use any of the additional information this would provide.
To use the more detailed information properly, more advanced methods should be applied (which are out of scope for freqtrade documentation, as it'll depend on the respective need).
Using informative timeframes smaller than the main dataframe timeframe is not recommended with this method, as it will not use any of the additional information this would provide.
To use the more detailed information properly, more advanced methods should be applied (which are out of scope for this documentation).
## Additional data (DataProvider)
The strategy provides access to the `DataProvider`. This allows you to get additional data to use in your strategy.
All methods return `None` in case of failure (do not raise an exception).
All methods return `None` in case of failure, i.e. failures do not raise an exception.
Please always check the mode of operation to select the correct method to get data (samples see below).
Please always check the mode of operation to select the correct method to get data (see below for examples).
!!! Warning "Hyperopt"
Dataprovider is available during hyperopt, however it can only be used in `populate_indicators()` within a strategy.
It is not available in `populate_buy()` and `populate_sell()` methods, nor in `populate_indicators()`, if this method located in the hyperopt file.
!!! Warning "Hyperopt Limitations"
The DataProvider is available during hyperopt, however it can only be used in `populate_indicators()` **within a strategy**, not within a hyperopt class file.
It is also not available in `populate_entry_trend()` and `populate_exit_trend()` methods.
### Possible options for DataProvider
@@ -693,31 +796,31 @@ for pair, timeframe in self.dp.available_pairs:
### *current_whitelist()*
Imagine you've developed a strategy that trades the `5m` timeframe using signals generated from a `1d` timeframe on the top 10 volume pairs by volume.
Imagine you've developed a strategy that trades the `5m` timeframe using signals generated from a `1d` timeframe on the top 10 exchange pairs by volume.
The strategy might look something like this:
The strategy logic might look something like this:
*Scan through the top 10 pairs by volume using the `VolumePairList` every 5 minutes and use a 14 day RSI to buy and sell.*
*Scan through the top 10 pairs by volume using the `VolumePairList` every 5 minutes and use a 14 day RSI to enter and exit.*
Due to the limited available data, it's very difficult to resample `5m` candles into daily candles for use in a 14 day RSI. Most exchanges limit us to just 500-1000 candles which effectively gives us around 1.74 daily candles. We need 14 days at least!
Due to the limited available data, it's very difficult to resample `5m` candles into daily candles for use in a 14 day RSI. Most exchanges limit users to just 500-1000 candles which effectively gives us around 1.74 daily candles. We need 14 days at least!
Since we can't resample the data we will have to use an informative pair; and since the whitelist will be dynamic we don't know which pair(s) to use.
Since we can't resample the data we will have to use an informative pair, and since the whitelist will be dynamic we don't know which pair(s) to use! We have a problem!
This is where calling `self.dp.current_whitelist()` comes in handy.
This is where calling `self.dp.current_whitelist()` comes in handy to retrieve only those pairs in the whitelist.
```python
def informative_pairs(self):
# get access to all pairs available in whitelist.
pairs = self.dp.current_whitelist()
# Assign tf to each pair so they can be downloaded and cached for strategy.
# Assign timeframe to each pair so they can be downloaded and cached for strategy.
informative_pairs = [(pair, '1d') for pair in pairs]
return informative_pairs
```
??? Note "Plotting with current_whitelist"
Current whitelist is not supported for `plot-dataframe`, as this command is usually used by providing an explicit pairlist - and would therefore make the return values of this method misleading.
It's also not supported for freqUI visualization in [webserver mode](utils.md#webserver-mode) - as the configuration for webserver mode doesn't require a pairlist to be set.
Current whitelist is not supported for `plot-dataframe`, as this command is usually used by providing an explicit pairlist and would therefore make the return values of this method misleading.
It's also not supported for FreqUI visualization in [webserver mode](utils.md#webserver-mode), as the configuration for webserver mode doesn't require a pairlist to be set.
### *get_pair_dataframe(pair, timeframe)*
@@ -758,7 +861,7 @@ if self.dp.runmode.value in ('live', 'dry_run'):
dataframe['best_ask'] = ob['asks'][0][0]
```
The orderbook structure is aligned with the order structure from [ccxt](https://github.com/ccxt/ccxt/wiki/Manual#order-book-structure), so the result will look as follows:
The orderbook structure is aligned with the order structure from [ccxt](https://github.com/ccxt/ccxt/wiki/Manual#order-book-structure), so the result will be formatted as follows:
``` js
{
@@ -776,7 +879,7 @@ The orderbook structure is aligned with the order structure from [ccxt](https://
}
```
Therefore, using `ob['bids'][0][0]` as demonstrated above will result in using the best bid price. `ob['bids'][0][1]` would look at the amount at this orderbook position.
Therefore, using `ob['bids'][0][0]` as demonstrated above will use the best bid price. `ob['bids'][0][1]` would look at the amount at this orderbook position.
!!! Warning "Warning about backtesting"
The order book is not part of the historic data which means backtesting and hyperopt will not work correctly if this method is used, as the method will return up-to-date values.
@@ -793,12 +896,12 @@ if self.dp.runmode.value in ('live', 'dry_run'):
!!! Warning
Although the ticker data structure is a part of the ccxt Unified Interface, the values returned by this method can
vary for different exchanges. For instance, many exchanges do not return `vwap` values, some exchanges
does not always fills in the `last` field (so it can be None), etc. So you need to carefully verify the ticker
vary for different exchanges. For instance, many exchanges do not return `vwap` values, and some exchanges
do not always fill in the `last` field (so it can be None), etc. So you need to carefully verify the ticker
data returned from the exchange and add appropriate error handling / defaults.
!!! Warning "Warning about backtesting"
This method will always return up-to-date values - so usage during backtesting / hyperopt without runmode checks will lead to wrong results.
This method will always return up-to-date / real-time values. As such, usage during backtesting / hyperopt without runmode checks will lead to wrong results, e.g. your whole dataframe will contain the same single value in all rows.
### Send Notification
@@ -817,7 +920,7 @@ Notifications will only be sent in trading modes (Live/Dry-run) - so this method
!!! Warning "Spamming"
You can spam yourself pretty good by setting `always_send=True` in this method. Use this with great care and only in conditions you know will not happen throughout a candle to avoid a message every 5 seconds.
### Complete Data-provider sample
### Complete DataProvider sample
```python
from freqtrade.strategy import IStrategy, merge_informative_pair
@@ -884,14 +987,14 @@ class SampleStrategy(IStrategy):
## Additional data (Wallets)
The strategy provides access to the `wallets` object. This contains the current balances on the exchange.
The strategy provides access to the `wallets` object. This contains the current balances of your wallets/accounts on the exchange.
!!! Note "Backtesting / Hyperopt"
Wallets behaves differently depending on the function it's called.
Wallets behaves differently depending on the function from which it is called.
Within `populate_*()` methods, it'll return the full wallet as configured.
Within [callbacks](strategy-callbacks.md), you'll get the wallet state corresponding to the actual simulated wallet at that point in the simulation process.
Please always check if `wallets` is available to avoid failures during backtesting.
Always check if `wallets` is available to avoid failures during backtesting.
``` python
if self.wallets:
@@ -910,15 +1013,15 @@ if self.wallets:
## Additional data (Trades)
A history of Trades can be retrieved in the strategy by querying the database.
A history of trades can be retrieved in the strategy by querying the database.
At the top of the file, import Trade.
At the top of the file, import the required object:
```python
from freqtrade.persistence import Trade
```
The following example queries for the current pair and trades from today, however other filters can easily be added.
The following example queries trades from today for the current pair (`metadata['pair']`). Other filters can easily be added.
``` python
trades = Trade.get_trades_proxy(pair=metadata['pair'],
@@ -936,7 +1039,9 @@ For a full list of available methods, please consult the [Trade object](trade-ob
## Prevent trades from happening for a specific pair
Freqtrade locks pairs automatically for the current candle (until that candle is over) when a pair is sold, preventing an immediate re-buy of that pair.
Freqtrade locks pairs automatically for the current candle (until that candle is over) when a pair exits, preventing an immediate re-entry of that pair.
This is to prevent "waterfalls" of many and frequent trades within a single candle.
Locked pairs will show the message `Pair <pair> is currently locked.`.
@@ -947,7 +1052,7 @@ Sometimes it may be desired to lock a pair after certain events happen (e.g. mul
Freqtrade has an easy method to do this from within the strategy, by calling `self.lock_pair(pair, until, [reason])`.
`until` must be a datetime object in the future, after which trading will be re-enabled for that pair, while `reason` is an optional string detailing why the pair was locked.
Locks can also be lifted manually, by calling `self.unlock_pair(pair)` or `self.unlock_reason(<reason>)` - providing reason the pair was locked with.
Locks can also be lifted manually, by calling `self.unlock_pair(pair)` or `self.unlock_reason(<reason>)`, providing the reason the pair was locked with.
`self.unlock_reason(<reason>)` will unlock all pairs currently locked with the provided reason.
To verify if a pair is currently locked, use `self.is_pair_locked(pair)`.
@@ -956,7 +1061,7 @@ To verify if a pair is currently locked, use `self.is_pair_locked(pair)`.
Locked pairs will always be rounded up to the next candle. So assuming a `5m` timeframe, a lock with `until` set to 10:18 will lock the pair until the candle from 10:15-10:20 is finished.
!!! Warning
Manually locking pairs is not available during backtesting, only locks via Protections are allowed.
Manually locking pairs is not available during backtesting. Only locks via Protections are allowed.
#### Pair locking example
@@ -966,7 +1071,7 @@ from datetime import timedelta, datetime, timezone
# Put the above lines a the top of the strategy file, next to all the other imports
# --------
# Within populate indicators (or populate_buy):
# Within populate indicators (or populate_entry_trend):
if self.config['runmode'].value in ('live', 'dry_run'):
# fetch closed trades for the last 2 days
trades = Trade.get_trades_proxy(
@@ -979,9 +1084,9 @@ if self.config['runmode'].value in ('live', 'dry_run'):
self.lock_pair(metadata['pair'], until=datetime.now(timezone.utc) + timedelta(hours=12))
```
## Print created dataframe
## Print the main dataframe
To inspect the created dataframe, you can issue a print-statement in either `populate_entry_trend()` or `populate_exit_trend()`.
To inspect the current main dataframe, you can issue a print-statement in either `populate_entry_trend()` or `populate_exit_trend()`.
You may also want to print the pair so it's clear what data is currently shown.
``` python
@@ -1001,29 +1106,30 @@ def populate_entry_trend(self, dataframe: DataFrame, metadata: dict) -> DataFram
return dataframe
```
Printing more than a few rows is also possible (simply use `print(dataframe)` instead of `print(dataframe.tail())`), however not recommended, as that will be very verbose (~500 lines per pair every 5 seconds).
Printing more than a few rows is also possible by using `print(dataframe)` instead of `print(dataframe.tail())`. However this is not recommended, as it can result in a lot of output (~500 lines per pair every 5 seconds).
## Common mistakes when developing strategies
### Peeking into the future while backtesting
### Looking into the future while backtesting
Backtesting analyzes the whole time-range at once for performance reasons. Because of this, strategy authors need to make sure that strategies do not look-ahead into the future.
This is a common pain-point, which can cause huge differences between backtesting and dry/live run methods, since they all use data which is not available during dry/live runs, so these strategies will perform well during backtesting, but will fail / perform badly in real conditions.
Backtesting analyzes the whole dataframe timerange at once for performance reasons. Because of this, strategy authors need to make sure that strategies do not look ahead into the future, i.e. use data that would not be available in dry or live mode.
The following lists some common patterns which should be avoided to prevent frustration:
This is a common pain-point, which can cause huge differences between backtesting and dry/live run methods. Strategies that look into the future will perform well during backtesting, often with incredible profits or winrates, but will fail or perform badly in real conditions.
The following list contains some common patterns which should be avoided to prevent frustration:
- don't use `shift(-1)` or other negative values. This uses data from the future in backtesting, which is not available in dry or live modes.
- don't use `.iloc[-1]` or any other absolute position in the dataframe within `populate_` functions, as this will be different between dry-run and backtesting. Absolute `iloc` indexing is safe to use in callbacks however - see [Strategy Callbacks](strategy-callbacks.md).
- don't use `dataframe['volume'].mean()`. This uses the full DataFrame for backtesting, including data from the future. Use `dataframe['volume'].rolling(<window>).mean()` instead
- don't use `.resample('1h')`. This uses the left border of the interval, so moves data from an hour to the start of the hour. Use `.resample('1h', label='right')` instead.
- don't use functions that use all dataframe or column values, e.g. `dataframe['mean_volume'] = dataframe['volume'].mean()`. As backtesting uses the full dataframe, at any point in the dataframe, the `'mean_volume'` series would include data from the future. Use rolling() calculations instead, e.g. `dataframe['volume'].rolling(<window>).mean()`.
- don't use `.resample('1h')`. This uses the left border of the period interval, so moves data from an hour boundary to the start of the hour. Use `.resample('1h', label='right')` instead (a short sketch follows this list).
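A minimal sketch contrasting a look-ahead-prone calculation with its rolling-window equivalent (the window size and column name are illustrative):

``` python
import pandas as pd


def populate_indicators(dataframe: pd.DataFrame, metadata: dict) -> pd.DataFrame:
    # BAD: uses the whole column, so every candle "sees" future data during backtesting
    # dataframe["mean_volume"] = dataframe["volume"].mean()

    # GOOD: a rolling window only uses the current and previous candles
    dataframe["mean_volume_24"] = dataframe["volume"].rolling(24).mean()

    # GOOD: right-labelled resampling keeps hourly aggregates at the end of each hour
    # hourly_close = dataframe.set_index("date").resample("1h", label="right")["close"].last()
    return dataframe
```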
!!! Tip "Identifying problems"
You may also want to check the 2 helper commands [lookahead-analysis](lookahead-analysis.md) and [recursive-analysis](recursive-analysis.md), which can each help you figure out problems with your strategy in different ways.
Please treat them as what they are - helpers to identify most common problems. A negative result of each does not guarantee that there's none of the above errors included.
You should always use the two helper commands [lookahead-analysis](lookahead-analysis.md) and [recursive-analysis](recursive-analysis.md), which can each help you figure out problems with your strategy in different ways.
Please treat them as what they are - helpers to identify the most common problems. A negative result from each does not guarantee that none of the above errors are present.
### Colliding signals
When conflicting signals collide (e.g. both `'enter_long'` and `'exit_long'` are 1), freqtrade will do nothing and ignore the entry signal. This will avoid trades that enter, and exit immediately. Obviously, this can potentially lead to missed entries.
When conflicting signals collide (e.g. both `'enter_long'` and `'exit_long'` are set to `1`), freqtrade will do nothing and ignore the entry signal. This will avoid trades that enter and exit immediately. Obviously, this can potentially lead to missed entries.
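A minimal debugging sketch (not part of the strategy API; it assumes it runs at the end of `populate_exit_trend()`, after both signal columns have been set) to count colliding long signals:

``` python
collisions = dataframe[(dataframe["enter_long"] == 1) & (dataframe["exit_long"] == 1)]
if not collisions.empty:
    print(f"{metadata['pair']}: {len(collisions)} candles with colliding long signals")
```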
The following rules apply, and entry signals will be ignored if more than one of the 3 signals is set:
@@ -1032,11 +1138,11 @@ The following rules apply, and entry signals will be ignored if more than one of
## Further strategy ideas
To get additional Ideas for strategies, head over to the [strategy repository](https://github.com/freqtrade/freqtrade-strategies). Feel free to use them as they are - but results will depend on the current market situation, pairs used etc. - therefore please backtest the strategy for your exchange/desired pairs first, evaluate carefully, use at your own risk.
Feel free to use any of them as inspiration for your own strategies.
We're happy to accept Pull Requests containing new Strategies to that repo.
To get additional ideas for strategies, head over to the [strategy repository](https://github.com/freqtrade/freqtrade-strategies). Feel free to use them as examples, but results will depend on the current market situation, pairs used, etc. Therefore, these strategies should be considered only for learning purposes, not real world trading. Please backtest the strategy for your exchange/desired pairs first, then dry run to evaluate carefully, and use at your own risk.
## Next step
Feel free to use any of them as inspiration for your own strategies. We're happy to accept Pull Requests containing new strategies to the repository.
## Next steps
Now you have a perfect strategy you probably want to backtest it.
Your next step is to learn [How to use the Backtesting](backtesting.md).
Your next step is to learn [how to use backtesting](backtesting.md).

View File

@@ -215,7 +215,7 @@ trades.groupby("pair")["exit_reason"].value_counts()
```
## Analyze the loaded trades for trade parallelism
This can be useful to find the best `max_open_trades` parameter, when used with backtesting in conjunction with `--disable-max-market-positions`.
This can be useful to find the best `max_open_trades` parameter, when used with backtesting in conjunction with a very high `max_open_trades` setting.
`analyze_trade_parallelism()` returns a timeseries dataframe with an "open_trades" column, specifying the number of open trades for each candle.
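A minimal usage sketch (the export filename and the timeframe are placeholders for your own backtest result):

``` python
from freqtrade.data.btanalysis import analyze_trade_parallelism, load_backtest_data

# Load trades from a backtest export and count open trades per candle
trades = load_backtest_data("user_data/backtest_results/backtest-result.json")
parallel_trades = analyze_trade_parallelism(trades, "5m")
print(parallel_trades["open_trades"].max())
```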

View File

@@ -214,8 +214,8 @@ class AwesomeStrategy(IStrategy):
``` python hl_lines="4"
class AwesomeStrategy(IStrategy):
def custom_stake_amount(self, pair: str, current_time: datetime, current_rate: float,
proposed_stake: float, min_stake: Optional[float], max_stake: float,
entry_tag: Optional[str], side: str, **kwargs) -> float:
proposed_stake: float, min_stake: float | None, max_stake: float,
entry_tag: str | None, side: str, **kwargs) -> float:
# ...
return proposed_stake
```
@@ -237,7 +237,7 @@ After:
``` python hl_lines="4"
class AwesomeStrategy(IStrategy):
def confirm_trade_entry(self, pair: str, order_type: str, amount: float, rate: float,
time_in_force: str, current_time: datetime, entry_tag: Optional[str],
time_in_force: str, current_time: datetime, entry_tag: str | None,
side: str, **kwargs) -> bool:
return True
```
@@ -280,8 +280,8 @@ After:
``` python hl_lines="3"
class AwesomeStrategy(IStrategy):
def custom_entry_price(self, pair: str, trade: Optional[Trade], current_time: datetime, proposed_rate: float,
entry_tag: Optional[str], side: str, **kwargs) -> float:
def custom_entry_price(self, pair: str, trade: Trade | None, current_time: datetime, proposed_rate: float,
entry_tag: str | None, side: str, **kwargs) -> float:
return proposed_rate
```
@@ -312,7 +312,7 @@ After:
``` python hl_lines="5 7"
def custom_stoploss(self, pair: str, trade: 'Trade', current_time: datetime,
current_rate: float, current_profit: float, after_fill: bool,
**kwargs) -> Optional[float]:
**kwargs) -> float | None:
# once the profit has risen above 10%, keep the stoploss at 7% above the open price
if current_profit > 0.10:
return stoploss_from_open(0.07, current_profit, is_short=trade.is_short)
@@ -329,7 +329,7 @@ After:
`order_time_in_force` attributes changed from `"buy"` to `"entry"` and `"sell"` to `"exit"`.
``` python
order_time_in_force: Dict = {
order_time_in_force: dict = {
"buy": "gtc",
"sell": "gtc",
}
@@ -338,7 +338,7 @@ After:
After:
``` python hl_lines="2 3"
order_time_in_force: Dict = {
order_time_in_force: dict = {
"entry": "GTC",
"exit": "GTC",
}
@@ -780,7 +780,7 @@ class MyCoolFreqaiModel(BaseRegressionModel):
def predict(
self, unfiltered_df: DataFrame, dk: FreqaiDataKitchen, **kwargs
) -> Tuple[DataFrame, npt.NDArray[np.int_]]:
) -> tuple[DataFrame, npt.NDArray[np.int_]]:
# ... your custom stuff

View File

@@ -58,6 +58,7 @@ For the Freqtrade configuration, you can then use the full value (including `-`
```json
"chat_id": "-1001332619709"
```
!!! Warning "Using telegram groups"
When using telegram groups, you're giving every member of the telegram group access to your freqtrade bot and to all commands possible via telegram. Please make sure that you can trust everyone in the telegram group to avoid unpleasant surprises.
@@ -93,9 +94,12 @@ Example configuration showing the different settings:
"trailing_stop_loss": "on",
"stop_loss": "on",
"stoploss_on_exchange": "on",
"custom_exit": "silent",
"partial_exit": "on"
"custom_exit": "silent", // custom_exit without specifying an exit reason
"partial_exit": "on",
// "custom_exit_message": "silent", // Disable individual custom exit reasons
"*": "off" // Disable all other exit reasons
},
// "exit": "off", // Simplistic configuration to disable all exit messages
"exit_cancel": "on",
"exit_fill": "off",
"protection_trigger": "off",
@@ -108,16 +112,16 @@ Example configuration showing the different settings:
},
```
`entry` notifications are sent when the order is placed, while `entry_fill` notifications are sent when the order is filled on the exchange.
`exit` notifications are sent when the order is placed, while `exit_fill` notifications are sent when the order is filled on the exchange.
`*_fill` notifications are off by default and must be explicitly enabled.
`protection_trigger` notifications are sent when a protection triggers and `protection_trigger_global` notifications trigger when global protections are triggered.
`strategy_msg` - Receive notifications from the strategy, sent via `self.dp.send_msg()` from the strategy [more details](strategy-customization.md#send-notification).
`show_candle` - show candle values as part of entry/exit messages. Only possible values are `"ohlc"` or `"off"`.
`balance_dust_level` will define what the `/balance` command takes as "dust" - Currencies with a balance below this will be shown.
`allow_custom_messages` completely disable strategy messages.
`reload` allows you to disable reload-buttons on selected messages.
* `entry` notifications are sent when the order is placed, while `entry_fill` notifications are sent when the order is filled on the exchange.
* `exit` notifications are sent when the order is placed, while `exit_fill` notifications are sent when the order is filled on the exchange.
Exit messages (`exit` and `exit_fill`) can be further controlled at the individual exit reason level, with the specific exit reason as the key. The default for all exit reasons is `on`, but this can be configured via the special `*` key, which acts as a wildcard for all exit reasons that are not explicitly defined (see the short example below).
* `*_fill` notifications are off by default and must be explicitly enabled.
* `protection_trigger` notifications are sent when a protection triggers and `protection_trigger_global` notifications trigger when global protections are triggered.
* `strategy_msg` - Receive notifications from the strategy, sent via `self.dp.send_msg()` from the strategy [more details](strategy-customization.md#send-notification).
* `show_candle` - show candle values as part of entry/exit messages. Only possible values are `"ohlc"` or `"off"`.
* `balance_dust_level` will define what the `/balance` command takes as "dust" - Currencies with a balance below this will be shown.
* `allow_custom_messages` - set to `false` to completely disable strategy messages.
* `reload` allows you to disable reload-buttons on selected messages.
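For example, a minimal sketch (inside `notification_settings`; the exit reason keys are illustrative) that only keeps ROI exit notifications and silences all other exit reasons:

```json
"exit": {
    "roi": "on",
    "*": "off"
},
```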
## Create a custom keyboard (command shortcut buttons)
@@ -238,16 +242,16 @@ Once all positions are sold, run `/stop` to completely stop the bot.
For each open trade, the bot will send you the following message.
Enter Tag is configurable via Strategy.
> **Trade ID:** `123` `(since 1 days ago)`
> **Current Pair:** CVC/BTC
> **Direction:** Long
> **Leverage:** 1.0
> **Amount:** `26.64180098`
> **Enter Tag:** Awesome Long Signal
> **Open Rate:** `0.00007489`
> **Current Rate:** `0.00007489`
> **Unrealized Profit:** `12.95%`
> **Stoploss:** `0.00007389 (-0.02%)`
> **Trade ID:** `123` `(since 1 days ago)`
> **Current Pair:** CVC/BTC
> **Direction:** Long
> **Leverage:** 1.0
> **Amount:** `26.64180098`
> **Enter Tag:** Awesome Long Signal
> **Open Rate:** `0.00007489`
> **Current Rate:** `0.00007489`
> **Unrealized Profit:** `12.95%`
> **Stoploss:** `0.00007389 (-0.02%)`
### /status table
@@ -274,34 +278,34 @@ current max
Return a summary of your profit/loss and performance.
> **ROI:** Close trades
> ∙ `0.00485701 BTC (2.2%) (15.2 Σ%)`
> ∙ `62.968 USD`
> **ROI:** All trades
> ∙ `0.00255280 BTC (1.5%) (6.43 Σ%)`
> ∙ `33.095 EUR`
>
> **Total Trade Count:** `138`
> **Bot started:** `2022-07-11 18:40:44`
> **First Trade opened:** `3 days ago`
> **Latest Trade opened:** `2 minutes ago`
> **Avg. Duration:** `2:33:45`
> **Best Performing:** `PAY/BTC: 50.23%`
> **Trading volume:** `0.5 BTC`
> **Profit factor:** `1.04`
> **Win / Loss:** `102 / 36`
> **Winrate:** `73.91%`
> **Expectancy (Ratio):** `4.87 (1.66)`
> **Max Drawdown:** `9.23% (0.01255 BTC)`
> **ROI:** Close trades
> ∙ `0.00485701 BTC (2.2%) (15.2 Σ%)`
> ∙ `62.968 USD`
> **ROI:** All trades
> ∙ `0.00255280 BTC (1.5%) (6.43 Σ%)`
> ∙ `33.095 EUR`
>
> **Total Trade Count:** `138`
> **Bot started:** `2022-07-11 18:40:44`
> **First Trade opened:** `3 days ago`
> **Latest Trade opened:** `2 minutes ago`
> **Avg. Duration:** `2:33:45`
> **Best Performing:** `PAY/BTC: 50.23%`
> **Trading volume:** `0.5 BTC`
> **Profit factor:** `1.04`
> **Win / Loss:** `102 / 36`
> **Winrate:** `73.91%`
> **Expectancy (Ratio):** `4.87 (1.66)`
> **Max Drawdown:** `9.23% (0.01255 BTC)`
The relative profit of `1.2%` is the average profit per trade.
The relative profit of `15.2 Σ%` is be based on the starting capital - so in this case, the starting capital was `0.00485701 * 1.152 = 0.00738 BTC`.
Starting capital is either taken from the `available_capital` setting, or calculated by using current wallet size - profits.
Profit Factor is calculated as gross profits / gross losses - and should serve as an overall metric for the strategy.
Expectancy corresponds to the average return per currency unit at risk, i.e. the winrate and the risk-reward ratio (the average gain of winning trades compared to the average loss of losing trades).
Expectancy Ratio is expected profit or loss of a subsequent trade based on the performance of all past trades.
Max drawdown corresponds to the backtesting metric `Absolute Drawdown (Account)` - calculated as `(Absolute Drawdown) / (DrawdownHigh + startingBalance)`.
Bot started date will refer to the date the bot was first started. For older bots, this will default to the first trade's open date.
The relative profit of `1.2%` is the average profit per trade.
The relative profit of `15.2 Σ%` is based on the starting capital - so in this case, the starting capital was `0.00485701 * 1.152 = 0.00738 BTC`.
**Starting capital** is either taken from the `available_capital` setting, or calculated by using current wallet size - profits.
**Profit Factor** is calculated as gross profits / gross losses - and should serve as an overall metric for the strategy.
**Expectancy** corresponds to the average return per currency unit at risk, i.e. the winrate and the risk-reward ratio (the average gain of winning trades compared to the average loss of losing trades).
**Expectancy Ratio** is the expected profit or loss of a subsequent trade based on the performance of all past trades.
**Max drawdown** corresponds to the backtesting metric `Absolute Drawdown (Account)` - calculated as `(Absolute Drawdown) / (DrawdownHigh + startingBalance)`.
**Bot started date** will refer to the date the bot was first started. For older bots, this will default to the first trade's open date.
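As an illustration, a minimal sketch (the per-trade profits are made-up numbers, not bot output) of how these metrics relate, following the definitions above; the exact formula assumed here for the Expectancy Ratio is `((1 + avg_win / avg_loss) * winrate) - 1`:

``` python
profits = [12.0, 8.0, -5.0, 20.0, -4.0]  # made-up per-trade profits in stake currency

wins = [p for p in profits if p > 0]
losses = [abs(p) for p in profits if p < 0]

profit_factor = sum(wins) / sum(losses)                    # gross profits / gross losses
winrate = len(wins) / len(profits)
avg_win, avg_loss = sum(wins) / len(wins), sum(losses) / len(losses)

expectancy = winrate * avg_win - (1 - winrate) * avg_loss  # average return per trade
expectancy_ratio = (1 + avg_win / avg_loss) * winrate - 1  # assumed formula, see above

print(f"{profit_factor=:.2f} {expectancy=:.2f} {expectancy_ratio=:.2f}")
```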
### /forceexit <trade_id>
@@ -329,33 +333,34 @@ Note that for this to work, `force_entry_enable` needs to be set to true.
### /performance
Return the performance of each crypto-currency the bot has sold.
> Performance:
> 1. `RCN/BTC 0.003 BTC (57.77%) (1)`
> 2. `PAY/BTC 0.0012 BTC (56.91%) (1)`
> 3. `VIB/BTC 0.0011 BTC (47.07%) (1)`
> 4. `SALT/BTC 0.0010 BTC (30.24%) (1)`
> 5. `STORJ/BTC 0.0009 BTC (27.24%) (1)`
> ...
> Performance:
> 1. `RCN/BTC 0.003 BTC (57.77%) (1)`
> 2. `PAY/BTC 0.0012 BTC (56.91%) (1)`
> 3. `VIB/BTC 0.0011 BTC (47.07%) (1)`
> 4. `SALT/BTC 0.0010 BTC (30.24%) (1)`
> 5. `STORJ/BTC 0.0009 BTC (27.24%) (1)`
> ...
### /balance
Return the balance of all crypto-currencies you have on the exchange.
> **Currency:** BTC
> **Available:** 3.05890234
> **Balance:** 3.05890234
> **Pending:** 0.0
> **Currency:** CVC
> **Available:** 86.64180098
> **Balance:** 86.64180098
> **Pending:** 0.0
> **Currency:** BTC
> **Available:** 3.05890234
> **Balance:** 3.05890234
> **Pending:** 0.0
>
> **Currency:** CVC
> **Available:** 86.64180098
> **Balance:** 86.64180098
> **Pending:** 0.0
### /daily <n>
Per default `/daily` will return the last 7 days. The example below is for `/daily 3`:
> **Daily Profit over the last 3 days:**
```
Day (count) USDT USD Profit %
-------------- ------------ ---------- ----------
@@ -370,6 +375,7 @@ Per default `/weekly` will return the 8 last weeks, including the current week.
from Monday. The example below is for `/weekly 3`:
> **Weekly Profit over the last 3 weeks (starting from Monday):**
```
Monday (count) Profit BTC Profit USD Profit %
------------- -------------- ------------ ----------
@@ -396,18 +402,18 @@ Month (count) Profit BTC Profit USD Profit %
Shows the current whitelist
> Using whitelist `StaticPairList` with 22 pairs
> Using whitelist `StaticPairList` with 22 pairs
> `IOTA/BTC, NEO/BTC, TRX/BTC, VET/BTC, ADA/BTC, ETC/BTC, NCASH/BTC, DASH/BTC, XRP/BTC, XVG/BTC, EOS/BTC, LTC/BTC, OMG/BTC, BTG/BTC, LSK/BTC, ZEC/BTC, HOT/BTC, IOTX/BTC, XMR/BTC, AST/BTC, XLM/BTC, NANO/BTC`
### /blacklist [pair]
Shows the current blacklist.
If Pair is set, then this pair will be added to the blacklist.
Also supports multiple pairs, separated by a space.
Also supports multiple pairs, separated by a space.
Use `/reload_config` to reset the blacklist.
> Using blacklist `StaticPairList` with 2 pairs
>`DODGE/BTC`, `HOT/BTC`.
> Using blacklist `StaticPairList` with 2 pairs
>`DODGE/BTC`, `HOT/BTC`.
### /edge

View File

@@ -143,6 +143,7 @@ Most properties here can be None as they are dependent on the exchange response.
| `remaining` | float | Remaining amount |
| `cost` | float | Cost of the order - usually average * filled (*Exchange dependent on futures, may contain the cost with or without leverage and may be in contracts.*) |
| `stake_amount` | float | Stake amount used for this order. *Added in 2023.7.* |
| `stake_amount_filled` | float | Filled Stake amount used for this order. *Added in 2024.11.* |
| `order_date` | datetime | Order creation date **use `order_date_utc` instead** |
| `order_date_utc` | datetime | Order creation date (in UTC) |
| `order_fill_date` | datetime | Order fill date **use `order_fill_utc` instead** |

View File

@@ -216,6 +216,45 @@ Example: Search dedicated strategy path.
freqtrade list-strategies --strategy-path ~/.freqtrade/strategies/
```
## List Hyperopt-Loss functions
Use the `list-hyperoptloss` subcommand to see all hyperopt loss functions available.
It provides a quick list of all available loss functions in your environment.
This subcommand can be useful for finding problems in your environment with loading loss functions: modules with Hyperopt-Loss functions that contain errors and failed to load are printed in red (LOAD FAILED), while Hyperopt-Loss functions with duplicate names are printed in yellow (DUPLICATE NAME).
```
usage: freqtrade list-hyperoptloss [-h] [-v] [--logfile FILE] [-V] [-c PATH]
[-d PATH] [--userdir PATH]
[--hyperopt-path PATH] [-1] [--no-color]
options:
-h, --help show this help message and exit
--hyperopt-path PATH Specify additional lookup path for Hyperopt Loss
functions.
-1, --one-column Print output in one column.
--no-color Disable colorization of hyperopt results. May be
useful if you are redirecting output to a file.
Common arguments:
-v, --verbose Verbose mode (-vv for more, -vvv to get all messages).
--logfile FILE, --log-file FILE
Log to the file specified. Special values are:
'syslog', 'journald'. See the documentation for more
details.
-V, --version show program's version number and exit
-c PATH, --config PATH
Specify configuration file (default:
`userdir/config.json` or `config.json` whichever
exists). Multiple --config options may be used. Can be
set to `-` to read config from stdin.
-d PATH, --datadir PATH, --data-dir PATH
Path to directory with historical backtesting data.
--userdir PATH, --user-data-dir PATH
Path to userdata directory.
```
## List freqAI models
Use the `list-freqaimodels` subcommand to see all freqAI models available.

View File

@@ -1,6 +1,6 @@
"""Freqtrade bot"""
__version__ = "2024.10-dev"
__version__ = "2024.11-dev"
if "dev" in __version__:
from pathlib import Path

View File

@@ -27,6 +27,7 @@ from freqtrade.commands.hyperopt_commands import start_hyperopt_list, start_hype
from freqtrade.commands.list_commands import (
start_list_exchanges,
start_list_freqAI_models,
start_list_hyperopt_loss_functions,
start_list_markets,
start_list_strategies,
start_list_timeframes,

View File

@@ -5,7 +5,7 @@ This module contains the argument manager class
from argparse import ArgumentParser, Namespace, _ArgumentGroup
from functools import partial
from pathlib import Path
from typing import Any, Optional, Union
from typing import Any
from freqtrade.commands.cli_options import AVAILABLE_CLI_OPTIONS
from freqtrade.constants import DEFAULT_CONFIG
@@ -37,7 +37,6 @@ ARGS_COMMON_OPTIMIZE = [
ARGS_BACKTEST = ARGS_COMMON_OPTIMIZE + [
"position_stacking",
"use_max_market_positions",
"enable_protections",
"dry_run_wallet",
"timeframe_detail",
@@ -53,7 +52,6 @@ ARGS_HYPEROPT = ARGS_COMMON_OPTIMIZE + [
"hyperopt",
"hyperopt_path",
"position_stacking",
"use_max_market_positions",
"enable_protections",
"dry_run_wallet",
"timeframe_detail",
@@ -117,7 +115,7 @@ ARGS_CREATE_USERDIR = ["user_data_dir", "reset"]
ARGS_BUILD_CONFIG = ["config"]
ARGS_SHOW_CONFIG = ["user_data_dir", "config", "show_sensitive"]
ARGS_BUILD_STRATEGY = ["user_data_dir", "strategy", "template"]
ARGS_BUILD_STRATEGY = ["user_data_dir", "strategy", "strategy_path", "template"]
ARGS_CONVERT_DATA_TRADES = ["pairs", "format_from_trades", "format_to", "erase", "exchange"]
ARGS_CONVERT_DATA = ["pairs", "format_from", "format_to", "erase", "exchange"]
@@ -242,8 +240,7 @@ ARGS_STRATEGY_UPDATER = ["strategy_list", "strategy_path", "recursive_strategy_s
ARGS_LOOKAHEAD_ANALYSIS = [
a
for a in ARGS_BACKTEST
if a
not in ("position_stacking", "use_max_market_positions", "backtest_cache", "backtest_breakdown")
if a not in ("position_stacking", "backtest_cache", "backtest_breakdown")
] + ["minimum_trade_amount", "targeted_trade_amount", "lookahead_analysis_exportfilename"]
ARGS_RECURSIVE_ANALYSIS = ["timeframe", "timerange", "dataformat_ohlcv", "pairs", "startup_candle"]
@@ -258,6 +255,7 @@ NO_CONF_REQURIED = [
"list-pairs",
"list-strategies",
"list-freqaimodels",
"list-hyperoptloss",
"list-data",
"hyperopt-list",
"hyperopt-show",
@@ -277,9 +275,9 @@ class Arguments:
Arguments Class. Manage the arguments received by the cli
"""
def __init__(self, args: Optional[list[str]]) -> None:
def __init__(self, args: list[str] | None) -> None:
self.args = args
self._parsed_arg: Optional[Namespace] = None
self._parsed_arg: Namespace | None = None
def get_parsed_arg(self) -> dict[str, Any]:
"""
@@ -321,9 +319,7 @@ class Arguments:
return parsed_arg
def _build_args(
self, optionlist: list[str], parser: Union[ArgumentParser, _ArgumentGroup]
) -> None:
def _build_args(self, optionlist: list[str], parser: ArgumentParser | _ArgumentGroup) -> None:
for val in optionlist:
opt = AVAILABLE_CLI_OPTIONS[val]
parser.add_argument(*opt.cli, dest=val, **opt.kwargs)
@@ -365,6 +361,7 @@ class Arguments:
start_list_data,
start_list_exchanges,
start_list_freqAI_models,
start_list_hyperopt_loss_functions,
start_list_markets,
start_list_strategies,
start_list_timeframes,
@@ -566,6 +563,15 @@ class Arguments:
list_strategies_cmd.set_defaults(func=start_list_strategies)
self._build_args(optionlist=ARGS_LIST_STRATEGIES, parser=list_strategies_cmd)
# Add list-Hyperopt loss subcommand
list_hyperopt_loss_cmd = subparsers.add_parser(
"list-hyperoptloss",
help="Print available hyperopt loss functions.",
parents=[_common_parser],
)
list_hyperopt_loss_cmd.set_defaults(func=start_list_hyperopt_loss_functions)
self._build_args(optionlist=ARGS_LIST_HYPEROPTS, parser=list_hyperopt_loss_cmd)
# Add list-freqAI Models subcommand
list_freqaimodels_cmd = subparsers.add_parser(
"list-freqaimodels",

View File

@@ -168,14 +168,6 @@ AVAILABLE_CLI_OPTIONS = {
action="store_true",
default=False,
),
"use_max_market_positions": Arg(
"--dmmp",
"--disable-max-market-positions",
help="Disable applying `max_open_trades` during backtest "
"(same as setting `max_open_trades` to a very high number).",
action="store_false",
default=True,
),
"backtest_show_pair_list": Arg(
"--show-pair-list",
help="Show backtesting pairlist sorted by profit.",

View File

@@ -86,7 +86,14 @@ def start_new_strategy(args: dict[str, Any]) -> None:
config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)
if "strategy" in args and args["strategy"]:
new_path = config["user_data_dir"] / USERPATH_STRATEGIES / (args["strategy"] + ".py")
if "strategy_path" in args and args["strategy_path"]:
strategy_dir = Path(args["strategy_path"])
else:
strategy_dir = config["user_data_dir"] / USERPATH_STRATEGIES
if not strategy_dir.is_dir():
logger.info(f"Creating strategy directory {strategy_dir}")
strategy_dir.mkdir(parents=True)
new_path = strategy_dir / (args["strategy"] + ".py")
if new_path.exists():
raise OperationalException(

View File

@@ -1,6 +1,5 @@
import logging
from pathlib import Path
from typing import Optional
import requests
@@ -24,7 +23,7 @@ def clean_ui_subdir(directory: Path):
p.rmdir()
def read_ui_version(dest_folder: Path) -> Optional[str]:
def read_ui_version(dest_folder: Path) -> str | None:
file = dest_folder / ".uiversion"
if not file.is_file():
return None
@@ -52,7 +51,7 @@ def download_and_install_ui(dest_folder: Path, dl_url: str, version: str):
f.write(version)
def get_ui_download_url(version: Optional[str] = None) -> tuple[str, str]:
def get_ui_download_url(version: str | None = None) -> tuple[str, str]:
base_url = "https://api.github.com/repos/freqtrade/frequi/"
# Get base UI Repo path

View File

@@ -15,7 +15,7 @@ def start_hyperopt_list(args: dict[str, Any]) -> None:
"""
from freqtrade.configuration import setup_utils_configuration
from freqtrade.data.btanalysis import get_latest_hyperopt_file
from freqtrade.optimize.hyperopt_output import HyperoptOutput
from freqtrade.optimize.hyperopt.hyperopt_output import HyperoptOutput
from freqtrade.optimize.hyperopt_tools import HyperoptTools
config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)

View File

@@ -1,7 +1,7 @@
import csv
import logging
import sys
from typing import Any, Union
from typing import Any
from freqtrade.enums import RunMode
from freqtrade.exceptions import ConfigurationError, OperationalException
@@ -87,7 +87,7 @@ def _print_objs_tabular(objs: list, print_colorized: bool) -> None:
from rich.text import Text
names = [s["name"] for s in objs]
objs_to_print: list[dict[str, Union[Text, str]]] = [
objs_to_print: list[dict[str, Text | str]] = [
{
"name": Text(s["name"] if s["name"] else "--"),
"location": s["location_rel"],
@@ -169,6 +169,24 @@ def start_list_freqAI_models(args: dict[str, Any]) -> None:
_print_objs_tabular(model_objs, config.get("print_colorized", False))
def start_list_hyperopt_loss_functions(args: dict[str, Any]) -> None:
"""
Print files with HyperoptLoss custom classes available in the directory
"""
from freqtrade.configuration import setup_utils_configuration
from freqtrade.resolvers.hyperopt_resolver import HyperOptLossResolver
config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)
model_objs = HyperOptLossResolver.search_all_objects(config, not args["print_one_column"])
# Sort alphabetically
model_objs = sorted(model_objs, key=lambda x: x["name"])
if args["print_one_column"]:
print("\n".join([s["name"] for s in model_objs]))
else:
_print_objs_tabular(model_objs, config.get("print_colorized", False))
def start_list_timeframes(args: dict[str, Any]) -> None:
"""
Print timeframes available on Exchange

View File

@@ -517,8 +517,11 @@ CONF_SCHEMA = {
},
"exit_fill": {
"description": "Telegram setting for exit fill signals.",
"type": "string",
"enum": TELEGRAM_SETTING_OPTIONS,
"type": ["string", "object"],
"additionalProperties": {
"type": "string",
"enum": TELEGRAM_SETTING_OPTIONS,
},
"default": "on",
},
"exit_cancel": {
@@ -995,6 +998,13 @@ CONF_SCHEMA = {
"type": "string",
"default": "example",
},
"wait_for_training_iteration_on_reload": {
"description": (
"Wait for the next training iteration to complete after /reload or ctrl+c."
),
"type": "boolean",
"default": True,
},
"feature_parameters": {
"description": "The parameters used to engineer the feature set",
"type": "object",

View File

@@ -5,9 +5,10 @@ This module contains the configuration class
import ast
import logging
import warnings
from collections.abc import Callable
from copy import deepcopy
from pathlib import Path
from typing import Any, Callable, Optional
from typing import Any
from freqtrade import constants
from freqtrade.configuration.deprecated_settings import process_temporary_deprecated_settings
@@ -37,9 +38,9 @@ class Configuration:
Reuse this class for the bot, backtesting, hyperopt and every script that required configuration
"""
def __init__(self, args: dict[str, Any], runmode: Optional[RunMode] = None) -> None:
def __init__(self, args: dict[str, Any], runmode: RunMode | None = None) -> None:
self.args = args
self.config: Optional[Config] = None
self.config: Config | None = None
self.runmode = runmode
def get_config(self) -> Config:
@@ -241,11 +242,7 @@ class Configuration:
logstring="Parameter --enable-protections detected, enabling Protections. ...",
)
if "use_max_market_positions" in self.args and not self.args["use_max_market_positions"]:
config.update({"use_max_market_positions": False})
logger.info("Parameter --disable-max-market-positions detected ...")
logger.info("max_open_trades set to unlimited ...")
elif "max_open_trades" in self.args and self.args["max_open_trades"]:
if "max_open_trades" in self.args and self.args["max_open_trades"]:
config.update({"max_open_trades": self.args["max_open_trades"]})
logger.info(
"Parameter --max-open-trades detected, overriding max_open_trades to: %s ...",
@@ -455,8 +452,8 @@ class Configuration:
config: Config,
argname: str,
logstring: str,
logfun: Optional[Callable] = None,
deprecated_msg: Optional[str] = None,
logfun: Callable | None = None,
deprecated_msg: str | None = None,
) -> None:
"""
:param config: Configuration dictionary

View File

@@ -3,7 +3,6 @@ Functions to handle deprecated settings
"""
import logging
from typing import Optional
from freqtrade.constants import Config
from freqtrade.exceptions import ConfigurationError, OperationalException
@@ -14,9 +13,9 @@ logger = logging.getLogger(__name__)
def check_conflicting_settings(
config: Config,
section_old: Optional[str],
section_old: str | None,
name_old: str,
section_new: Optional[str],
section_new: str | None,
name_new: str,
) -> None:
section_new_config = config.get(section_new, {}) if section_new else config
@@ -34,7 +33,7 @@ def check_conflicting_settings(
def process_removed_setting(
config: Config, section1: str, name1: str, section2: Optional[str], name2: str
config: Config, section1: str, name1: str, section2: str | None, name2: str
) -> None:
"""
:param section1: Removed section
@@ -54,9 +53,9 @@ def process_removed_setting(
def process_deprecated_setting(
config: Config,
section_old: Optional[str],
section_old: str | None,
name_old: str,
section_new: Optional[str],
section_new: str | None,
name_new: str,
) -> None:
check_conflicting_settings(config, section_old, name_old, section_new, name_new)

View File

@@ -1,7 +1,6 @@
import logging
import shutil
from pathlib import Path
from typing import Optional
from freqtrade.configuration.detect_environment import running_in_docker
from freqtrade.constants import (
@@ -18,7 +17,7 @@ from freqtrade.exceptions import OperationalException
logger = logging.getLogger(__name__)
def create_datadir(config: Config, datadir: Optional[str] = None) -> Path:
def create_datadir(config: Config, datadir: str | None = None) -> Path:
folder = Path(datadir) if datadir else Path(f"{config['user_data_dir']}/data")
if not datadir:
# set datadir

View File

@@ -7,7 +7,7 @@ import re
import sys
from copy import deepcopy
from pathlib import Path
from typing import Any, Optional
from typing import Any
import rapidjson
@@ -78,7 +78,7 @@ def load_config_file(path: str) -> dict[str, Any]:
def load_from_files(
files: list[str], base_path: Optional[Path] = None, level: int = 0
files: list[str], base_path: Path | None = None, level: int = 0
) -> dict[str, Any]:
"""
Recursively load configuration files if specified.

View File

@@ -5,7 +5,6 @@ This module contains the argument manager class
import logging
import re
from datetime import datetime, timezone
from typing import Optional
from typing_extensions import Self
@@ -25,24 +24,24 @@ class TimeRange:
def __init__(
self,
starttype: Optional[str] = None,
stoptype: Optional[str] = None,
starttype: str | None = None,
stoptype: str | None = None,
startts: int = 0,
stopts: int = 0,
):
self.starttype: Optional[str] = starttype
self.stoptype: Optional[str] = stoptype
self.starttype: str | None = starttype
self.stoptype: str | None = stoptype
self.startts: int = startts
self.stopts: int = stopts
@property
def startdt(self) -> Optional[datetime]:
def startdt(self) -> datetime | None:
if self.startts:
return datetime.fromtimestamp(self.startts, tz=timezone.utc)
return None
@property
def stopdt(self) -> Optional[datetime]:
def stopdt(self) -> datetime | None:
if self.stopts:
return datetime.fromtimestamp(self.stopts, tz=timezone.utc)
return None
@@ -120,7 +119,7 @@ class TimeRange:
self.starttype = "date"
@classmethod
def parse_timerange(cls, text: Optional[str]) -> Self:
def parse_timerange(cls, text: str | None) -> Self:
"""
Parse the value of the argument --timerange to determine what is the range desired
:param text: value from --timerange

View File

@@ -4,7 +4,7 @@
bot constants
"""
from typing import Any, Literal, Optional
from typing import Any, Literal
from freqtrade.enums import CandleType, PriceType
@@ -38,6 +38,7 @@ HYPEROPT_LOSS_BUILTIN = [
"MaxDrawDownHyperOptLoss",
"MaxDrawDownRelativeHyperOptLoss",
"ProfitDrawDownHyperOptLoss",
"MultiMetricHyperOptLoss",
]
AVAILABLE_PAIRLISTS = [
"StaticPairList",
@@ -97,7 +98,7 @@ DL_DATA_TIMEFRAMES = ["1m", "5m"]
ENV_VAR_PREFIX = "FREQTRADE__"
CANCELED_EXCHANGE_STATES = ("cancelled", "canceled", "expired")
CANCELED_EXCHANGE_STATES = ("cancelled", "canceled", "expired", "rejected")
NON_OPEN_EXCHANGE_STATES = CANCELED_EXCHANGE_STATES + ("closed",)
# Define decimals per coin for outputs
@@ -193,7 +194,7 @@ ListPairsWithTimeframes = list[PairWithTimeframe]
# Type for trades list
TradeList = list[list]
# ticks, pair, timeframe, CandleType
TickWithTimeframe = tuple[str, str, CandleType, Optional[int], Optional[int]]
TickWithTimeframe = tuple[str, str, CandleType, int | None, int | None]
ListTicksWithTimeframes = list[TickWithTimeframe]
LongShort = Literal["long", "short"]

View File

@@ -6,7 +6,7 @@ import logging
from copy import copy
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Literal, Optional, Union
from typing import Any, Literal
import numpy as np
import pandas as pd
@@ -53,7 +53,7 @@ BT_DATA_COLUMNS = [
]
def get_latest_optimize_filename(directory: Union[Path, str], variant: str) -> str:
def get_latest_optimize_filename(directory: Path | str, variant: str) -> str:
"""
Get latest backtest export based on '.last_result.json'.
:param directory: Directory to search for last result
@@ -84,7 +84,7 @@ def get_latest_optimize_filename(directory: Union[Path, str], variant: str) -> s
return data[f"latest_{variant}"]
def get_latest_backtest_filename(directory: Union[Path, str]) -> str:
def get_latest_backtest_filename(directory: Path | str) -> str:
"""
Get latest backtest export based on '.last_result.json'.
:param directory: Directory to search for last result
@@ -97,7 +97,7 @@ def get_latest_backtest_filename(directory: Union[Path, str]) -> str:
return get_latest_optimize_filename(directory, "backtest")
def get_latest_hyperopt_filename(directory: Union[Path, str]) -> str:
def get_latest_hyperopt_filename(directory: Path | str) -> str:
"""
Get latest hyperopt export based on '.last_result.json'.
:param directory: Directory to search for last result
@@ -114,9 +114,7 @@ def get_latest_hyperopt_filename(directory: Union[Path, str]) -> str:
return "hyperopt_results.pickle"
def get_latest_hyperopt_file(
directory: Union[Path, str], predef_filename: Optional[str] = None
) -> Path:
def get_latest_hyperopt_file(directory: Path | str, predef_filename: str | None = None) -> Path:
"""
Get latest hyperopt export based on '.last_result.json'.
:param directory: Directory to search for last result
@@ -137,7 +135,7 @@ def get_latest_hyperopt_file(
return directory / get_latest_hyperopt_filename(directory)
def load_backtest_metadata(filename: Union[Path, str]) -> dict[str, Any]:
def load_backtest_metadata(filename: Path | str) -> dict[str, Any]:
"""
Read metadata dictionary from backtest results file without reading and deserializing entire
file.
@@ -154,7 +152,7 @@ def load_backtest_metadata(filename: Union[Path, str]) -> dict[str, Any]:
raise OperationalException("Unexpected error while loading backtest metadata.") from e
def load_backtest_stats(filename: Union[Path, str]) -> BacktestResultType:
def load_backtest_stats(filename: Path | str) -> BacktestResultType:
"""
Load backtest statistics file.
:param filename: pathlib.Path object, or string pointing to the file.
@@ -244,9 +242,10 @@ def delete_backtest_result(file_abs: Path):
"""
# *.meta.json
logger.info(f"Deleting backtest result file: {file_abs.name}")
file_abs_meta = file_abs.with_suffix(".meta.json")
file_abs.unlink()
file_abs_meta.unlink()
for file in file_abs.parent.glob(f"{file_abs.stem}*"):
logger.info(f"Deleting file: {file}")
file.unlink()
def update_backtest_metadata(filename: Path, strategy: str, content: dict[str, Any]):
@@ -275,7 +274,7 @@ def get_backtest_market_change(filename: Path, include_ts: bool = True) -> pd.Da
def find_existing_backtest_stats(
dirname: Union[Path, str], run_ids: dict[str, str], min_backtest_date: Optional[datetime] = None
dirname: Path | str, run_ids: dict[str, str], min_backtest_date: datetime | None = None
) -> dict[str, Any]:
"""
Find existing backtest stats that match specified run IDs and load them.
@@ -344,7 +343,7 @@ def _load_backtest_data_df_compatibility(df: pd.DataFrame) -> pd.DataFrame:
return df
def load_backtest_data(filename: Union[Path, str], strategy: Optional[str] = None) -> pd.DataFrame:
def load_backtest_data(filename: Path | str, strategy: str | None = None) -> pd.DataFrame:
"""
Load backtest data file.
:param filename: pathlib.Path object, or string pointing to a file or directory
@@ -438,7 +437,7 @@ def evaluate_result_multi(
return df_final[df_final["open_trades"] > max_open_trades]
def trade_list_to_dataframe(trades: Union[list[Trade], list[LocalTrade]]) -> pd.DataFrame:
def trade_list_to_dataframe(trades: list[Trade] | list[LocalTrade]) -> pd.DataFrame:
"""
Convert list of Trade objects to pandas Dataframe
:param trades: List of trade objects
@@ -452,7 +451,7 @@ def trade_list_to_dataframe(trades: Union[list[Trade], list[LocalTrade]]) -> pd.
return df
def load_trades_from_db(db_url: str, strategy: Optional[str] = None) -> pd.DataFrame:
def load_trades_from_db(db_url: str, strategy: str | None = None) -> pd.DataFrame:
"""
Load trades from a DB (using dburl)
:param db_url: Sqlite url (default format sqlite:///tradesv3.dry-run.sqlite)
@@ -475,7 +474,7 @@ def load_trades(
db_url: str,
exportfilename: Path,
no_trades: bool = False,
strategy: Optional[str] = None,
strategy: str | None = None,
) -> pd.DataFrame:
"""
Based on configuration option 'trade_source':

View File

@@ -8,7 +8,7 @@ Common Interface for bot and strategy to access data.
import logging
from collections import deque
from datetime import datetime, timezone
from typing import Any, Optional
from typing import Any
from pandas import DataFrame, Timedelta, Timestamp, to_timedelta
@@ -40,17 +40,17 @@ class DataProvider:
def __init__(
self,
config: Config,
exchange: Optional[Exchange],
exchange: Exchange | None,
pairlists=None,
rpc: Optional[RPCManager] = None,
rpc: RPCManager | None = None,
) -> None:
self._config = config
self._exchange = exchange
self._pairlists = pairlists
self.__rpc = rpc
self.__cached_pairs: dict[PairWithTimeframe, tuple[DataFrame, datetime]] = {}
self.__slice_index: Optional[int] = None
self.__slice_date: Optional[datetime] = None
self.__slice_index: int | None = None
self.__slice_date: datetime | None = None
self.__cached_pairs_backtesting: dict[PairWithTimeframe, DataFrame] = {}
self.__producer_pairs_df: dict[
@@ -255,8 +255,8 @@ class DataProvider:
def get_producer_df(
self,
pair: str,
timeframe: Optional[str] = None,
candle_type: Optional[CandleType] = None,
timeframe: str | None = None,
candle_type: CandleType | None = None,
producer_name: str = "default",
) -> tuple[DataFrame, datetime]:
"""
@@ -349,7 +349,7 @@ class DataProvider:
return total_candles
def get_pair_dataframe(
self, pair: str, timeframe: Optional[str] = None, candle_type: str = ""
self, pair: str, timeframe: str | None = None, candle_type: str = ""
) -> DataFrame:
"""
Return pair candle (OHLCV) data, either live or cached historical -- depending
@@ -437,7 +437,7 @@ class DataProvider:
def refresh(
self,
pairlist: ListPairsWithTimeframes,
helping_pairs: Optional[ListPairsWithTimeframes] = None,
helping_pairs: ListPairsWithTimeframes | None = None,
) -> None:
"""
Refresh data, called with each cycle
@@ -471,7 +471,7 @@ class DataProvider:
return list(self._exchange._klines.keys())
def ohlcv(
self, pair: str, timeframe: Optional[str] = None, copy: bool = True, candle_type: str = ""
self, pair: str, timeframe: str | None = None, copy: bool = True, candle_type: str = ""
) -> DataFrame:
"""
Get candle (OHLCV) data for the given pair as DataFrame
@@ -497,7 +497,7 @@ class DataProvider:
return DataFrame()
def trades(
self, pair: str, timeframe: Optional[str] = None, copy: bool = True, candle_type: str = ""
self, pair: str, timeframe: str | None = None, copy: bool = True, candle_type: str = ""
) -> DataFrame:
"""
Get candle (TRADES) data for the given pair as DataFrame
@@ -529,7 +529,7 @@ class DataProvider:
)
return trades_df
def market(self, pair: str) -> Optional[dict[str, Any]]:
def market(self, pair: str) -> dict[str, Any] | None:
"""
Return market data for the pair
:param pair: Pair to get the data for

View File

@@ -1,5 +1,4 @@
import logging
from typing import Optional
from pandas import DataFrame, read_feather, to_datetime
@@ -37,7 +36,7 @@ class FeatherDataHandler(IDataHandler):
)
def _ohlcv_load(
self, pair: str, timeframe: str, timerange: Optional[TimeRange], candle_type: CandleType
self, pair: str, timeframe: str, timerange: TimeRange | None, candle_type: CandleType
) -> DataFrame:
"""
Internal method used to load data for one pair from disk.
@@ -59,20 +58,25 @@ class FeatherDataHandler(IDataHandler):
)
if not filename.exists():
return DataFrame(columns=self._columns)
pairdata = read_feather(filename)
pairdata.columns = self._columns
pairdata = pairdata.astype(
dtype={
"open": "float",
"high": "float",
"low": "float",
"close": "float",
"volume": "float",
}
)
pairdata["date"] = to_datetime(pairdata["date"], unit="ms", utc=True)
return pairdata
try:
pairdata = read_feather(filename)
pairdata.columns = self._columns
pairdata = pairdata.astype(
dtype={
"open": "float",
"high": "float",
"low": "float",
"close": "float",
"volume": "float",
}
)
pairdata["date"] = to_datetime(pairdata["date"], unit="ms", utc=True)
return pairdata
except Exception as e:
logger.exception(
f"Error loading data from {filename}. Exception: {e}. Returning empty dataframe."
)
return DataFrame(columns=self._columns)
def ohlcv_append(
self, pair: str, timeframe: str, data: DataFrame, candle_type: CandleType
@@ -108,7 +112,7 @@ class FeatherDataHandler(IDataHandler):
raise NotImplementedError()
def _trades_load(
self, pair: str, trading_mode: TradingMode, timerange: Optional[TimeRange] = None
self, pair: str, trading_mode: TradingMode, timerange: TimeRange | None = None
) -> DataFrame:
"""
Load a pair from file, either .json.gz or .json

View File

@@ -1,5 +1,4 @@
import logging
from typing import Optional
import numpy as np
import pandas as pd
@@ -45,7 +44,7 @@ class HDF5DataHandler(IDataHandler):
)
def _ohlcv_load(
self, pair: str, timeframe: str, timerange: Optional[TimeRange], candle_type: CandleType
self, pair: str, timeframe: str, timerange: TimeRange | None, candle_type: CandleType
) -> pd.DataFrame:
"""
Internal method used to load data for one pair from disk.
@@ -69,28 +68,36 @@ class HDF5DataHandler(IDataHandler):
)
if not filename.exists():
return pd.DataFrame(columns=self._columns)
where = []
if timerange:
if timerange.starttype == "date":
where.append(f"date >= Timestamp({timerange.startts * 1e9})")
if timerange.stoptype == "date":
where.append(f"date <= Timestamp({timerange.stopts * 1e9})")
try:
where = []
if timerange:
if timerange.starttype == "date":
where.append(f"date >= Timestamp({timerange.startts * 1e9})")
if timerange.stoptype == "date":
where.append(f"date <= Timestamp({timerange.stopts * 1e9})")
pairdata = pd.read_hdf(filename, key=key, mode="r", where=where)
pairdata = pd.read_hdf(filename, key=key, mode="r", where=where)
if list(pairdata.columns) != self._columns:
raise ValueError("Wrong dataframe format")
pairdata = pairdata.astype(
dtype={
"open": "float",
"high": "float",
"low": "float",
"close": "float",
"volume": "float",
}
)
pairdata = pairdata.reset_index(drop=True)
return pairdata
if list(pairdata.columns) != self._columns:
raise ValueError("Wrong dataframe format")
pairdata = pairdata.astype(
dtype={
"open": "float",
"high": "float",
"low": "float",
"close": "float",
"volume": "float",
}
)
pairdata = pairdata.reset_index(drop=True)
return pairdata
except ValueError:
raise
except Exception as e:
logger.exception(
f"Error loading data from {filename}. Exception: {e}. Returning empty dataframe."
)
return pd.DataFrame(columns=self._columns)
def ohlcv_append(
self, pair: str, timeframe: str, data: pd.DataFrame, candle_type: CandleType
@@ -134,7 +141,7 @@ class HDF5DataHandler(IDataHandler):
raise NotImplementedError()
def _trades_load(
self, pair: str, trading_mode: TradingMode, timerange: Optional[TimeRange] = None
self, pair: str, trading_mode: TradingMode, timerange: TimeRange | None = None
) -> pd.DataFrame:
"""
Load a pair from h5 file.

View File

@@ -10,7 +10,6 @@ from abc import ABC, abstractmethod
from copy import deepcopy
from datetime import datetime, timezone
from pathlib import Path
from typing import Optional
from pandas import DataFrame, to_datetime
@@ -126,7 +125,7 @@ class IDataHandler(ABC):
@abstractmethod
def _ohlcv_load(
self, pair: str, timeframe: str, timerange: Optional[TimeRange], candle_type: CandleType
self, pair: str, timeframe: str, timerange: TimeRange | None, candle_type: CandleType
) -> DataFrame:
"""
Internal method used to load data for one pair from disk.
@@ -247,7 +246,7 @@ class IDataHandler(ABC):
@abstractmethod
def _trades_load(
self, pair: str, trading_mode: TradingMode, timerange: Optional[TimeRange] = None
self, pair: str, trading_mode: TradingMode, timerange: TimeRange | None = None
) -> DataFrame:
"""
Load a pair from file, either .json.gz or .json
@@ -282,7 +281,7 @@ class IDataHandler(ABC):
return False
def trades_load(
self, pair: str, trading_mode: TradingMode, timerange: Optional[TimeRange] = None
self, pair: str, trading_mode: TradingMode, timerange: TimeRange | None = None
) -> DataFrame:
"""
Load a pair from file, either .json.gz or .json
@@ -370,7 +369,7 @@ class IDataHandler(ABC):
timeframe: str,
candle_type: CandleType,
*,
timerange: Optional[TimeRange] = None,
timerange: TimeRange | None = None,
fill_missing: bool = True,
drop_incomplete: bool = False,
startup_candles: int = 0,
@@ -566,7 +565,7 @@ def get_datahandlerclass(datatype: str) -> type[IDataHandler]:
def get_datahandler(
datadir: Path, data_format: Optional[str] = None, data_handler: Optional[IDataHandler] = None
datadir: Path, data_format: str | None = None, data_handler: IDataHandler | None = None
) -> IDataHandler:
"""
:param datadir: Folder to save data

View File

@@ -1,5 +1,4 @@
import logging
from typing import Optional
import numpy as np
from pandas import DataFrame, read_json, to_datetime
@@ -45,7 +44,7 @@ class JsonDataHandler(IDataHandler):
)
def _ohlcv_load(
self, pair: str, timeframe: str, timerange: Optional[TimeRange], candle_type: CandleType
self, pair: str, timeframe: str, timerange: TimeRange | None, candle_type: CandleType
) -> DataFrame:
"""
Internal method used to load data for one pair from disk.
@@ -119,7 +118,7 @@ class JsonDataHandler(IDataHandler):
raise NotImplementedError()
def _trades_load(
self, pair: str, trading_mode: TradingMode, timerange: Optional[TimeRange] = None
self, pair: str, trading_mode: TradingMode, timerange: TimeRange | None = None
) -> DataFrame:
"""
Load a pair from file, either .json.gz or .json

View File

@@ -1,5 +1,4 @@
import logging
from typing import Optional
from pandas import DataFrame, read_parquet, to_datetime
@@ -35,7 +34,7 @@ class ParquetDataHandler(IDataHandler):
data.reset_index(drop=True).loc[:, self._columns].to_parquet(filename)
def _ohlcv_load(
self, pair: str, timeframe: str, timerange: Optional[TimeRange], candle_type: CandleType
self, pair: str, timeframe: str, timerange: TimeRange | None, candle_type: CandleType
) -> DataFrame:
"""
Internal method used to load data for one pair from disk.
@@ -57,20 +56,25 @@ class ParquetDataHandler(IDataHandler):
)
if not filename.exists():
return DataFrame(columns=self._columns)
pairdata = read_parquet(filename)
pairdata.columns = self._columns
pairdata = pairdata.astype(
dtype={
"open": "float",
"high": "float",
"low": "float",
"close": "float",
"volume": "float",
}
)
pairdata["date"] = to_datetime(pairdata["date"], unit="ms", utc=True)
return pairdata
try:
pairdata = read_parquet(filename)
pairdata.columns = self._columns
pairdata = pairdata.astype(
dtype={
"open": "float",
"high": "float",
"low": "float",
"close": "float",
"volume": "float",
}
)
pairdata["date"] = to_datetime(pairdata["date"], unit="ms", utc=True)
return pairdata
except Exception as e:
logger.exception(
f"Error loading data from {filename}. Exception: {e}. Returning empty dataframe."
)
return DataFrame(columns=self._columns)
def ohlcv_append(
self, pair: str, timeframe: str, data: DataFrame, candle_type: CandleType
@@ -106,7 +110,7 @@ class ParquetDataHandler(IDataHandler):
raise NotImplementedError()
def _trades_load(
self, pair: str, trading_mode: TradingMode, timerange: Optional[TimeRange] = None
self, pair: str, trading_mode: TradingMode, timerange: TimeRange | None = None
) -> DataFrame:
"""
Load a pair from file, either .json.gz or .json

View File

@@ -2,18 +2,11 @@ import logging
import operator
from datetime import datetime, timedelta
from pathlib import Path
from typing import Optional
from pandas import DataFrame, concat
from freqtrade.configuration import TimeRange
from freqtrade.constants import (
DATETIME_PRINT_FORMAT,
DEFAULT_DATAFRAME_COLUMNS,
DL_DATA_TIMEFRAMES,
DOCS_LINK,
Config,
)
from freqtrade.constants import DATETIME_PRINT_FORMAT, DL_DATA_TIMEFRAMES, DOCS_LINK, Config
from freqtrade.data.converter import (
clean_ohlcv_dataframe,
convert_trades_to_ohlcv,
@@ -25,8 +18,9 @@ from freqtrade.enums import CandleType, TradingMode
from freqtrade.exceptions import OperationalException
from freqtrade.exchange import Exchange
from freqtrade.plugins.pairlist.pairlist_helpers import dynamic_expand_pairlist
from freqtrade.util import dt_now, dt_ts, format_ms_time, get_progress_tracker
from freqtrade.util import dt_now, dt_ts, format_ms_time
from freqtrade.util.migrations import migrate_data
from freqtrade.util.progress_tracker import CustomProgress, retrieve_progress_tracker
logger = logging.getLogger(__name__)
@@ -37,12 +31,12 @@ def load_pair_history(
timeframe: str,
datadir: Path,
*,
timerange: Optional[TimeRange] = None,
timerange: TimeRange | None = None,
fill_up_missing: bool = True,
drop_incomplete: bool = False,
startup_candles: int = 0,
data_format: Optional[str] = None,
data_handler: Optional[IDataHandler] = None,
data_format: str | None = None,
data_handler: IDataHandler | None = None,
candle_type: CandleType = CandleType.SPOT,
) -> DataFrame:
"""
@@ -79,13 +73,13 @@ def load_data(
timeframe: str,
pairs: list[str],
*,
timerange: Optional[TimeRange] = None,
timerange: TimeRange | None = None,
fill_up_missing: bool = True,
startup_candles: int = 0,
fail_without_data: bool = False,
data_format: str = "feather",
candle_type: CandleType = CandleType.SPOT,
user_futures_funding_rate: Optional[int] = None,
user_futures_funding_rate: int | None = None,
) -> dict[str, DataFrame]:
"""
Load ohlcv history data for a list of pairs.
@@ -137,8 +131,8 @@ def refresh_data(
timeframe: str,
pairs: list[str],
exchange: Exchange,
data_format: Optional[str] = None,
timerange: Optional[TimeRange] = None,
data_format: str | None = None,
timerange: TimeRange | None = None,
candle_type: CandleType,
) -> None:
"""
@@ -168,11 +162,11 @@ def refresh_data(
def _load_cached_data_for_updating(
pair: str,
timeframe: str,
timerange: Optional[TimeRange],
timerange: TimeRange | None,
data_handler: IDataHandler,
candle_type: CandleType,
prepend: bool = False,
) -> tuple[DataFrame, Optional[int], Optional[int]]:
) -> tuple[DataFrame, int | None, int | None]:
"""
Load cached data to download more data.
If timerange is passed in, checks whether data from an before the stored data will be
@@ -200,14 +194,21 @@ def _load_cached_data_for_updating(
candle_type=candle_type,
)
if not data.empty:
if not prepend and start and start < data.iloc[0]["date"]:
# Earlier data than existing data requested, redownload all
data = DataFrame(columns=DEFAULT_DATAFRAME_COLUMNS)
if prepend:
end = data.iloc[0]["date"]
else:
if prepend:
end = data.iloc[0]["date"]
else:
start = data.iloc[-1]["date"]
if start and start < data.iloc[0]["date"]:
# Earlier data than existing data requested, Update start date
logger.info(
f"{pair}, {timeframe}, {candle_type}: "
f"Requested start date {start:{DATETIME_PRINT_FORMAT}} earlier than local "
f"data start date {data.iloc[0]['date']:{DATETIME_PRINT_FORMAT}}. "
f"Use `--prepend` to download data prior "
f"to {data.iloc[0]['date']:{DATETIME_PRINT_FORMAT}}, or "
"`--erase` to redownload all data."
)
start = data.iloc[-1]["date"]
start_ms = int(start.timestamp() * 1000) if start else None
end_ms = int(end.timestamp() * 1000) if end else None
return data, start_ms, end_ms
@@ -220,8 +221,8 @@ def _download_pair_history(
exchange: Exchange,
timeframe: str = "5m",
new_pairs_days: int = 30,
data_handler: Optional[IDataHandler] = None,
timerange: Optional[TimeRange] = None,
data_handler: IDataHandler | None = None,
timerange: TimeRange | None = None,
candle_type: CandleType,
erase: bool = False,
prepend: bool = False,
@@ -322,21 +323,24 @@ def refresh_backtest_ohlcv_data(
timeframes: list[str],
datadir: Path,
trading_mode: str,
timerange: Optional[TimeRange] = None,
timerange: TimeRange | None = None,
new_pairs_days: int = 30,
erase: bool = False,
data_format: Optional[str] = None,
data_format: str | None = None,
prepend: bool = False,
progress_tracker: CustomProgress | None = None,
) -> list[str]:
"""
Refresh stored ohlcv data for backtesting and hyperopt operations.
Used by freqtrade download-data subcommand.
:return: List of pairs that are not available.
"""
progress_tracker = retrieve_progress_tracker(progress_tracker)
pairs_not_available = []
data_handler = get_datahandler(datadir, data_format)
candle_type = CandleType.get_default(trading_mode)
with get_progress_tracker() as progress:
with progress_tracker as progress:
tf_length = len(timeframes) if trading_mode != "futures" else len(timeframes) + 2
timeframe_task = progress.add_task("Timeframe", total=tf_length)
pair_task = progress.add_task("Downloading data...", total=len(pairs))
@@ -346,7 +350,7 @@ def refresh_backtest_ohlcv_data(
progress.update(timeframe_task, completed=0)
if pair not in exchange.markets:
pairs_not_available.append(pair)
pairs_not_available.append(f"{pair}: Pair not available on exchange.")
logger.info(f"Skipping pair {pair}...")
continue
for timeframe in timeframes:
@@ -404,7 +408,7 @@ def _download_trades_history(
pair: str,
*,
new_pairs_days: int = 30,
timerange: Optional[TimeRange] = None,
timerange: TimeRange | None = None,
data_handler: IDataHandler,
trading_mode: TradingMode,
) -> bool:
@@ -412,79 +416,74 @@ def _download_trades_history(
Download trade history from the exchange.
Appends to previously downloaded trades data.
"""
try:
until = None
since = 0
if timerange:
if timerange.starttype == "date":
since = timerange.startts * 1000
if timerange.stoptype == "date":
until = timerange.stopts * 1000
until = None
since = 0
if timerange:
if timerange.starttype == "date":
since = timerange.startts * 1000
if timerange.stoptype == "date":
until = timerange.stopts * 1000
trades = data_handler.trades_load(pair, trading_mode)
trades = data_handler.trades_load(pair, trading_mode)
# TradesList columns are defined in constants.DEFAULT_TRADES_COLUMNS
# DEFAULT_TRADES_COLUMNS: 0 -> timestamp
# DEFAULT_TRADES_COLUMNS: 1 -> id
# TradesList columns are defined in constants.DEFAULT_TRADES_COLUMNS
# DEFAULT_TRADES_COLUMNS: 0 -> timestamp
# DEFAULT_TRADES_COLUMNS: 1 -> id
if not trades.empty and since > 0 and since < trades.iloc[0]["timestamp"]:
# since is before the first trade
logger.info(
f"Start ({trades.iloc[0]['date']:{DATETIME_PRINT_FORMAT}}) earlier than "
f"available data. Redownloading trades for {pair}..."
)
trades = trades_list_to_df([])
from_id = trades.iloc[-1]["id"] if not trades.empty else None
if not trades.empty and since < trades.iloc[-1]["timestamp"]:
# Reset since to the last available point
# - 5 seconds (to ensure we're getting all trades)
since = trades.iloc[-1]["timestamp"] - (5 * 1000)
logger.info(
f"Using last trade date -5s - Downloading trades for {pair} "
f"since: {format_ms_time(since)}."
)
if not since:
since = dt_ts(dt_now() - timedelta(days=new_pairs_days))
logger.debug(
"Current Start: %s",
"None" if trades.empty else f"{trades.iloc[0]['date']:{DATETIME_PRINT_FORMAT}}",
if not trades.empty and since > 0 and since < trades.iloc[0]["timestamp"]:
# since is before the first trade
raise ValueError(
f"Start {format_ms_time(since)} earlier than "
f"available data ({trades.iloc[0]['date']:{DATETIME_PRINT_FORMAT}}). "
f"Please use `--erase` if you'd like to redownload {pair}."
)
logger.debug(
"Current End: %s",
"None" if trades.empty else f"{trades.iloc[-1]['date']:{DATETIME_PRINT_FORMAT}}",
)
logger.info(f"Current Amount of trades: {len(trades)}")
# Default since_ms to 30 days if nothing is given
new_trades = exchange.get_historic_trades(
pair=pair,
since=since,
until=until,
from_id=from_id,
from_id = trades.iloc[-1]["id"] if not trades.empty else None
if not trades.empty and since < trades.iloc[-1]["timestamp"]:
# Reset since to the last available point
# - 5 seconds (to ensure we're getting all trades)
since = trades.iloc[-1]["timestamp"] - (5 * 1000)
logger.info(
f"Using last trade date -5s - Downloading trades for {pair} "
f"since: {format_ms_time(since)}."
)
new_trades_df = trades_list_to_df(new_trades[1])
trades = concat([trades, new_trades_df], axis=0)
# Remove duplicates to make sure we're not storing data we don't need
trades = trades_df_remove_duplicates(trades)
data_handler.trades_store(pair, trades, trading_mode)
logger.debug(
"New Start: %s",
"None" if trades.empty else f"{trades.iloc[0]['date']:{DATETIME_PRINT_FORMAT}}",
)
logger.debug(
"New End: %s",
"None" if trades.empty else f"{trades.iloc[-1]['date']:{DATETIME_PRINT_FORMAT}}",
)
logger.info(f"New Amount of trades: {len(trades)}")
return True
if not since:
since = dt_ts(dt_now() - timedelta(days=new_pairs_days))
except Exception:
logger.exception(f'Failed to download and store historic trades for pair: "{pair}". ')
return False
logger.debug(
"Current Start: %s",
"None" if trades.empty else f"{trades.iloc[0]['date']:{DATETIME_PRINT_FORMAT}}",
)
logger.debug(
"Current End: %s",
"None" if trades.empty else f"{trades.iloc[-1]['date']:{DATETIME_PRINT_FORMAT}}",
)
logger.info(f"Current Amount of trades: {len(trades)}")
# Default since_ms to 30 days if nothing is given
new_trades = exchange.get_historic_trades(
pair=pair,
since=since,
until=until,
from_id=from_id,
)
new_trades_df = trades_list_to_df(new_trades[1])
trades = concat([trades, new_trades_df], axis=0)
# Remove duplicates to make sure we're not storing data we don't need
trades = trades_df_remove_duplicates(trades)
data_handler.trades_store(pair, trades, trading_mode)
logger.debug(
"New Start: %s",
"None" if trades.empty else f"{trades.iloc[0]['date']:{DATETIME_PRINT_FORMAT}}",
)
logger.debug(
"New End: %s",
"None" if trades.empty else f"{trades.iloc[-1]['date']:{DATETIME_PRINT_FORMAT}}",
)
logger.info(f"New Amount of trades: {len(trades)}")
return True
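The rewritten download keeps a 5-second overlap when resuming (since = last timestamp - 5000 ms), so the same trades can be fetched twice; duplicates are then dropped before storing. A minimal pandas sketch of that concat-and-dedup step, assuming the dedup key is the trade id (the real trades_df_remove_duplicates may use different columns):

import pandas as pd

existing = pd.DataFrame({"timestamp": [1000, 2000], "id": ["a", "b"], "price": [1.0, 1.1]})
new = pd.DataFrame({"timestamp": [2000, 3000], "id": ["b", "c"], "price": [1.1, 1.2]})

combined = pd.concat([existing, new], axis=0)
combined = combined.drop_duplicates(subset="id", keep="first").reset_index(drop=True)
# -> rows a, b, c: the overlap row "b" is stored only once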
def refresh_backtest_trades_data(
@@ -496,20 +495,22 @@ def refresh_backtest_trades_data(
new_pairs_days: int = 30,
erase: bool = False,
data_format: str = "feather",
progress_tracker: CustomProgress | None = None,
) -> list[str]:
"""
Refresh stored trades data for backtesting and hyperopt operations.
Used by freqtrade download-data subcommand.
:return: List of pairs that are not available.
"""
progress_tracker = retrieve_progress_tracker(progress_tracker)
pairs_not_available = []
data_handler = get_datahandler(datadir, data_format=data_format)
with get_progress_tracker() as progress:
with progress_tracker as progress:
pair_task = progress.add_task("Downloading data...", total=len(pairs))
for pair in pairs:
progress.update(pair_task, description=f"Downloading trades [{pair}]")
if pair not in exchange.markets:
pairs_not_available.append(pair)
pairs_not_available.append(f"{pair}: Pair not available on exchange.")
logger.info(f"Skipping pair {pair}...")
continue
@@ -518,14 +519,22 @@ def refresh_backtest_trades_data(
logger.info(f"Deleting existing data for pair {pair}.")
logger.info(f"Downloading trades for pair {pair}.")
_download_trades_history(
exchange=exchange,
pair=pair,
new_pairs_days=new_pairs_days,
timerange=timerange,
data_handler=data_handler,
trading_mode=trading_mode,
)
try:
_download_trades_history(
exchange=exchange,
pair=pair,
new_pairs_days=new_pairs_days,
timerange=timerange,
data_handler=data_handler,
trading_mode=trading_mode,
)
except ValueError as e:
pairs_not_available.append(f"{pair}: {str(e)}")
except Exception:
logger.exception(
f'Failed to download and store historic trades for pair: "{pair}". '
)
progress.update(pair_task, advance=1)
return pairs_not_available
@@ -577,23 +586,35 @@ def validate_backtest_data(
def download_data_main(config: Config) -> None:
from freqtrade.resolvers.exchange_resolver import ExchangeResolver
exchange = ExchangeResolver.load_exchange(config, validate=False)
download_data(config, exchange)
def download_data(
config: Config,
exchange: Exchange,
*,
progress_tracker: CustomProgress | None = None,
) -> None:
"""
Download data function. Used from both cli and API.
"""
timerange = TimeRange()
if "days" in config:
time_since = (datetime.now() - timedelta(days=config["days"])).strftime("%Y%m%d")
timerange = TimeRange.parse_timerange(f"{time_since}-")
if "timerange" in config:
timerange = timerange.parse_timerange(config["timerange"])
timerange = TimeRange.parse_timerange(config["timerange"])
# Remove stake-currency to skip checks which are not relevant for datadownload
config["stake_currency"] = ""
pairs_not_available: list[str] = []
# Init exchange
from freqtrade.resolvers.exchange_resolver import ExchangeResolver
exchange = ExchangeResolver.load_exchange(config, validate=False)
available_pairs = [
p
for p in exchange.get_markets(
@@ -605,17 +626,18 @@ def download_data_main(config: Config) -> None:
if "timeframes" not in config:
config["timeframes"] = DL_DATA_TIMEFRAMES
logger.info(
f"About to download pairs: {expanded_pairs}, "
f"intervals: {config['timeframes']} to {config['datadir']}"
)
if len(expanded_pairs) == 0:
logger.warning(
"No pairs available for download. "
"Please make sure you're using the correct Pair naming for your selected trade mode. \n"
f"More info: {DOCS_LINK}/bot-basics/#pair-naming"
)
return
logger.info(
f"About to download pairs: {expanded_pairs}, "
f"intervals: {config['timeframes']} to {config['datadir']}"
)
for timeframe in config["timeframes"]:
exchange.validate_timeframes(timeframe)
@@ -637,6 +659,7 @@ def download_data_main(config: Config) -> None:
erase=bool(config.get("erase")),
data_format=config["dataformat_trades"],
trading_mode=config.get("trading_mode", TradingMode.SPOT),
progress_tracker=progress_tracker,
)
if config.get("convert_trades") or not exchange.get_option("ohlcv_has_history", True):
@@ -672,10 +695,12 @@ def download_data_main(config: Config) -> None:
data_format=config["dataformat_ohlcv"],
trading_mode=config.get("trading_mode", "spot"),
prepend=config.get("prepend_data", False),
progress_tracker=progress_tracker,
)
finally:
if pairs_not_available:
logger.info(
f"Pairs [{','.join(pairs_not_available)}] not available "
f"on exchange {exchange.name}."
errors = "\n" + ("\n".join(pairs_not_available))
logger.warning(
f"Encountered a problem downloading the following pairs from {exchange.name}: "
f"{errors}"
)

View File

@@ -261,8 +261,8 @@ def calculate_expectancy(trades: pd.DataFrame) -> tuple[float, float]:
:return: expectancy, expectancy_ratio
"""
expectancy = 0
expectancy_ratio = 100
expectancy = 0.0
expectancy_ratio = 100.0
if len(trades) > 0:
winning_trades = trades.loc[trades["profit_abs"] > 0]
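The only change here is that the defaults become floats, so the return type stays consistent. As a reminder of the quantity involved, a worked example using the textbook expectancy definition (the exact formula calculate_expectancy uses is not visible in this hunk):

winrate = 0.6        # 60% of trades win
avg_win = 20.0       # average absolute profit of a winning trade
avg_loss = 10.0      # average absolute loss of a losing trade
expectancy = winrate * avg_win - (1 - winrate) * avg_loss   # 0.6*20 - 0.4*10 = 8.0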

View File

@@ -43,4 +43,5 @@ from freqtrade.exchange.hyperliquid import Hyperliquid
from freqtrade.exchange.idex import Idex
from freqtrade.exchange.kraken import Kraken
from freqtrade.exchange.kucoin import Kucoin
from freqtrade.exchange.lbank import Lbank
from freqtrade.exchange.okx import Okx

View File

@@ -3,7 +3,6 @@
import logging
from datetime import datetime, timezone
from pathlib import Path
from typing import Optional
import ccxt
@@ -53,12 +52,12 @@ class Binance(Exchange):
(TradingMode.FUTURES, MarginMode.ISOLATED)
]
def get_tickers(self, symbols: Optional[list[str]] = None, cached: bool = False) -> Tickers:
def get_tickers(self, symbols: list[str] | None = None, *, cached: bool = False) -> Tickers:
tickers = super().get_tickers(symbols=symbols, cached=cached)
if self.trading_mode == TradingMode.FUTURES:
# Binance's future result has no bid/ask values.
# Therefore we must fetch that from fetch_bids_asks and combine the two results.
bidsasks = self.fetch_bids_asks(symbols, cached)
bidsasks = self.fetch_bids_asks(symbols, cached=cached)
tickers = deep_merge_dicts(bidsasks, tickers, allow_null_overrides=False)
return tickers
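Binance futures tickers come back without bid/ask, so they are filled in from fetch_bids_asks; deep_merge_dicts with allow_null_overrides=False is assumed to keep existing non-null values rather than overwrite them. A rough, self-contained sketch of that merge idea (prices are made up, and the real helper in freqtrade.misc may behave differently):

def merge_no_null_override(source: dict, dest: dict) -> dict:
    out = dict(dest)
    for key, value in source.items():
        if isinstance(value, dict) and isinstance(out.get(key), dict):
            out[key] = merge_no_null_override(value, out[key])
        elif value is not None or key not in out:
            out[key] = value
    return out

tickers = {"BTC/USDT:USDT": {"last": 97000.0, "bid": None, "ask": None}}
bidsasks = {"BTC/USDT:USDT": {"bid": 96999.5, "ask": 97000.5}}
merged = merge_no_null_override(bidsasks, tickers)
# -> {"BTC/USDT:USDT": {"last": 97000.0, "bid": 96999.5, "ask": 97000.5}}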
@@ -106,7 +105,7 @@ class Binance(Exchange):
candle_type: CandleType,
is_new_pair: bool = False,
raise_: bool = False,
until_ms: Optional[int] = None,
until_ms: int | None = None,
) -> OHLCVResponse:
"""
Overwrite to introduce "fast new pair" functionality by detecting the pair's listing date
@@ -144,9 +143,7 @@ class Binance(Exchange):
"""
return open_date.minute == 0 and open_date.second < 15
def fetch_funding_rates(
self, symbols: Optional[list[str]] = None
) -> dict[str, dict[str, float]]:
def fetch_funding_rates(self, symbols: list[str] | None = None) -> dict[str, dict[str, float]]:
"""
Fetch funding rates for the given symbols.
:param symbols: List of symbols to fetch funding rates for
@@ -177,7 +174,7 @@ class Binance(Exchange):
leverage: float,
wallet_balance: float, # Or margin balance
open_trades: list,
) -> Optional[float]:
) -> float | None:
"""
Important: Must be fetching data from cached values as this is used by backtesting!
MARGIN: https://www.binance.com/en/support/faq/f6b010588e55413aa58b7d63ee0125ed
@@ -263,3 +260,19 @@ class Binance(Exchange):
return self.get_leverage_tiers()
else:
return {}
async def _async_get_trade_history_id_startup(
self, pair: str, since: int | None
) -> tuple[list[list], str]:
"""
override for initial call
Binance only provides a limited set of historic trades data.
Using from_id=0, we can get the earliest available trades.
So if we don't get any data with the provided "since", we can assume we need to
download all available data.
"""
t, from_id = await self._async_fetch_trades(pair, since=since)
if not t:
return [], "0"
return t, from_id

File diff suppressed because it is too large

View File

@@ -2,7 +2,6 @@
import logging
from datetime import datetime, timezone
from typing import Optional
from freqtrade.exchange import Exchange
@@ -17,7 +16,7 @@ class Bitpanda(Exchange):
"""
def get_trades_for_order(
self, order_id: str, pair: str, since: datetime, params: Optional[dict] = None
self, order_id: str, pair: str, since: datetime, params: dict | None = None
) -> list:
"""
Fetch Orders using the "fetch_my_trades" endpoint and filter them by order-id.

View File

@@ -2,16 +2,16 @@
import logging
from datetime import datetime, timedelta
from typing import Any, Optional
from typing import Any
import ccxt
from freqtrade.constants import BuySell
from freqtrade.enums import CandleType, MarginMode, PriceType, TradingMode
from freqtrade.enums import MarginMode, PriceType, TradingMode
from freqtrade.exceptions import DDosProtection, ExchangeError, OperationalException, TemporaryError
from freqtrade.exchange import Exchange
from freqtrade.exchange.common import retrier
from freqtrade.exchange.exchange_types import FtHas
from freqtrade.exchange.exchange_types import CcxtOrder, FtHas
from freqtrade.util.datetime_helpers import dt_now, dt_ts
@@ -36,11 +36,18 @@ class Bybit(Exchange):
"order_time_in_force": ["GTC", "FOK", "IOC", "PO"],
"ws_enabled": True,
"trades_has_history": False, # Endpoint doesn't support pagination
"exchange_has_overrides": {
# Bybit spot does not support fetch_order
# Unless the account is unified.
# TODO: Can be removed once bybit fully forces all accounts to unified mode.
"fetchOrder": False,
},
}
_ft_has_futures: FtHas = {
"ohlcv_has_history": True,
"mark_ohlcv_timeframe": "4h",
"funding_fee_timeframe": "8h",
"funding_fee_candle_limit": 200,
"stoploss_on_exchange": True,
"stoploss_order_types": {"limit": "limit", "market": "market"},
# bybit response parsing fails to populate stopLossPrice
@@ -51,6 +58,9 @@ class Bybit(Exchange):
PriceType.MARK: "MarkPrice",
PriceType.INDEX: "IndexPrice",
},
"exchange_has_overrides": {
"fetchOrder": True,
},
}
_supported_trading_mode_margin_pairs: list[tuple[TradingMode, MarginMode]] = [
@@ -105,14 +115,6 @@ class Bybit(Exchange):
except ccxt.BaseError as e:
raise OperationalException(e) from e
def ohlcv_candle_limit(
self, timeframe: str, candle_type: CandleType, since_ms: Optional[int] = None
) -> int:
if candle_type in (CandleType.FUNDING_RATE):
return 200
return super().ohlcv_candle_limit(timeframe, candle_type, since_ms)
def _lev_prep(self, pair: str, leverage: float, side: BuySell, accept_fail: bool = False):
if self.trading_mode != TradingMode.SPOT:
params = {"leverage": leverage}
@@ -138,6 +140,17 @@ class Bybit(Exchange):
params["position_idx"] = 0
return params
def _order_needs_price(self, side: BuySell, ordertype: str) -> bool:
# Bybit requires price for market orders - but only for classic accounts,
# and only in spot mode
return (
ordertype != "market"
or (
side == "buy" and not self.unified_account and self.trading_mode == TradingMode.SPOT
)
or self._ft_has.get("marketOrderRequiresPrice", False)
)
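A quick standalone check of the new condition (assuming the marketOrderRequiresPrice flag is False): only market buys on a classic, non-unified spot account need a price, while limit orders always do.

def order_needs_price(side: str, ordertype: str, unified_account: bool, spot: bool) -> bool:
    # Simplified re-statement of the override above, for illustration only.
    return ordertype != "market" or (side == "buy" and not unified_account and spot)

assert order_needs_price("buy", "market", unified_account=False, spot=True) is True
assert order_needs_price("buy", "market", unified_account=True, spot=True) is False
assert order_needs_price("sell", "market", unified_account=False, spot=True) is False
assert order_needs_price("sell", "limit", unified_account=True, spot=False) is True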
def dry_run_liquidation_price(
self,
pair: str,
@@ -148,7 +161,7 @@ class Bybit(Exchange):
leverage: float,
wallet_balance: float, # Or margin balance
open_trades: list,
) -> Optional[float]:
) -> float | None:
"""
Important: Must be fetching data from cached values as this is used by backtesting!
PERPETUAL:
@@ -220,7 +233,9 @@ class Bybit(Exchange):
logger.warning(f"Could not update funding fees for {pair}.")
return 0.0
def fetch_orders(self, pair: str, since: datetime, params: Optional[dict] = None) -> list[dict]:
def fetch_orders(
self, pair: str, since: datetime, params: dict | None = None
) -> list[CcxtOrder]:
"""
Fetch all orders for a pair "since"
:param pair: Pair for the query
@@ -237,7 +252,7 @@ class Bybit(Exchange):
return orders
def fetch_order(self, order_id: str, pair: str, params: Optional[dict] = None) -> dict:
def fetch_order(self, order_id: str, pair: str, params: dict | None = None) -> CcxtOrder:
if self.exchange_has("fetchOrder"):
# Set acknowledged to True to avoid ccxt exception
params = {"acknowledged": True}

View File

@@ -1,8 +1,9 @@
import asyncio
import logging
import time
from collections.abc import Callable
from functools import wraps
from typing import Any, Callable, Optional, TypeVar, cast, overload
from typing import Any, TypeVar, cast, overload
from freqtrade.constants import ExchangeConfig
from freqtrade.exceptions import DDosProtection, RetryableOrderError, TemporaryError
@@ -57,6 +58,7 @@ SUPPORTED_EXCHANGES = [
"bybit",
"gate",
"htx",
"hyperliquid",
"kraken",
"okx",
]
@@ -172,7 +174,7 @@ def retrier(_func: F, *, retries=API_RETRY_COUNT) -> F: ...
def retrier(*, retries=API_RETRY_COUNT) -> Callable[[F], F]: ...
def retrier(_func: Optional[F] = None, *, retries=API_RETRY_COUNT):
def retrier(_func: F | None = None, *, retries=API_RETRY_COUNT):
def decorator(f: F) -> F:
@wraps(f)
def wrapper(*args, **kwargs):
@@ -185,7 +187,7 @@ def retrier(_func: Optional[F] = None, *, retries=API_RETRY_COUNT):
logger.warning(msg + f"Retrying still for {count} times.")
count -= 1
kwargs.update({"count": count})
if isinstance(ex, (DDosProtection, RetryableOrderError)):
if isinstance(ex, DDosProtection | RetryableOrderError):
# increasing backoff
backoff_delay = calculate_backoff(count + 1, retries)
logger.info(f"Applying DDosProtection backoff delay: {backoff_delay}")

View File

@@ -12,7 +12,7 @@ from copy import deepcopy
from datetime import datetime, timedelta, timezone
from math import floor, isnan
from threading import Lock
from typing import Any, Literal, Optional, Union
from typing import Any, Literal, TypeGuard
import ccxt
import ccxt.pro as ccxt_pro
@@ -70,6 +70,7 @@ from freqtrade.exchange.common import (
)
from freqtrade.exchange.exchange_types import (
CcxtBalances,
CcxtOrder,
CcxtPosition,
FtHas,
OHLCVResponse,
@@ -169,7 +170,7 @@ class Exchange:
self,
config: Config,
*,
exchange_config: Optional[ExchangeConfig] = None,
exchange_config: ExchangeConfig | None = None,
validate: bool = True,
load_leverage_tiers: bool = False,
) -> None:
@@ -181,7 +182,7 @@ class Exchange:
self._api: ccxt.Exchange
self._api_async: ccxt_pro.Exchange
self._ws_async: ccxt_pro.Exchange = None
self._exchange_ws: Optional[ExchangeWS] = None
self._exchange_ws: ExchangeWS | None = None
self._markets: dict = {}
self._trading_fees: dict[str, Any] = {}
self._leverage_tiers: dict[str, list[dict]] = {}
@@ -198,8 +199,8 @@ class Exchange:
# Timestamp of last markets refresh
self._last_markets_refresh: int = 0
# Cache for 10 minutes ...
self._cache_lock = Lock()
# Cache for 10 minutes ...
self._fetch_tickers_cache: TTLCache = TTLCache(maxsize=2, ttl=60 * 10)
# Cache values for 300 seconds to avoid frequent polling of the exchange for prices
# Caching only applies to RPC methods, so prices for open trades are still
@@ -378,7 +379,7 @@ class Exchange:
logger.info("Applying additional ccxt config: %s", ccxt_kwargs)
if self._ccxt_params:
# Inject static options after the above output to not confuse users.
ccxt_kwargs = deep_merge_dicts(self._ccxt_params, ccxt_kwargs)
ccxt_kwargs = deep_merge_dicts(self._ccxt_params, deepcopy(ccxt_kwargs))
if ccxt_kwargs:
ex_config.update(ccxt_kwargs)
try:
@@ -452,28 +453,31 @@ class Exchange:
logger.info(f"API {endpoint}: {add_info_str}{response}")
def ohlcv_candle_limit(
self, timeframe: str, candle_type: CandleType, since_ms: Optional[int] = None
self, timeframe: str, candle_type: CandleType, since_ms: int | None = None
) -> int:
"""
Exchange ohlcv candle limit
Uses ohlcv_candle_limit_per_timeframe if the exchange has different limits
per timeframe (e.g. bittrex), otherwise falls back to ohlcv_candle_limit
TODO: this is most likely no longer needed since only bittrex needed this.
:param timeframe: Timeframe to check
:param candle_type: Candle-type
:param since_ms: Starting timestamp
:return: Candle limit as integer
"""
fallback_val = self._ft_has.get("ohlcv_candle_limit")
if candle_type == CandleType.FUNDING_RATE:
fallback_val = self._ft_has.get("funding_fee_candle_limit", fallback_val)
return int(
self._ft_has.get("ohlcv_candle_limit_per_timeframe", {}).get(
timeframe, str(self._ft_has.get("ohlcv_candle_limit"))
timeframe, str(fallback_val)
)
)
def get_markets(
self,
base_currencies: Optional[list[str]] = None,
quote_currencies: Optional[list[str]] = None,
base_currencies: list[str] | None = None,
quote_currencies: list[str] | None = None,
spot_only: bool = False,
margin_only: bool = False,
futures_only: bool = False,
@@ -522,6 +526,7 @@ class Exchange:
def market_is_future(self, market: dict[str, Any]) -> bool:
return (
market.get(self._ft_has["ccxt_futures_name"], False) is True
and market.get("type", False) == "swap"
and market.get("linear", False) is True
)
@@ -566,7 +571,7 @@ class Exchange:
else:
return DataFrame(columns=DEFAULT_TRADES_COLUMNS)
def get_contract_size(self, pair: str) -> Optional[float]:
def get_contract_size(self, pair: str) -> float | None:
if self.trading_mode == TradingMode.FUTURES:
market = self.markets.get(pair, {})
contract_size: float = 1.0
@@ -587,7 +592,7 @@ class Exchange:
trade["amount"] = trade["amount"] * contract_size
return trades
def _order_contracts_to_amount(self, order: dict) -> dict:
def _order_contracts_to_amount(self, order: CcxtOrder) -> CcxtOrder:
if "symbol" in order and order["symbol"] is not None:
contract_size = self.get_contract_size(order["symbol"])
if contract_size != 1:
@@ -709,7 +714,7 @@ class Exchange:
return pair
raise ValueError(f"Could not combine {curr_1} and {curr_2} to get a valid pair.")
def validate_timeframes(self, timeframe: Optional[str]) -> None:
def validate_timeframes(self, timeframe: str | None) -> None:
"""
Check if timeframe from config is a supported timeframe on the exchange
"""
@@ -839,7 +844,7 @@ class Exchange:
def validate_trading_mode_and_margin_mode(
self,
trading_mode: TradingMode,
margin_mode: Optional[MarginMode], # Only None when trading_mode = TradingMode.SPOT
margin_mode: MarginMode | None, # Only None when trading_mode = TradingMode.SPOT
):
"""
Checks if freqtrade can perform trades using the configured
@@ -855,7 +860,7 @@ class Exchange:
f"Freqtrade does not support {mm_value} {trading_mode} on {self.name}"
)
def get_option(self, param: str, default: Optional[Any] = None) -> Any:
def get_option(self, param: str, default: Any | None = None) -> Any:
"""
Get parameter value from _ft_has
"""
@@ -872,7 +877,7 @@ class Exchange:
return self._ft_has["exchange_has_overrides"][endpoint]
return endpoint in self._api_async.has and self._api_async.has[endpoint]
def get_precision_amount(self, pair: str) -> Optional[float]:
def get_precision_amount(self, pair: str) -> float | None:
"""
Returns the amount precision of the exchange.
:param pair: Pair to get precision for
@@ -880,7 +885,7 @@ class Exchange:
"""
return self.markets.get(pair, {}).get("precision", {}).get("amount", None)
def get_precision_price(self, pair: str) -> Optional[float]:
def get_precision_price(self, pair: str) -> float | None:
"""
Returns the price precision of the exchange.
:param pair: Pair to get precision for
@@ -920,8 +925,8 @@ class Exchange:
return 1 / pow(10, precision)
def get_min_pair_stake_amount(
self, pair: str, price: float, stoploss: float, leverage: Optional[float] = 1.0
) -> Optional[float]:
self, pair: str, price: float, stoploss: float, leverage: float | None = 1.0
) -> float | None:
return self._get_stake_amount_limit(pair, price, stoploss, "min", leverage)
def get_max_pair_stake_amount(self, pair: str, price: float, leverage: float = 1.0) -> float:
@@ -939,8 +944,8 @@ class Exchange:
price: float,
stoploss: float,
limit: Literal["min", "max"],
leverage: Optional[float] = 1.0,
) -> Optional[float]:
leverage: float | None = 1.0,
) -> float | None:
isMin = limit == "min"
try:
@@ -997,20 +1002,20 @@ class Exchange:
self,
pair: str,
ordertype: str,
side: str,
side: BuySell,
amount: float,
rate: float,
leverage: float,
params: Optional[dict] = None,
params: dict | None = None,
stop_loss: bool = False,
) -> dict[str, Any]:
) -> CcxtOrder:
now = dt_now()
order_id = f"dry_run_{side}_{pair}_{now.timestamp()}"
# Rounding here must respect to contract sizes
_amount = self._contracts_to_amount(
pair, self.amount_to_precision(pair, self._amount_to_contracts(pair, amount))
)
dry_order: dict[str, Any] = {
dry_order: CcxtOrder = {
"id": order_id,
"symbol": pair,
"price": rate,
@@ -1026,14 +1031,13 @@ class Exchange:
"status": "open",
"fee": None,
"info": {},
"leverage": leverage,
}
if stop_loss:
dry_order["info"] = {"stopPrice": dry_order["price"]}
dry_order[self._ft_has["stop_price_prop"]] = dry_order["price"]
# Workaround to avoid filling stoploss orders immediately
dry_order["ft_order_type"] = "stoploss"
orderbook: Optional[OrderBook] = None
orderbook: OrderBook | None = None
if self.exchange_has("fetchL2OrderBook"):
orderbook = self.fetch_l2_order_book(pair, 20)
if ordertype == "limit" and orderbook:
@@ -1055,7 +1059,7 @@ class Exchange:
"filled": _amount,
"remaining": 0.0,
"status": "closed",
"cost": (dry_order["amount"] * average),
"cost": (_amount * average),
}
)
# market orders will always incur taker fees
@@ -1072,9 +1076,9 @@ class Exchange:
def add_dry_order_fee(
self,
pair: str,
dry_order: dict[str, Any],
dry_order: CcxtOrder,
taker_or_maker: MakerTaker,
) -> dict[str, Any]:
) -> CcxtOrder:
fee = self.get_fee(pair, taker_or_maker=taker_or_maker)
dry_order.update(
{
@@ -1088,7 +1092,7 @@ class Exchange:
return dry_order
def get_dry_market_fill_price(
self, pair: str, side: str, amount: float, rate: float, orderbook: Optional[OrderBook]
self, pair: str, side: str, amount: float, rate: float, orderbook: OrderBook | None
) -> float:
"""
Get the market order fill price based on orderbook interpolation
@@ -1136,7 +1140,7 @@ class Exchange:
pair: str,
side: str,
limit: float,
orderbook: Optional[OrderBook] = None,
orderbook: OrderBook | None = None,
offset: float = 0.0,
) -> bool:
if not self.exchange_has("fetchL2OrderBook"):
@@ -1158,8 +1162,8 @@ class Exchange:
return False
def check_dry_limit_order_filled(
self, order: dict[str, Any], immediate: bool = False, orderbook: Optional[OrderBook] = None
) -> dict[str, Any]:
self, order: CcxtOrder, immediate: bool = False, orderbook: OrderBook | None = None
) -> CcxtOrder:
"""
Check dry-run limit order fill and update fee (if it filled).
"""
@@ -1186,7 +1190,7 @@ class Exchange:
return order
def fetch_dry_run_order(self, order_id) -> dict[str, Any]:
def fetch_dry_run_order(self, order_id) -> CcxtOrder:
"""
Return dry-run order
Only call if running in dry-run mode.
@@ -1230,10 +1234,10 @@ class Exchange:
params.update({"reduceOnly": True})
return params
def _order_needs_price(self, ordertype: str) -> bool:
def _order_needs_price(self, side: BuySell, ordertype: str) -> bool:
return (
ordertype != "market"
or self._api.options.get("createMarketBuyOrderRequiresPrice", False)
or (side == "buy" and self._api.options.get("createMarketBuyOrderRequiresPrice", False))
or self._ft_has.get("marketOrderRequiresPrice", False)
)
@@ -1248,7 +1252,7 @@ class Exchange:
leverage: float,
reduceOnly: bool = False,
time_in_force: str = "GTC",
) -> dict:
) -> CcxtOrder:
if self._config["dry_run"]:
dry_order = self.create_dry_run_order(
pair, ordertype, side, amount, self.price_to_precision(pair, rate), leverage
@@ -1260,7 +1264,7 @@ class Exchange:
try:
# Set the precision for amount and price(rate) as accepted by the exchange
amount = self.amount_to_precision(pair, self._amount_to_contracts(pair, amount))
needs_price = self._order_needs_price(ordertype)
needs_price = self._order_needs_price(side, ordertype)
rate_for_order = self.price_to_precision(pair, rate) if needs_price else None
if not reduceOnly:
@@ -1306,7 +1310,7 @@ class Exchange:
except ccxt.BaseError as e:
raise OperationalException(e) from e
def stoploss_adjust(self, stop_loss: float, order: dict, side: str) -> bool:
def stoploss_adjust(self, stop_loss: float, order: CcxtOrder, side: str) -> bool:
"""
Verify stop_loss against stoploss-order value (limit or price)
Returns True if adjustment is necessary.
@@ -1367,7 +1371,7 @@ class Exchange:
order_types: dict,
side: BuySell,
leverage: float,
) -> dict:
) -> CcxtOrder:
"""
creates a stoploss order.
requires `_ft_has['stoploss_order_types']` to be set as a dict mapping limit and market
@@ -1460,7 +1464,7 @@ class Exchange:
except ccxt.BaseError as e:
raise OperationalException(e) from e
def fetch_order_emulated(self, order_id: str, pair: str, params: dict) -> dict:
def fetch_order_emulated(self, order_id: str, pair: str, params: dict) -> CcxtOrder:
"""
Emulated fetch_order if the exchange doesn't support fetch_order, but requires separate
calls for open and closed orders.
@@ -1494,7 +1498,7 @@ class Exchange:
raise OperationalException(e) from e
@retrier(retries=API_FETCH_ORDER_RETRY_COUNT)
def fetch_order(self, order_id: str, pair: str, params: Optional[dict] = None) -> dict:
def fetch_order(self, order_id: str, pair: str, params: dict | None = None) -> CcxtOrder:
if self._config["dry_run"]:
return self.fetch_dry_run_order(order_id)
if params is None:
@@ -1523,12 +1527,14 @@ class Exchange:
except ccxt.BaseError as e:
raise OperationalException(e) from e
def fetch_stoploss_order(self, order_id: str, pair: str, params: Optional[dict] = None) -> dict:
def fetch_stoploss_order(
self, order_id: str, pair: str, params: dict | None = None
) -> CcxtOrder:
return self.fetch_order(order_id, pair, params)
def fetch_order_or_stoploss_order(
self, order_id: str, pair: str, stoploss_order: bool = False
) -> dict:
) -> CcxtOrder:
"""
Simple wrapper calling either fetch_order or fetch_stoploss_order depending on
the stoploss_order parameter
@@ -1540,7 +1546,7 @@ class Exchange:
return self.fetch_stoploss_order(order_id, pair)
return self.fetch_order(order_id, pair)
def check_order_canceled_empty(self, order: dict) -> bool:
def check_order_canceled_empty(self, order: CcxtOrder) -> bool:
"""
Verify if an order has been cancelled without being partially filled
:param order: Order dict as returned from fetch_order()
@@ -1549,7 +1555,7 @@ class Exchange:
return order.get("status") in NON_OPEN_EXCHANGE_STATES and order.get("filled") == 0.0
@retrier
def cancel_order(self, order_id: str, pair: str, params: Optional[dict] = None) -> dict:
def cancel_order(self, order_id: str, pair: str, params: dict | None = None) -> dict[str, Any]:
if self._config["dry_run"]:
try:
order = self.fetch_dry_run_order(order_id)
@@ -1577,19 +1583,17 @@ class Exchange:
except ccxt.BaseError as e:
raise OperationalException(e) from e
def cancel_stoploss_order(
self, order_id: str, pair: str, params: Optional[dict] = None
) -> dict:
def cancel_stoploss_order(self, order_id: str, pair: str, params: dict | None = None) -> dict:
return self.cancel_order(order_id, pair, params)
def is_cancel_order_result_suitable(self, corder) -> bool:
def is_cancel_order_result_suitable(self, corder) -> TypeGuard[CcxtOrder]:
if not isinstance(corder, dict):
return False
required = ("fee", "status", "amount")
return all(corder.get(k, None) is not None for k in required)
def cancel_order_with_result(self, order_id: str, pair: str, amount: float) -> dict:
def cancel_order_with_result(self, order_id: str, pair: str, amount: float) -> CcxtOrder:
"""
Cancel order returning a result.
Creates a fake result if cancel order returns a non-usable result
@@ -1620,7 +1624,9 @@ class Exchange:
return order
def cancel_stoploss_order_with_result(self, order_id: str, pair: str, amount: float) -> dict:
def cancel_stoploss_order_with_result(
self, order_id: str, pair: str, amount: float
) -> CcxtOrder:
"""
Cancel stoploss order returning a result.
Creates a fake result if cancel order returns a non-usable result
@@ -1651,6 +1657,7 @@ class Exchange:
balances.pop("total", None)
balances.pop("used", None)
self._log_exchange_response("fetch_balances", balances)
return balances
except ccxt.DDoSProtection as e:
raise DDosProtection(e) from e
@@ -1662,7 +1669,7 @@ class Exchange:
raise OperationalException(e) from e
@retrier
def fetch_positions(self, pair: Optional[str] = None) -> list[CcxtPosition]:
def fetch_positions(self, pair: str | None = None) -> list[CcxtPosition]:
"""
Fetch positions from the exchange.
If no pair is given, all positions are returned.
@@ -1686,7 +1693,7 @@ class Exchange:
except ccxt.BaseError as e:
raise OperationalException(e) from e
def _fetch_orders_emulate(self, pair: str, since_ms: int) -> list[dict]:
def _fetch_orders_emulate(self, pair: str, since_ms: int) -> list[CcxtOrder]:
orders = []
if self.exchange_has("fetchClosedOrders"):
orders = self._api.fetch_closed_orders(pair, since=since_ms)
@@ -1696,7 +1703,9 @@ class Exchange:
return orders
@retrier(retries=0)
def fetch_orders(self, pair: str, since: datetime, params: Optional[dict] = None) -> list[dict]:
def fetch_orders(
self, pair: str, since: datetime, params: dict | None = None
) -> list[CcxtOrder]:
"""
Fetch all orders for a pair "since"
:param pair: Pair for the query
@@ -1712,7 +1721,9 @@ class Exchange:
if not params:
params = {}
try:
orders: list[dict] = self._api.fetch_orders(pair, since=since_ms, params=params)
orders: list[CcxtOrder] = self._api.fetch_orders(
pair, since=since_ms, params=params
)
except ccxt.NotSupported:
# Some exchanges don't support fetchOrders
# attempt to fetch open and closed orders separately
@@ -1757,7 +1768,7 @@ class Exchange:
raise OperationalException(e) from e
@retrier
def fetch_bids_asks(self, symbols: Optional[list[str]] = None, cached: bool = False) -> dict:
def fetch_bids_asks(self, symbols: list[str] | None = None, *, cached: bool = False) -> dict:
"""
:param symbols: List of symbols to fetch
:param cached: Allow cached result
@@ -1790,8 +1801,9 @@ class Exchange:
raise OperationalException(e) from e
@retrier
def get_tickers(self, symbols: Optional[list[str]] = None, cached: bool = False) -> Tickers:
def get_tickers(self, symbols: list[str] | None = None, *, cached: bool = False) -> Tickers:
"""
:param symbols: List of symbols to fetch
:param cached: Allow cached result
:return: fetch_tickers result
"""
@@ -1850,7 +1862,7 @@ class Exchange:
@staticmethod
def get_next_limit_in_list(
limit: int, limit_range: Optional[list[int]], range_required: bool = True
limit: int, limit_range: list[int] | None, range_required: bool = True
):
"""
Get next greater value in the list.
@@ -1914,8 +1926,8 @@ class Exchange:
refresh: bool,
side: EntryExit,
is_short: bool,
order_book: Optional[OrderBook] = None,
ticker: Optional[Ticker] = None,
order_book: OrderBook | None = None,
ticker: Ticker | None = None,
) -> float:
"""
Calculates bid/ask target
@@ -1964,7 +1976,7 @@ class Exchange:
def _get_rate_from_ticker(
self, side: EntryExit, ticker: Ticker, conf_strategy: dict[str, Any], price_side: BidAsk
) -> Optional[float]:
) -> float | None:
"""
Get rate from ticker.
"""
@@ -2043,7 +2055,7 @@ class Exchange:
@retrier
def get_trades_for_order(
self, order_id: str, pair: str, since: datetime, params: Optional[dict] = None
self, order_id: str, pair: str, since: datetime, params: dict | None = None
) -> list:
"""
Fetch Orders using the "fetch_my_trades" endpoint and filter them by order-id.
@@ -2090,7 +2102,7 @@ class Exchange:
except ccxt.BaseError as e:
raise OperationalException(e) from e
def get_order_id_conditional(self, order: dict[str, Any]) -> str:
def get_order_id_conditional(self, order: CcxtOrder) -> str:
return order["id"]
@retrier
@@ -2139,7 +2151,7 @@ class Exchange:
raise OperationalException(e) from e
@staticmethod
def order_has_fee(order: dict) -> bool:
def order_has_fee(order: CcxtOrder) -> bool:
"""
Verifies if the passed in order dict has the needed keys to extract fees,
and that these keys (currency, cost) are not empty.
@@ -2158,7 +2170,7 @@ class Exchange:
def calculate_fee_rate(
self, fee: dict, symbol: str, cost: float, amount: float
) -> Optional[float]:
) -> float | None:
"""
Calculate fee rate if it's not given by the exchange.
:param fee: ccxt Fee dict - must contain cost / currency / rate
@@ -2197,8 +2209,8 @@ class Exchange:
return round((fee_cost * fee_to_quote_rate) / cost, 8)
def extract_cost_curr_rate(
self, fee: dict, symbol: str, cost: float, amount: float
) -> tuple[float, str, Optional[float]]:
self, fee: dict[str, Any], symbol: str, cost: float, amount: float
) -> tuple[float, str, float | None]:
"""
Extract tuple of cost, currency, rate.
Requires order_has_fee to run first!
@@ -2223,7 +2235,7 @@ class Exchange:
since_ms: int,
candle_type: CandleType,
is_new_pair: bool = False,
until_ms: Optional[int] = None,
until_ms: int | None = None,
) -> DataFrame:
"""
Get candle history using asyncio and returns the list of candles.
@@ -2257,7 +2269,7 @@ class Exchange:
candle_type: CandleType,
is_new_pair: bool = False,
raise_: bool = False,
until_ms: Optional[int] = None,
until_ms: int | None = None,
) -> OHLCVResponse:
"""
Download historic ohlcv
@@ -2302,7 +2314,7 @@ class Exchange:
pair: str,
timeframe: str,
candle_type: CandleType,
since_ms: Optional[int],
since_ms: int | None,
cache: bool,
) -> Coroutine[Any, Any, OHLCVResponse]:
not_all_data = cache and self.required_candle_call_count > 1
@@ -2371,7 +2383,7 @@ class Exchange:
)
def _build_ohlcv_dl_jobs(
self, pair_list: ListPairsWithTimeframes, since_ms: Optional[int], cache: bool
self, pair_list: ListPairsWithTimeframes, since_ms: int | None, cache: bool
) -> tuple[list[Coroutine], list[PairWithTimeframe]]:
"""
Build Coroutines to execute as part of refresh_latest_ohlcv
@@ -2448,9 +2460,9 @@ class Exchange:
self,
pair_list: ListPairsWithTimeframes,
*,
since_ms: Optional[int] = None,
since_ms: int | None = None,
cache: bool = True,
drop_incomplete: Optional[bool] = None,
drop_incomplete: bool | None = None,
) -> dict[PairWithTimeframe, DataFrame]:
"""
Refresh in-memory OHLCV asynchronously and set `_klines` with the result
@@ -2544,7 +2556,7 @@ class Exchange:
pair: str,
timeframe: str,
candle_type: CandleType,
since_ms: Optional[int] = None,
since_ms: int | None = None,
) -> OHLCVResponse:
"""
Asynchronously get candle history data using fetch_ohlcv
@@ -2618,7 +2630,7 @@ class Exchange:
pair: str,
timeframe: str,
limit: int,
since_ms: Optional[int] = None,
since_ms: int | None = None,
) -> list[list]:
"""
Fetch funding rate history - used to selectively override this by subclasses.
@@ -2677,7 +2689,7 @@ class Exchange:
async def _build_trades_dl_jobs(
self, pairwt: PairWithTimeframe, data_handler, cache: bool
) -> tuple[PairWithTimeframe, Optional[DataFrame]]:
) -> tuple[PairWithTimeframe, DataFrame | None]:
"""
Build coroutines to refresh trades for (they're then called through async.gather)
"""
@@ -2821,7 +2833,7 @@ class Exchange:
@retrier_async
async def _async_fetch_trades(
self, pair: str, since: Optional[int] = None, params: Optional[dict] = None
self, pair: str, since: int | None = None, params: dict | None = None
) -> tuple[list[list], Any]:
"""
Asynchronously gets trade history using fetch_trades.
@@ -2880,8 +2892,16 @@ class Exchange:
else:
return trades[-1].get("timestamp")
async def _async_get_trade_history_id_startup(
self, pair: str, since: int | None
) -> tuple[list[list], str]:
"""
override for initial trade_history_id call
"""
return await self._async_fetch_trades(pair, since=since)
async def _async_get_trade_history_id(
self, pair: str, until: int, since: Optional[int] = None, from_id: Optional[str] = None
self, pair: str, until: int, since: int | None = None, from_id: str | None = None
) -> tuple[str, list[list]]:
"""
Asynchronously gets trade history using fetch_trades
@@ -2906,7 +2926,7 @@ class Exchange:
# of up to an hour.
# e.g. Binance returns the "last 1000" candles within a 1h time interval
# - so we will miss the first trades.
t, from_id = await self._async_fetch_trades(pair, since=since)
t, from_id = await self._async_get_trade_history_id_startup(pair, since=since)
trades.extend(t[x])
while True:
try:
@@ -2936,7 +2956,7 @@ class Exchange:
return (pair, trades)
async def _async_get_trade_history_time(
self, pair: str, until: int, since: Optional[int] = None
self, pair: str, until: int, since: int | None = None
) -> tuple[str, list[list]]:
"""
Asynchronously gets trade history using fetch_trades,
@@ -2977,9 +2997,9 @@ class Exchange:
async def _async_get_trade_history(
self,
pair: str,
since: Optional[int] = None,
until: Optional[int] = None,
from_id: Optional[str] = None,
since: int | None = None,
until: int | None = None,
from_id: str | None = None,
) -> tuple[str, list[list]]:
"""
Async wrapper handling downloading trades using either time or id based methods.
@@ -3008,9 +3028,9 @@ class Exchange:
def get_historic_trades(
self,
pair: str,
since: Optional[int] = None,
until: Optional[int] = None,
from_id: Optional[str] = None,
since: int | None = None,
until: int | None = None,
from_id: str | None = None,
) -> tuple[str, list]:
"""
Get trade history data using asyncio.
@@ -3039,7 +3059,7 @@ class Exchange:
return self.loop.run_until_complete(task)
@retrier
def _get_funding_fees_from_exchange(self, pair: str, since: Union[datetime, int]) -> float:
def _get_funding_fees_from_exchange(self, pair: str, since: datetime | int) -> float:
"""
Returns the sum of all funding fees that were exchanged for a pair within a timeframe
Dry-run handling happens as part of _calculate_funding_fees.
@@ -3170,8 +3190,8 @@ class Exchange:
file_dump_json(filename, data)
def load_cached_leverage_tiers(
self, stake_currency: str, cache_time: Optional[timedelta] = None
) -> Optional[dict[str, list[dict]]]:
self, stake_currency: str, cache_time: timedelta | None = None
) -> dict[str, list[dict]] | None:
"""
Load cached leverage tiers from disk
:param cache_time: The maximum age of the cache before it is considered outdated
@@ -3189,7 +3209,7 @@ class Exchange:
if updated_dt < datetime.now(timezone.utc) - cache_time:
logger.info("Cached leverage tiers are outdated. Will update.")
return None
return tiers["data"]
return tiers.get("data")
except Exception:
logger.exception("Error loading cached leverage tiers. Refreshing.")
return None
@@ -3216,7 +3236,7 @@ class Exchange:
"maintAmt": float(info["cum"]) if "cum" in info else None,
}
def get_max_leverage(self, pair: str, stake_amount: Optional[float]) -> float:
def get_max_leverage(self, pair: str, stake_amount: float | None) -> float:
"""
Returns the maximum leverage that a pair can be traded at
:param pair: The base/quote currency pair being traded
@@ -3294,7 +3314,7 @@ class Exchange:
def _set_leverage(
self,
leverage: float,
pair: Optional[str] = None,
pair: str | None = None,
accept_fail: bool = False,
):
"""
@@ -3346,7 +3366,7 @@ class Exchange:
pair: str,
margin_mode: MarginMode,
accept_fail: bool = False,
params: Optional[dict] = None,
params: dict | None = None,
):
"""
Sets the margin mode on the exchange to cross or isolated for a specific pair
@@ -3381,7 +3401,7 @@ class Exchange:
amount: float,
is_short: bool,
open_date: datetime,
close_date: Optional[datetime] = None,
close_date: datetime | None = None,
) -> float:
"""
Fetches and calculates the sum of all funding fees that occurred for a pair
@@ -3434,7 +3454,7 @@ class Exchange:
@staticmethod
def combine_funding_and_mark(
funding_rates: DataFrame, mark_rates: DataFrame, futures_funding_rate: Optional[int] = None
funding_rates: DataFrame, mark_rates: DataFrame, futures_funding_rate: int | None = None
) -> DataFrame:
"""
Combine funding-rates and mark-rates dataframes
@@ -3475,7 +3495,7 @@ class Exchange:
is_short: bool,
open_date: datetime,
close_date: datetime,
time_in_ratio: Optional[float] = None,
time_in_ratio: float | None = None,
) -> float:
"""
calculates the sum of all funding fees that occurred for a pair during a futures trade
@@ -3533,8 +3553,8 @@ class Exchange:
stake_amount: float,
leverage: float,
wallet_balance: float,
open_trades: Optional[list] = None,
) -> Optional[float]:
open_trades: list | None = None,
) -> float | None:
"""
Sets the margin mode on the exchange to cross or isolated for a specific pair
"""
@@ -3582,7 +3602,7 @@ class Exchange:
leverage: float,
wallet_balance: float, # Or margin balance
open_trades: list,
) -> Optional[float]:
) -> float | None:
"""
Important: Must be fetching data from cached values as this is used by backtesting!
PERPETUAL:
@@ -3633,7 +3653,7 @@ class Exchange:
self,
pair: str,
notional_value: float,
) -> tuple[float, Optional[float]]:
) -> tuple[float, float | None]:
"""
Important: Must be fetching data from cached values as this is used by backtesting!
:param pair: Market symbol

View File

@@ -1,4 +1,4 @@
from typing import Optional, TypedDict
from typing import Any, Literal, TypedDict
from freqtrade.enums import CandleType
@@ -11,7 +11,7 @@ class FtHas(TypedDict, total=False):
# Stoploss on exchange
stoploss_on_exchange: bool
stop_price_param: str
stop_price_prop: str
stop_price_prop: Literal["stopPrice", "stopLossPrice"]
stop_price_type_field: str
stop_price_type_value_mapping: dict
stoploss_order_types: dict[str, str]
@@ -35,16 +35,17 @@ class FtHas(TypedDict, total=False):
trades_has_history: bool
trades_pagination_overlap: bool
# Orderbook
l2_limit_range: Optional[list[int]]
l2_limit_range: list[int] | None
l2_limit_range_required: bool
# Futures
ccxt_futures_name: str # usually swap
mark_ohlcv_price: str
mark_ohlcv_timeframe: str
funding_fee_timeframe: str
funding_fee_candle_limit: int
floor_leverage: bool
needs_trading_fees: bool
order_props_in_contracts: list[str]
order_props_in_contracts: list[Literal["amount", "cost", "filled", "remaining"]]
# Websocket control
ws_enabled: bool
@@ -52,14 +53,14 @@ class FtHas(TypedDict, total=False):
class Ticker(TypedDict):
symbol: str
ask: Optional[float]
askVolume: Optional[float]
bid: Optional[float]
bidVolume: Optional[float]
last: Optional[float]
quoteVolume: Optional[float]
baseVolume: Optional[float]
percentage: Optional[float]
ask: float | None
askVolume: float | None
bid: float | None
bidVolume: float | None
last: float | None
quoteVolume: float | None
baseVolume: float | None
percentage: float | None
# Several more - only listing required.
@@ -70,9 +71,9 @@ class OrderBook(TypedDict):
symbol: str
bids: list[tuple[float, float]]
asks: list[tuple[float, float]]
timestamp: Optional[int]
datetime: Optional[str]
nonce: Optional[int]
timestamp: int | None
datetime: str | None
nonce: int | None
class CcxtBalance(TypedDict):
@@ -89,10 +90,12 @@ class CcxtPosition(TypedDict):
side: str
contracts: float
leverage: float
collateral: Optional[float]
initialMargin: Optional[float]
liquidationPrice: Optional[float]
collateral: float | None
initialMargin: float | None
liquidationPrice: float | None
CcxtOrder = dict[str, Any]
# pair, timeframe, candleType, OHLCV, drop last?,
OHLCVResponse = tuple[str, str, CandleType, list, bool]
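FtHas is declared with total=False, so each exchange subclass only fills in the keys it overrides (here the futures variant adds funding_fee_candle_limit), and CcxtOrder is just an alias for the plain dicts ccxt returns. A small illustration of the total=False semantics; how freqtrade actually merges _ft_has with _ft_has_futures is not shown here:

from typing import TypedDict

class FtHasSketch(TypedDict, total=False):
    ohlcv_candle_limit: int
    funding_fee_candle_limit: int

base: FtHasSketch = {"ohlcv_candle_limit": 1000}
futures_overrides: FtHasSketch = {"funding_fee_candle_limit": 200}
merged: FtHasSketch = {**base, **futures_overrides}   # both partial dicts type-check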

View File

@@ -5,7 +5,7 @@ Exchange support utils
import inspect
from datetime import datetime, timedelta, timezone
from math import ceil, floor
from typing import Any, Optional
from typing import Any
import ccxt
from ccxt import (
@@ -33,20 +33,18 @@ from freqtrade.util import FtPrecise
CcxtModuleType = Any
def is_exchange_known_ccxt(
exchange_name: str, ccxt_module: Optional[CcxtModuleType] = None
) -> bool:
def is_exchange_known_ccxt(exchange_name: str, ccxt_module: CcxtModuleType | None = None) -> bool:
return exchange_name in ccxt_exchanges(ccxt_module)
def ccxt_exchanges(ccxt_module: Optional[CcxtModuleType] = None) -> list[str]:
def ccxt_exchanges(ccxt_module: CcxtModuleType | None = None) -> list[str]:
"""
Return the list of all exchanges known to ccxt
"""
return ccxt_module.exchanges if ccxt_module is not None else ccxt.exchanges
def available_exchanges(ccxt_module: Optional[CcxtModuleType] = None) -> list[str]:
def available_exchanges(ccxt_module: CcxtModuleType | None = None) -> list[str]:
"""
Return exchanges available to the bot, i.e. non-bad exchanges in the ccxt list
"""
@@ -54,7 +52,7 @@ def available_exchanges(ccxt_module: Optional[CcxtModuleType] = None) -> list[st
return [x for x in exchanges if validate_exchange(x)[0]]
def validate_exchange(exchange: str) -> tuple[bool, str, Optional[ccxt.Exchange]]:
def validate_exchange(exchange: str) -> tuple[bool, str, ccxt.Exchange | None]:
"""
returns: can_use, reason, exchange_object
with Reason including both missing and missing_opt
@@ -137,9 +135,7 @@ def list_available_exchanges(all_exchanges: bool) -> list[ValidExchangesType]:
return exchanges_valid
def date_minus_candles(
timeframe: str, candle_count: int, date: Optional[datetime] = None
) -> datetime:
def date_minus_candles(timeframe: str, candle_count: int, date: datetime | None = None) -> datetime:
"""
subtract X candles from a date.
:param timeframe: timeframe in string format (e.g. "5m")
@@ -166,7 +162,7 @@ def market_is_active(market: dict) -> bool:
return market.get("active", True) is not False
def amount_to_contracts(amount: float, contract_size: Optional[float]) -> float:
def amount_to_contracts(amount: float, contract_size: float | None) -> float:
"""
Convert amount to contracts.
:param amount: amount to convert
@@ -179,7 +175,7 @@ def amount_to_contracts(amount: float, contract_size: Optional[float]) -> float:
return amount
def contracts_to_amount(num_contracts: float, contract_size: Optional[float]) -> float:
def contracts_to_amount(num_contracts: float, contract_size: float | None) -> float:
"""
Takes num-contracts and converts it to contract size
:param num_contracts: number of contracts
@@ -194,7 +190,7 @@ def contracts_to_amount(num_contracts: float, contract_size: Optional[float]) ->
def amount_to_precision(
amount: float, amount_precision: Optional[float], precisionMode: Optional[int]
amount: float, amount_precision: float | None, precisionMode: int | None
) -> float:
"""
Returns the amount to buy or sell to a precision the Exchange accepts
@@ -224,9 +220,9 @@ def amount_to_precision(
def amount_to_contract_precision(
amount,
amount_precision: Optional[float],
precisionMode: Optional[int],
contract_size: Optional[float],
amount_precision: float | None,
precisionMode: int | None,
contract_size: float | None,
) -> float:
"""
Returns the amount to buy or sell to a precision the Exchange accepts
@@ -285,8 +281,8 @@ def __price_to_precision_significant_digits(
def price_to_precision(
price: float,
price_precision: Optional[float],
precisionMode: Optional[int],
price_precision: float | None,
precisionMode: int | None,
*,
rounding_mode: int = ROUND,
) -> float:
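For the conversion helpers above, a tiny worked example of the amount/contracts round trip, assuming the straightforward definition implied by the docstrings (amount divided by contract size, and back):

contract_size = 0.01              # one contract represents 0.01 of the base currency
amount = 25.0

num_contracts = amount / contract_size          # 2500.0 contracts
back_to_amount = num_contracts * contract_size  # 25.0 again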

View File

@@ -1,5 +1,4 @@
from datetime import datetime, timezone
from typing import Optional
import ccxt
from ccxt import ROUND_DOWN, ROUND_UP
@@ -51,7 +50,7 @@ def timeframe_to_resample_freq(timeframe: str) -> str:
return resample_interval
def timeframe_to_prev_date(timeframe: str, date: Optional[datetime] = None) -> datetime:
def timeframe_to_prev_date(timeframe: str, date: datetime | None = None) -> datetime:
"""
Use Timeframe and determine the candle start date for this date.
Does not round when given a candle start date.
@@ -66,7 +65,7 @@ def timeframe_to_prev_date(timeframe: str, date: Optional[datetime] = None) -> d
return dt_from_ts(new_timestamp)
def timeframe_to_next_date(timeframe: str, date: Optional[datetime] = None) -> datetime:
def timeframe_to_next_date(timeframe: str, date: datetime | None = None) -> datetime:
"""
Use Timeframe and determine next candle.
:param timeframe: timeframe in string format (e.g. "5m")
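timeframe_to_prev_date floors a date to the start of its candle (and leaves exact candle starts untouched). A generic sketch of that flooring, assuming a plain modulo on epoch seconds; the real helper relies on ccxt's ROUND_DOWN/ROUND_UP utilities imported above:

from datetime import datetime, timezone

def floor_to_timeframe(date: datetime, timeframe_seconds: int) -> datetime:
    ts = int(date.timestamp())
    return datetime.fromtimestamp(ts - (ts % timeframe_seconds), tz=timezone.utc)

d = datetime(2024, 1, 1, 12, 7, 42, tzinfo=timezone.utc)
print(floor_to_timeframe(d, 300))  # 2024-01-01 12:05:00+00:00 for a 5m timeframe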

View File

@@ -2,12 +2,15 @@
import logging
from datetime import datetime
from typing import Any, Optional
import ccxt
from freqtrade.constants import BuySell
from freqtrade.enums import MarginMode, PriceType, TradingMode
from freqtrade.exceptions import DDosProtection, OperationalException, TemporaryError
from freqtrade.exchange import Exchange
from freqtrade.exchange.exchange_types import FtHas
from freqtrade.exchange.common import retrier
from freqtrade.exchange.exchange_types import CcxtOrder, FtHas
from freqtrade.misc import safe_value_fallback2
@@ -24,6 +27,8 @@ class Gate(Exchange):
may still not work as expected.
"""
unified_account = False
_ft_has: FtHas = {
"ohlcv_candle_limit": 1000,
"order_time_in_force": ["GTC", "IOC"],
@@ -38,6 +43,7 @@ class Gate(Exchange):
_ft_has_futures: FtHas = {
"needs_trading_fees": True,
"marketOrderRequiresPrice": False,
"funding_fee_candle_limit": 90,
"stop_price_type_field": "price_type",
"stop_price_type_value_mapping": {
PriceType.LAST: 0,
@@ -53,6 +59,35 @@ class Gate(Exchange):
(TradingMode.FUTURES, MarginMode.ISOLATED)
]
@retrier
def additional_exchange_init(self) -> None:
"""
Additional exchange initialization logic.
.api will be available at this point.
Must be overridden in child classes if required.
"""
try:
if not self._config["dry_run"]:
# TODO: This should work with 4.4.34 and later.
self._api.load_unified_status()
is_unified = self._api.options.get("unifiedAccount")
# Returns a tuple of bools, first for margin, second for Account
if is_unified:
self.unified_account = True
logger.info("Gate: Unified account.")
else:
self.unified_account = False
logger.info("Gate: Classic account.")
except ccxt.DDoSProtection as e:
raise DDosProtection(e) from e
except (ccxt.OperationFailed, ccxt.ExchangeError) as e:
raise TemporaryError(
f"Error in additional_exchange_init due to {e.__class__.__name__}. Message: {e}"
) from e
except ccxt.BaseError as e:
raise OperationalException(e) from e
def _get_params(
self,
side: BuySell,
@@ -74,7 +109,7 @@ class Gate(Exchange):
return params
def get_trades_for_order(
self, order_id: str, pair: str, since: datetime, params: Optional[dict] = None
self, order_id: str, pair: str, since: datetime, params: dict | None = None
) -> list:
trades = super().get_trades_for_order(order_id, pair, since, params)
@@ -99,10 +134,12 @@ class Gate(Exchange):
}
return trades
def get_order_id_conditional(self, order: dict[str, Any]) -> str:
def get_order_id_conditional(self, order: CcxtOrder) -> str:
return safe_value_fallback2(order, order, "id_stop", "id")
def fetch_stoploss_order(self, order_id: str, pair: str, params: Optional[dict] = None) -> dict:
def fetch_stoploss_order(
self, order_id: str, pair: str, params: dict | None = None
) -> CcxtOrder:
order = self.fetch_order(order_id=order_id, pair=pair, params={"stop": True})
if order.get("status", "open") == "closed":
# Places a real order - which we need to fetch explicitly.
@@ -119,7 +156,5 @@ class Gate(Exchange):
return order1
return order
def cancel_stoploss_order(
self, order_id: str, pair: str, params: Optional[dict] = None
) -> dict:
def cancel_stoploss_order(self, order_id: str, pair: str, params: dict | None = None) -> dict:
return self.cancel_order(order_id=order_id, pair=pair, params={"stop": True})

View File

@@ -1,10 +1,14 @@
"""Hyperliquid exchange subclass"""
import logging
from datetime import datetime
from freqtrade.enums import TradingMode
from freqtrade.constants import BuySell
from freqtrade.enums import MarginMode, TradingMode
from freqtrade.exceptions import ExchangeError, OperationalException
from freqtrade.exchange import Exchange
from freqtrade.exchange.exchange_types import FtHas
from freqtrade.exchange.exchange_types import CcxtOrder, FtHas
from freqtrade.util.datetime_helpers import dt_from_ts
logger = logging.getLogger(__name__)
@@ -16,20 +20,162 @@ class Hyperliquid(Exchange):
"""
_ft_has: FtHas = {
# Only the most recent 5000 candles are available according to the
# exchange's API documentation.
"ohlcv_has_history": False,
"ohlcv_candle_limit": 5000,
"trades_has_history": False, # Trades endpoint doesn't seem available.
"l2_limit_range": [20],
"trades_has_history": False,
"tickers_have_bid_ask": False,
"stoploss_on_exchange": False,
"exchange_has_overrides": {"fetchTrades": False},
"marketOrderRequiresPrice": True,
}
_ft_has_futures: FtHas = {
"stoploss_on_exchange": True,
"stoploss_order_types": {"limit": "limit"},
"stop_price_prop": "stopPrice",
"funding_fee_timeframe": "1h",
"funding_fee_candle_limit": 500,
}
_supported_trading_mode_margin_pairs: list[tuple[TradingMode, MarginMode]] = [
(TradingMode.FUTURES, MarginMode.ISOLATED)
]
@property
def _ccxt_config(self) -> dict:
# Parameters to add directly to ccxt sync/async initialization.
# ccxt defaults to swap mode.
# ccxt Hyperliquid defaults to swap
config = {}
if self.trading_mode == TradingMode.SPOT:
config.update({"options": {"defaultType": "spot"}})
config.update(super()._ccxt_config)
return config
def get_max_leverage(self, pair: str, stake_amount: float | None) -> float:
# There are no leverage tiers
if self.trading_mode == TradingMode.FUTURES:
return self.markets[pair]["limits"]["leverage"]["max"]
else:
return 1.0
def _lev_prep(self, pair: str, leverage: float, side: BuySell, accept_fail: bool = False):
if self.trading_mode != TradingMode.SPOT:
# Hyperliquid expects leverage to be an int
leverage = int(leverage)
# Hyperliquid needs the parameter leverage.
# Don't use _set_leverage(), as this sets margin back to cross
self.set_margin_mode(pair, self.margin_mode, params={"leverage": leverage})
def dry_run_liquidation_price(
self,
pair: str,
open_rate: float, # Entry price of position
is_short: bool,
amount: float,
stake_amount: float,
leverage: float,
wallet_balance: float, # Or margin balance
open_trades: list,
) -> float | None:
"""
Optimized
Docs: https://hyperliquid.gitbook.io/hyperliquid-docs/trading/liquidations
The calculation below could be done in fewer lines of code, but written this way it matches the documentation.
Tested with 196 unique ccxt fetch_positions() position outputs
- Only first output per position where pnl=0.0
- Compare against returned liquidation price
Positions: 197 Average deviation: 0.00028980% Max deviation: 0.01309453%
Positions info:
{'leverage': {1.0: 23, 2.0: 155, 3.0: 8, 4.0: 7, 5.0: 4},
'side': {'long': 133, 'short': 64},
'symbol': {'BTC/USDC:USDC': 81,
'DOGE/USDC:USDC': 20,
'ETH/USDC:USDC': 53,
'SOL/USDC:USDC': 43}}
"""
# Defining/renaming variables to match the documentation
isolated_margin = wallet_balance
position_size = amount
price = open_rate
position_value = price * position_size
max_leverage = self.markets[pair]["limits"]["leverage"]["max"]
# Docs: The maintenance margin is half of the initial margin at max leverage,
# which varies from 3-50x. In other words, the maintenance margin is between 1%
# (for 50x max leverage assets) and 16.7% (for 3x max leverage assets)
# depending on the asset
# The key thing here is 'Half of the initial margin at max leverage'.
# A bit ambiguous, but this interpretation leads to accurate results:
# 1. Start from the position value
# 2. Assume max leverage, calculate the initial margin by dividing the position value
# by the max leverage
# 3. Divide this by 2
maintenance_margin_required = position_value / max_leverage / 2
# Docs: margin_available (isolated) = isolated_margin - maintenance_margin_required
margin_available = isolated_margin - maintenance_margin_required
# Docs: The maintenance margin is half of the initial margin at max leverage
# The docs don't explicitly specify maintenance leverage, but this works.
# Double because of the statement 'half of the initial margin at max leverage'
maintenance_leverage = max_leverage * 2
# Docs: l = 1 / MAINTENANCE_LEVERAGE (Using 'll' to comply with PEP8: E741)
ll = 1 / maintenance_leverage
# Docs: side = 1 for long and -1 for short
side = -1 if is_short else 1
# Docs: liq_price = price - side * margin_available / position_size / (1 - l * side)
liq_price = price - side * margin_available / position_size / (1 - ll * side)
if self.trading_mode == TradingMode.FUTURES:
return liq_price
else:
raise OperationalException(
"Freqtrade only supports isolated futures for leverage trading"
)
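A quick numeric sanity check of the formula documented above, using made-up values rather than real exchange data (a hypothetical 5x long on an asset whose max leverage is 50); a minimal sketch, not part of the exchange class:

```python
# Hypothetical 5x long: 1 unit bought at 100 on an asset whose max leverage is 50.
price, position_size, max_leverage = 100.0, 1.0, 50
isolated_margin = price * position_size / 5            # wallet balance backing the position: 20.0
position_value = price * position_size                 # 100.0
maintenance_margin_required = position_value / max_leverage / 2   # 1.0
margin_available = isolated_margin - maintenance_margin_required  # 19.0
ll = 1 / (max_leverage * 2)                            # 0.01
side = 1                                               # 1 for long, -1 for short
liq_price = price - side * margin_available / position_size / (1 - ll * side)
print(round(liq_price, 4))  # ~80.8081
```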
def get_funding_fees(
self, pair: str, amount: float, is_short: bool, open_date: datetime
) -> float:
"""
Fetch funding fees, either from the exchange (live) or calculates them
based on funding rate/mark price history
:param pair: The quote/base pair of the trade
:param is_short: trade direction
:param amount: Trade amount
:param open_date: Open date of the trade
:return: funding fee since open_date
:raises: ExchangeError if something goes wrong.
"""
# Hyperliquid does not have fetchFundingHistory
if self.trading_mode == TradingMode.FUTURES:
try:
return self._fetch_and_calculate_funding_fees(pair, amount, is_short, open_date)
except ExchangeError:
logger.warning(f"Could not update funding fees for {pair}.")
return 0.0
def fetch_order(self, order_id: str, pair: str, params: dict | None = None) -> CcxtOrder:
order = super().fetch_order(order_id, pair, params)
if (
order["average"] is None
and order["status"] in ("canceled", "closed")
and order["filled"] > 0
):
# Hyperliquid does not fill the average price in the order response,
# so fetch the trades and calculate the average fill price to get the
# actual price the order was executed at.
trades = self.get_trades_for_order(order_id, pair, since=dt_from_ts(order["timestamp"]))
if trades:
total_amount = sum(t["amount"] for t in trades)
order["average"] = (
sum(t["price"] * t["amount"] for t in trades) / total_amount
if total_amount
else None
)
return order
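As a rough illustration of the fallback above, the average is simply the volume-weighted fill price; the trades below are hypothetical, not real exchange output:

```python
# Hypothetical partial fills for one order.
trades = [
    {"price": 100.0, "amount": 0.4},
    {"price": 101.0, "amount": 0.6},
]
total_amount = sum(t["amount"] for t in trades)
average = sum(t["price"] * t["amount"] for t in trades) / total_amount
print(round(average, 2))  # 100.6
```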

View File

@@ -2,7 +2,7 @@
import logging
from datetime import datetime
from typing import Any, Optional
from typing import Any
import ccxt
from pandas import DataFrame
@@ -50,12 +50,28 @@ class Kraken(Exchange):
return parent_check and market.get("darkpool", False) is False
def get_tickers(self, symbols: Optional[list[str]] = None, cached: bool = False) -> Tickers:
def get_tickers(self, symbols: list[str] | None = None, *, cached: bool = False) -> Tickers:
# Only fetch tickers for current stake currency
# Otherwise the request for kraken becomes too large.
symbols = list(self.get_markets(quote_currencies=[self._config["stake_currency"]]))
return super().get_tickers(symbols=symbols, cached=cached)
def consolidate_balances(self, balances: CcxtBalances) -> CcxtBalances:
"""
Consolidate balances for the same currency.
Kraken returns ".F" balances if rewards are enabled.
"""
consolidated = {}
for currency, balance in balances.items():
base_currency = currency[:-2] if currency.endswith(".F") else currency
if base_currency in consolidated:
consolidated[base_currency]["free"] += balance["free"]
consolidated[base_currency]["used"] += balance["used"]
consolidated[base_currency]["total"] += balance["total"]
else:
consolidated[base_currency] = balance
return consolidated
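A minimal sketch of what the consolidation does, using a hypothetical balance payload (the ".F" suffix is the rewards sub-balance mentioned in the docstring); the loop mirrors the method above:

```python
# Hypothetical Kraken balance payload containing a rewards (".F") sub-balance.
balances = {
    "DOT": {"free": 5.0, "used": 0.0, "total": 5.0},
    "DOT.F": {"free": 1.0, "used": 0.0, "total": 1.0},
}
consolidated: dict[str, dict[str, float]] = {}
for currency, balance in balances.items():
    base_currency = currency[:-2] if currency.endswith(".F") else currency
    if base_currency in consolidated:
        for key in ("free", "used", "total"):
            consolidated[base_currency][key] += balance[key]
    else:
        consolidated[base_currency] = dict(balance)
print(consolidated)  # {'DOT': {'free': 6.0, 'used': 0.0, 'total': 6.0}}
```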
@retrier
def get_balances(self) -> CcxtBalances:
if self._config["dry_run"]:
@@ -68,6 +84,10 @@ class Kraken(Exchange):
balances.pop("free", None)
balances.pop("total", None)
balances.pop("used", None)
self._log_exchange_response("fetch_balances", balances)
# Consolidate balances
balances = self.consolidate_balances(balances)
orders = self._api.fetch_open_orders()
order_list = [
@@ -86,6 +106,7 @@ class Kraken(Exchange):
balances[bal]["used"] = sum(order[1] for order in order_list if order[0] == bal)
balances[bal]["free"] = balances[bal]["total"] - balances[bal]["used"]
self._log_exchange_response("fetch_balances2", balances)
return balances
except ccxt.DDoSProtection as e:
raise DDosProtection(e) from e
@@ -99,7 +120,7 @@ class Kraken(Exchange):
def _set_leverage(
self,
leverage: float,
pair: Optional[str] = None,
pair: str | None = None,
accept_fail: bool = False,
):
"""
@@ -137,7 +158,7 @@ class Kraken(Exchange):
is_short: bool,
open_date: datetime,
close_date: datetime,
time_in_ratio: Optional[float] = None,
time_in_ratio: float | None = None,
) -> float:
"""
# ! This method will always error when run by Freqtrade because time_in_ratio is never

View File

@@ -4,7 +4,7 @@ import logging
from freqtrade.constants import BuySell
from freqtrade.exchange import Exchange
from freqtrade.exchange.exchange_types import FtHas
from freqtrade.exchange.exchange_types import CcxtOrder, FtHas
logger = logging.getLogger(__name__)
@@ -47,7 +47,7 @@ class Kucoin(Exchange):
leverage: float,
reduceOnly: bool = False,
time_in_force: str = "GTC",
) -> dict:
) -> CcxtOrder:
res = super().create_order(
pair=pair,
ordertype=ordertype,

View File

@@ -0,0 +1,21 @@
"""Lbank exchange subclass"""
import logging
from freqtrade.exchange import Exchange
from freqtrade.exchange.exchange_types import FtHas
logger = logging.getLogger(__name__)
class Lbank(Exchange):
"""
Lbank exchange class. Contains adjustments needed for Freqtrade to work
with this exchange.
"""
_ft_has: FtHas = {
"ohlcv_candle_limit": 1998, # lower than the allowed 2000 to avoid current_candle issue
"trades_has_history": False,
}

View File

@@ -1,6 +1,5 @@
import logging
from datetime import timedelta
from typing import Any, Optional
import ccxt
@@ -14,7 +13,7 @@ from freqtrade.exceptions import (
)
from freqtrade.exchange import Exchange, date_minus_candles
from freqtrade.exchange.common import API_RETRY_COUNT, retrier
from freqtrade.exchange.exchange_types import FtHas
from freqtrade.exchange.exchange_types import CcxtOrder, FtHas
from freqtrade.misc import safe_value_fallback2
from freqtrade.util import dt_now, dt_ts
@@ -60,7 +59,7 @@ class Okx(Exchange):
_ccxt_params: dict = {"options": {"brokerId": "ffb5405ad327SUDE"}}
def ohlcv_candle_limit(
self, timeframe: str, candle_type: CandleType, since_ms: Optional[int] = None
self, timeframe: str, candle_type: CandleType, since_ms: int | None = None
) -> int:
"""
Exchange ohlcv candle limit
@@ -191,7 +190,7 @@ class Okx(Exchange):
params["posSide"] = self._get_posSide(side, True)
return params
def _convert_stop_order(self, pair: str, order_id: str, order: dict) -> dict:
def _convert_stop_order(self, pair: str, order_id: str, order: CcxtOrder) -> CcxtOrder:
if (
order.get("status", "open") == "closed"
and (real_order_id := order.get("info", {}).get("ordId")) is not None
@@ -209,7 +208,9 @@ class Okx(Exchange):
return order
@retrier(retries=API_RETRY_COUNT)
def fetch_stoploss_order(self, order_id: str, pair: str, params: Optional[dict] = None) -> dict:
def fetch_stoploss_order(
self, order_id: str, pair: str, params: dict | None = None
) -> CcxtOrder:
if self._config["dry_run"]:
return self.fetch_dry_run_order(order_id)
@@ -231,7 +232,7 @@ class Okx(Exchange):
return self._fetch_stop_order_fallback(order_id, pair)
def _fetch_stop_order_fallback(self, order_id: str, pair: str) -> dict:
def _fetch_stop_order_fallback(self, order_id: str, pair: str) -> CcxtOrder:
params2 = {"stop": True, "ordType": "conditional"}
for method in (
self._api.fetch_open_orders,
@@ -256,14 +257,12 @@ class Okx(Exchange):
raise OperationalException(e) from e
raise RetryableOrderError(f"StoplossOrder not found (pair: {pair} id: {order_id}).")
def get_order_id_conditional(self, order: dict[str, Any]) -> str:
def get_order_id_conditional(self, order: CcxtOrder) -> str:
if order.get("type", "") == "stop":
return safe_value_fallback2(order, order, "id_stop", "id")
return order["id"]
def cancel_stoploss_order(
self, order_id: str, pair: str, params: Optional[dict] = None
) -> dict:
def cancel_stoploss_order(self, order_id: str, pair: str, params: dict | None = None) -> dict:
params1 = {"stop": True}
# 'ordType': 'conditional'
#
@@ -273,7 +272,7 @@ class Okx(Exchange):
params=params1,
)
def _fetch_orders_emulate(self, pair: str, since_ms: int) -> list[dict]:
def _fetch_orders_emulate(self, pair: str, since_ms: int) -> list[CcxtOrder]:
orders = []
orders = self._api.fetch_closed_orders(pair, since=since_ms)

View File

@@ -2,7 +2,6 @@ import logging
import random
from abc import abstractmethod
from enum import Enum
from typing import Optional, Union
import gymnasium as gym
import numpy as np
@@ -140,7 +139,7 @@ class BaseEnvironment(gym.Env):
self._end_tick: int = len(self.prices) - 1
self._done: bool = False
self._current_tick: int = self._start_tick
self._last_trade_tick: Optional[int] = None
self._last_trade_tick: int | None = None
self._position = Positions.Neutral
self._position_history: list = [None]
self.total_reward: float = 0
@@ -173,8 +172,8 @@ class BaseEnvironment(gym.Env):
def tensorboard_log(
self,
metric: str,
value: Optional[Union[int, float]] = None,
inc: Optional[bool] = None,
value: int | float | None = None,
inc: bool | None = None,
category: str = "custom",
):
"""

View File

@@ -2,9 +2,10 @@ import copy
import importlib
import logging
from abc import abstractmethod
from collections.abc import Callable
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Callable, Optional, Union
from typing import Any
import gymnasium as gym
import numpy as np
@@ -49,9 +50,9 @@ class BaseReinforcementLearningModel(IFreqaiModel):
)
th.set_num_threads(self.max_threads)
self.reward_params = self.freqai_info["rl_config"]["model_reward_parameters"]
self.train_env: Union[VecMonitor, SubprocVecEnv, gym.Env] = gym.Env()
self.eval_env: Union[VecMonitor, SubprocVecEnv, gym.Env] = gym.Env()
self.eval_callback: Optional[MaskableEvalCallback] = None
self.train_env: VecMonitor | SubprocVecEnv | gym.Env = gym.Env()
self.eval_env: VecMonitor | SubprocVecEnv | gym.Env = gym.Env()
self.eval_callback: MaskableEvalCallback | None = None
self.model_type = self.freqai_info["rl_config"]["model_type"]
self.rl_config = self.freqai_info["rl_config"]
self.df_raw: DataFrame = DataFrame()

View File

@@ -39,8 +39,8 @@ class BasePyTorchClassifier(BasePyTorchModel):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.class_name_to_index = None
self.index_to_class_name = None
self.class_name_to_index = {}
self.index_to_class_name = {}
def predict(
self, unfiltered_df: DataFrame, dk: FreqaiDataKitchen, **kwargs

View File

@@ -20,7 +20,11 @@ class BasePyTorchModel(IFreqaiModel, ABC):
def __init__(self, **kwargs):
super().__init__(config=kwargs["config"])
self.dd.model_type = "pytorch"
self.device = "cuda" if torch.cuda.is_available() else "cpu"
self.device = (
"mps"
if torch.backends.mps.is_available() and torch.backends.mps.is_built()
else ("cuda" if torch.cuda.is_available() else "cpu")
)
test_size = self.freqai_info.get("data_split_parameters", {}).get("test_size")
self.splits = ["train", "test"] if test_size != 0 else ["train"]
self.window_size = self.freqai_info.get("conv_width", 1)

View File

@@ -5,7 +5,7 @@ import random
import shutil
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Optional
from typing import Any
import numpy as np
import numpy.typing as npt
@@ -111,7 +111,7 @@ class FreqaiDataKitchen:
def set_paths(
self,
pair: str,
trained_timestamp: Optional[int] = None,
trained_timestamp: int | None = None,
) -> None:
"""
Set the paths to the data for the present coin/botloop

View File

@@ -5,7 +5,7 @@ from abc import ABC, abstractmethod
from collections import deque
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Literal, Optional
from typing import Any, Literal
import datasieve.transforms as ds
import numpy as np
@@ -106,7 +106,7 @@ class IFreqaiModel(ABC):
self._threads: list[threading.Thread] = []
self._stop_event = threading.Event()
self.metadata: dict[str, Any] = self.dd.load_global_metadata_from_disk()
self.data_provider: Optional[DataProvider] = None
self.data_provider: DataProvider | None = None
self.max_system_threads = max(int(psutil.cpu_count() * 2 - 2), 1)
self.can_short = True # overridden in start() with strategy.can_short
self.model: Any = None
@@ -185,6 +185,7 @@ class IFreqaiModel(ABC):
Callback for Subclasses to override to include logic for shutting down resources
when SIGINT is sent.
"""
self.dd.save_historic_predictions_to_disk()
return
def shutdown(self):
@@ -198,9 +199,16 @@ class IFreqaiModel(ABC):
self.data_provider = None
self._on_stop()
logger.info("Waiting on Training iteration")
for _thread in self._threads:
_thread.join()
if self.freqai_info.get("wait_for_training_iteration_on_reload", True):
logger.info("Waiting on Training iteration")
for _thread in self._threads:
_thread.join()
else:
logger.warning(
"Breaking current training iteration because "
"you set wait_for_training_iteration_on_reload to "
" False."
)
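Presumably this behaviour is driven by a flag in the freqai section of the configuration; a hypothetical fragment shown as a Python dict (the key name is taken from the code above, its placement under "freqai" is an assumption):

```python
# Hypothetical configuration fragment (dict form, illustrative only).
config = {
    "freqai": {
        "wait_for_training_iteration_on_reload": False,  # don't block reload on a running training iteration
        # ... the rest of the freqai settings ...
    },
}
```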
def start_scanning(self, *args, **kwargs) -> None:
"""
@@ -286,7 +294,9 @@ class IFreqaiModel(ABC):
# tr_backtest is the backtesting time range e.g. the week directly
# following tr_train. Both of these windows slide through the
# entire backtest
for tr_train, tr_backtest in zip(dk.training_timeranges, dk.backtesting_timeranges):
for tr_train, tr_backtest in zip(
dk.training_timeranges, dk.backtesting_timeranges, strict=False
):
(_, _) = self.dd.get_pair_dict_info(pair)
train_it += 1
total_trains = len(dk.backtesting_timeranges)

View File

@@ -1,6 +1,6 @@
import logging
from pathlib import Path
from typing import Any, Optional
from typing import Any
import torch as th
from stable_baselines3.common.callbacks import ProgressBarCallback
@@ -78,7 +78,7 @@ class ReinforcementLearner(BaseReinforcementLearningModel):
model = self.dd.model_dictionary[dk.pair]
model.set_env(self.train_env)
callbacks: list[Any] = [self.eval_callback, self.tensorboard_callback]
progressbar_callback: Optional[ProgressBarCallback] = None
progressbar_callback: ProgressBarCallback | None = None
if self.rl_config.get("progress_bar", False):
progressbar_callback = ProgressBarCallback()
callbacks.insert(0, progressbar_callback)

View File

@@ -1,5 +1,5 @@
from enum import Enum
from typing import Any, Union
from typing import Any
from stable_baselines3.common.callbacks import BaseCallback
from stable_baselines3.common.logger import HParam
@@ -27,7 +27,7 @@ class TensorboardCallback(BaseCallback):
# "batch_size": self.model.batch_size,
# "n_steps": self.model.n_steps,
}
metric_dict: dict[str, Union[float, int]] = {
metric_dict: dict[str, float | int] = {
"eval/mean_reward": 0,
"rollout/ep_rew_mean": 0,
"rollout/ep_len_mean": 0,

View File

@@ -45,7 +45,7 @@ class TensorBoardCallback(BaseTensorBoardCallback):
return False
evals = ["validation", "train"]
for metric, eval_ in zip(evals_log.items(), evals):
for metric, eval_ in zip(evals_log.items(), evals, strict=False):
for metric_name, log in metric[1].items():
score = log[-1][0] if isinstance(log[-1], tuple) else log[-1]
self.writer.add_scalar(f"{eval_}-{metric_name}", score, epoch)

View File

@@ -1,6 +1,6 @@
import logging
from pathlib import Path
from typing import Any, Optional
from typing import Any
import pandas as pd
import torch
@@ -50,8 +50,8 @@ class PyTorchModelTrainer(PyTorchTrainerInterface):
self.criterion = criterion
self.model_meta_data = model_meta_data
self.device = device
self.n_epochs: Optional[int] = kwargs.get("n_epochs", 10)
self.n_steps: Optional[int] = kwargs.get("n_steps", None)
self.n_epochs: int | None = kwargs.get("n_epochs", 10)
self.n_steps: int | None = kwargs.get("n_steps", None)
if self.n_steps is None and not self.n_epochs:
raise Exception("Either `n_steps` or `n_epochs` should be set.")

View File

@@ -107,7 +107,7 @@ def plot_feature_importance(
# Extract feature importance from model
models = {}
if "FreqaiMultiOutputRegressor" in str(model.__class__):
for estimator, label in zip(model.estimators_, dk.label_list):
for estimator, label in zip(model.estimators_, dk.label_list, strict=False):
models[label] = estimator
else:
models[dk.label_list[0]] = model

View File

@@ -9,7 +9,7 @@ from datetime import datetime, time, timedelta, timezone
from math import isclose
from threading import Lock
from time import sleep
from typing import Any, Optional
from typing import Any
from schedule import Scheduler
@@ -43,6 +43,7 @@ from freqtrade.exchange import (
timeframe_to_next_date,
timeframe_to_seconds,
)
from freqtrade.exchange.exchange_types import CcxtOrder
from freqtrade.leverage.liquidation_price import update_liquidation_prices
from freqtrade.misc import safe_value_fallback, safe_value_fallback2
from freqtrade.mixins import LoggingMixin
@@ -111,7 +112,7 @@ class FreqtradeBot(LoggingMixin):
self.trading_mode: TradingMode = self.config.get("trading_mode", TradingMode.SPOT)
self.margin_mode: MarginMode = self.config.get("margin_mode", MarginMode.NONE)
self.last_process: Optional[datetime] = None
self.last_process: datetime | None = None
# RPC runs in separate threads, can start handling external commands just after
# initialization, even before Freqtradebot has a chance to start its throttling,
@@ -325,7 +326,7 @@ class FreqtradeBot(LoggingMixin):
}
self.rpc.send_msg(msg)
def _refresh_active_whitelist(self, trades: Optional[list[Trade]] = None) -> list[str]:
def _refresh_active_whitelist(self, trades: list[Trade] | None = None) -> list[str]:
"""
Refresh active whitelist from pairlist or edge and extend it with
pairs that have open trades.
@@ -579,7 +580,7 @@ class FreqtradeBot(LoggingMixin):
logger.warning(
f"{trade} has a total of {trade.amount} {trade.base_currency}, "
f"but the Wallet shows a total of {total} {trade.base_currency}. "
f"Adjusting trade amount to {total}."
f"Adjusting trade amount to {total}. "
"This may however lead to further issues."
)
trade.amount = total
@@ -587,7 +588,7 @@ class FreqtradeBot(LoggingMixin):
logger.warning(
f"{trade} has a total of {trade.amount} {trade.base_currency}, "
f"but the Wallet shows a total of {total} {trade.base_currency}. "
"Refusing to adjust as the difference is too large."
"Refusing to adjust as the difference is too large. "
"This may however lead to further issues."
)
if prev_trade_amount != trade.amount:
@@ -865,14 +866,14 @@ class FreqtradeBot(LoggingMixin):
self,
pair: str,
stake_amount: float,
price: Optional[float] = None,
price: float | None = None,
*,
is_short: bool = False,
ordertype: Optional[str] = None,
enter_tag: Optional[str] = None,
trade: Optional[Trade] = None,
ordertype: str | None = None,
enter_tag: str | None = None,
trade: Trade | None = None,
mode: EntryExecuteMode = "initial",
leverage_: Optional[float] = None,
leverage_: float | None = None,
) -> bool:
"""
Executes an entry for the given pair
@@ -1089,13 +1090,13 @@ class FreqtradeBot(LoggingMixin):
def get_valid_enter_price_and_stake(
self,
pair: str,
price: Optional[float],
price: float | None,
stake_amount: float,
trade_side: LongShort,
entry_tag: Optional[str],
trade: Optional[Trade],
entry_tag: str | None,
trade: Trade | None,
mode: EntryExecuteMode,
leverage_: Optional[float],
leverage_: float | None,
) -> tuple[float, float, float]:
"""
Validate and eventually adjust (within limits) limit, amount and leverage
@@ -1191,7 +1192,7 @@ class FreqtradeBot(LoggingMixin):
self,
trade: Trade,
order: Order,
order_type: Optional[str],
order_type: str | None,
fill: bool = False,
sub_trade: bool = False,
) -> None:
@@ -1206,6 +1207,13 @@ class FreqtradeBot(LoggingMixin):
current_rate = self.exchange.get_rate(
trade.pair, side="entry", is_short=trade.is_short, refresh=False
)
stake_amount = trade.stake_amount
if not fill:
# If we have open orders, we need to add the stake amount of the open orders
# as it's not yet included in the trade.stake_amount
stake_amount += sum(
o.stake_amount for o in trade.open_orders if o.ft_order_side == trade.entry_side
)
msg: RPCEntryMsg = {
"trade_id": trade.id,
@@ -1219,12 +1227,12 @@ class FreqtradeBot(LoggingMixin):
"limit": open_rate, # Deprecated (?)
"open_rate": open_rate,
"order_type": order_type or "unknown",
"stake_amount": trade.stake_amount,
"stake_amount": stake_amount,
"stake_currency": self.config["stake_currency"],
"base_currency": self.exchange.get_pair_base_currency(trade.pair),
"quote_currency": self.exchange.get_pair_quote_currency(trade.pair),
"fiat_currency": self.config.get("fiat_display_currency", None),
"amount": order.safe_amount_after_fee if fill else (order.amount or trade.amount),
"amount": order.safe_amount_after_fee if fill else (order.safe_amount or trade.amount),
"open_date": trade.open_date_utc or datetime.now(timezone.utc),
"current_rate": current_rate,
"sub_trade": sub_trade,
@@ -1355,7 +1363,7 @@ class FreqtradeBot(LoggingMixin):
return False
def _check_and_execute_exit(
self, trade: Trade, exit_rate: float, enter: bool, exit_: bool, exit_tag: Optional[str]
self, trade: Trade, exit_rate: float, enter: bool, exit_: bool, exit_tag: str | None
) -> bool:
"""
Check and execute trade exit
@@ -1475,7 +1483,7 @@ class FreqtradeBot(LoggingMixin):
return False
def handle_trailing_stoploss_on_exchange(self, trade: Trade, order: dict) -> None:
def handle_trailing_stoploss_on_exchange(self, trade: Trade, order: CcxtOrder) -> None:
"""
Check to see if stoploss on exchange should be updated
in case of trailing stoploss on exchange
@@ -1513,7 +1521,7 @@ class FreqtradeBot(LoggingMixin):
f"Could not create trailing stoploss order for pair {trade.pair}."
)
def manage_trade_stoploss_orders(self, trade: Trade, stoploss_orders: list[dict]):
def manage_trade_stoploss_orders(self, trade: Trade, stoploss_orders: list[CcxtOrder]):
"""
Perform required actions according to existing stoploss orders of trade
:param trade: Corresponding Trade
@@ -1589,7 +1597,9 @@ class FreqtradeBot(LoggingMixin):
else:
self.replace_order(order, open_order, trade)
def handle_cancel_order(self, order: dict, order_obj: Order, trade: Trade, reason: str) -> None:
def handle_cancel_order(
self, order: CcxtOrder, order_obj: Order, trade: Trade, reason: str
) -> None:
"""
Check if current analyzed order timed out and cancel if necessary.
:param order: Order dict grabbed with exchange.fetch_order()
@@ -1611,7 +1621,7 @@ class FreqtradeBot(LoggingMixin):
self.emergency_exit(trade, order["price"], order["amount"])
def emergency_exit(
self, trade: Trade, price: float, sub_trade_amt: Optional[float] = None
self, trade: Trade, price: float, sub_trade_amt: float | None = None
) -> None:
try:
self.execute_trade_exit(
@@ -1641,7 +1651,7 @@ class FreqtradeBot(LoggingMixin):
)
trade.delete()
def replace_order(self, order: dict, order_obj: Optional[Order], trade: Trade) -> None:
def replace_order(self, order: CcxtOrder, order_obj: Order | None, trade: Trade) -> None:
"""
Check if current analyzed entry order should be replaced or simply cancelled.
To simply cancel the existing order(no replacement) adjust_entry_price() should return None
@@ -1784,10 +1794,10 @@ class FreqtradeBot(LoggingMixin):
def handle_cancel_enter(
self,
trade: Trade,
order: dict,
order: CcxtOrder,
order_obj: Order,
reason: str,
replacing: Optional[bool] = False,
replacing: bool | None = False,
) -> bool:
"""
entry cancel - cancel order
@@ -1868,7 +1878,9 @@ class FreqtradeBot(LoggingMixin):
)
return was_trade_fully_canceled
def handle_cancel_exit(self, trade: Trade, order: dict, order_obj: Order, reason: str) -> bool:
def handle_cancel_exit(
self, trade: Trade, order: CcxtOrder, order_obj: Order, reason: str
) -> bool:
"""
exit order cancel - cancel order and update trade
:return: True if exit order was cancelled, false otherwise
@@ -1979,9 +1991,9 @@ class FreqtradeBot(LoggingMixin):
limit: float,
exit_check: ExitCheckTuple,
*,
exit_tag: Optional[str] = None,
ordertype: Optional[str] = None,
sub_trade_amt: Optional[float] = None,
exit_tag: str | None = None,
ordertype: str | None = None,
sub_trade_amt: float | None = None,
) -> bool:
"""
Executes a trade exit for the given trade and limit
@@ -2104,10 +2116,10 @@ class FreqtradeBot(LoggingMixin):
def _notify_exit(
self,
trade: Trade,
order_type: Optional[str],
order_type: str | None,
fill: bool = False,
sub_trade: bool = False,
order: Optional[Order] = None,
order: Order | None = None,
) -> None:
"""
Sends rpc notification when a sell occurred.
@@ -2220,7 +2232,7 @@ class FreqtradeBot(LoggingMixin):
# Send the message
self.rpc.send_msg(msg)
def order_obj_or_raise(self, order_id: str, order_obj: Optional[Order]) -> Order:
def order_obj_or_raise(self, order_id: str, order_obj: Order | None) -> Order:
if not order_obj:
raise DependencyException(
f"Order_obj not found for {order_id}. This should not have happened."
@@ -2234,8 +2246,8 @@ class FreqtradeBot(LoggingMixin):
def update_trade_state(
self,
trade: Trade,
order_id: Optional[str],
action_order: Optional[dict[str, Any]] = None,
order_id: str | None,
action_order: CcxtOrder | None = None,
*,
stoploss_order: bool = False,
send_msg: bool = True,
@@ -2346,7 +2358,7 @@ class FreqtradeBot(LoggingMixin):
def handle_protections(self, pair: str, side: LongShort) -> None:
# Lock pair for one candle to prevent immediate re-entries
self.strategy.lock_pair(pair, datetime.now(timezone.utc), reason="Auto lock")
self.strategy.lock_pair(pair, datetime.now(timezone.utc), reason="Auto lock", side=side)
prot_trig = self.protections.stop_per_pair(pair, side=side)
if prot_trig:
msg: RPCProtectionMsg = {
@@ -2372,7 +2384,7 @@ class FreqtradeBot(LoggingMixin):
amount: float,
fee_abs: float,
order_obj: Order,
) -> Optional[float]:
) -> float | None:
"""
Applies the fee to amount (either from Order or from Trades).
Can eat into dust if more than the required asset is available.
@@ -2400,7 +2412,7 @@ class FreqtradeBot(LoggingMixin):
return fee_abs
return None
def handle_order_fee(self, trade: Trade, order_obj: Order, order: dict[str, Any]) -> None:
def handle_order_fee(self, trade: Trade, order_obj: Order, order: CcxtOrder) -> None:
# Try update amount (binance-fix)
try:
fee_abs = self.get_real_amount(trade, order, order_obj)
@@ -2409,7 +2421,7 @@ class FreqtradeBot(LoggingMixin):
except DependencyException as exception:
logger.warning("Could not update trade amount: %s", exception)
def get_real_amount(self, trade: Trade, order: dict, order_obj: Order) -> Optional[float]:
def get_real_amount(self, trade: Trade, order: CcxtOrder, order_obj: Order) -> float | None:
"""
Detect and update trade fee.
Calls trade.update_fee() upon correct detection.
@@ -2469,8 +2481,8 @@ class FreqtradeBot(LoggingMixin):
return True
def fee_detection_from_trades(
self, trade: Trade, order: dict, order_obj: Order, order_amount: float, trades: list
) -> Optional[float]:
self, trade: Trade, order: CcxtOrder, order_obj: Order, order_amount: float, trades: list
) -> float | None:
"""
fee-detection fallback to Trades.
Either uses provided trades list or the result of fetch_my_trades to get correct fee.
@@ -2481,7 +2493,7 @@ class FreqtradeBot(LoggingMixin):
)
if len(trades) == 0:
logger.info("Applying fee on amount for %s failed: myTrade-Dict empty found", trade)
logger.info("Applying fee on amount for %s failed: myTrade-dict empty found", trade)
return None
fee_currency = None
amount = 0

View File

@@ -1,4 +1,4 @@
from typing import Any, Optional
from typing import Any
from typing_extensions import TypedDict
@@ -26,7 +26,7 @@ class BacktestHistoryEntryType(BacktestMetadataType):
filename: str
strategy: str
notes: str
backtest_start_ts: Optional[int]
backtest_end_ts: Optional[int]
timeframe: Optional[str]
timeframe_detail: Optional[str]
backtest_start_ts: int | None
backtest_end_ts: int | None
timeframe: str | None
timeframe_detail: str | None

View File

@@ -1,5 +1,4 @@
# Used for list-exchanges
from typing import Optional
from typing_extensions import TypedDict
@@ -17,5 +16,5 @@ class ValidExchangesType(TypedDict):
comment: str
dex: bool
is_alias: bool
alias_for: Optional[str]
alias_for: str | None
trade_modes: list[TradeModeType]

View File

@@ -1,5 +1,4 @@
import logging
from typing import Optional
from freqtrade.enums import MarginMode
from freqtrade.exceptions import DependencyException
@@ -12,7 +11,7 @@ logger = logging.getLogger(__name__)
def update_liquidation_prices(
trade: Optional[LocalTrade] = None,
trade: LocalTrade | None = None,
*,
exchange: Exchange,
wallets: Wallets,

View File

@@ -1,6 +1,7 @@
import logging
from logging import Formatter
from logging.handlers import RotatingFileHandler, SysLogHandler
from pathlib import Path
from freqtrade.constants import Config
from freqtrade.exceptions import OperationalException
@@ -86,11 +87,23 @@ def setup_logging(config: Config) -> None:
handler_rf = get_existing_handlers(RotatingFileHandler)
if handler_rf:
logging.root.removeHandler(handler_rf)
handler_rf = RotatingFileHandler(
logfile,
maxBytes=1024 * 1024 * 10, # 10Mb
backupCount=10,
)
try:
logfile_path = Path(logfile)
logfile_path.parent.mkdir(parents=True, exist_ok=True)
handler_rf = RotatingFileHandler(
logfile_path,
maxBytes=1024 * 1024 * 10, # 10Mb
backupCount=10,
)
except PermissionError:
raise OperationalException(
f'Failed to create or access log file "{logfile_path.absolute()}". '
"Please make sure you have the write permission to the log file or its parent "
"directories. If you're running freqtrade using docker, you see this error "
"message probably because you've logged in as the root user, please switch to "
"non-root user, delete and recreate the directories you need, and then try "
"again."
)
handler_rf.setFormatter(Formatter(LOGFORMAT))
logging.root.addHandler(handler_rf)

View File

@@ -6,11 +6,11 @@ Read the documentation to know what cli arguments you need.
import logging
import sys
from typing import Any, Optional
from typing import Any
# check min. python version
if sys.version_info < (3, 10): # pragma: no cover
if sys.version_info < (3, 10): # pragma: no cover # noqa: UP036
sys.exit("Freqtrade requires Python version >= 3.10")
from freqtrade import __version__
@@ -24,7 +24,7 @@ from freqtrade.system import asyncio_setup, gc_set_threshold
logger = logging.getLogger("freqtrade")
def main(sysargv: Optional[list[str]] = None) -> None:
def main(sysargv: list[str] | None = None) -> None:
"""
This function will initiate the bot and start the trading loop.
:return: None

View File

@@ -7,7 +7,7 @@ import logging
from collections.abc import Iterator, Mapping
from io import StringIO
from pathlib import Path
from typing import Any, Optional, TextIO, Union
from typing import Any, TextIO
from urllib.parse import urlparse
import pandas as pd
@@ -129,10 +129,10 @@ def round_dict(d, n):
return {k: (round(v, n) if isinstance(v, float) else v) for k, v in d.items()}
DictMap = Union[dict[str, Any], Mapping[str, Any]]
DictMap = dict[str, Any] | Mapping[str, Any]
def safe_value_fallback(obj: DictMap, key1: str, key2: Optional[str] = None, default_value=None):
def safe_value_fallback(obj: DictMap, key1: str, key2: str | None = None, default_value=None):
"""
Search a value in obj, return this if it's not None.
Then search key2 in obj - return that if it's not None - then use default_value.
@@ -161,7 +161,7 @@ def safe_value_fallback2(dict1: DictMap, dict2: DictMap, key1: str, key2: str, d
return default_value
def plural(num: float, singular: str, plural: Optional[str] = None) -> str:
def plural(num: float, singular: str, plural: str | None = None) -> str:
return singular if (num == 1 or num == -1) else plural or singular + "s"
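For reference, a short usage sketch of the two helpers touched in this hunk; the input dict is hypothetical:

```python
from freqtrade.misc import plural, safe_value_fallback

order = {"average": None, "price": 1.23}  # hypothetical order payload
safe_value_fallback(order, "average", "price")  # -> 1.23 (key1 is None, falls back to key2)
safe_value_fallback(order, "foo", "bar", default_value=0.0)  # -> 0.0 (neither key present)
plural(1, "epoch"), plural(3, "epoch")  # -> ("epoch", "epochs")
```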

View File

@@ -1,4 +1,4 @@
from typing import Callable
from collections.abc import Callable
from cachetools import TTLCache, cached

View File

@@ -1,7 +1,7 @@
import logging
import time
from pathlib import Path
from typing import Any, Union
from typing import Any
import pandas as pd
from rich.text import Text
@@ -21,7 +21,7 @@ class LookaheadAnalysisSubFunctions:
def text_table_lookahead_analysis_instances(
config: dict[str, Any],
lookahead_instances: list[LookaheadAnalysis],
caption: Union[str, None] = None,
caption: str | None = None,
):
headers = [
"filename",
@@ -243,7 +243,7 @@ class LookaheadAnalysisSubFunctions:
# report the results
if lookaheadAnalysis_instances:
caption: Union[str, None] = None
caption: str | None = None
if any(
[
any(

View File

@@ -1,7 +1,6 @@
import hashlib
from copy import deepcopy
from pathlib import Path
from typing import Union
import rapidjson
@@ -38,7 +37,7 @@ def get_strategy_run_id(strategy) -> str:
return digest.hexdigest().lower()
def get_backtest_metadata_filename(filename: Union[Path, str]) -> Path:
def get_backtest_metadata_filename(filename: Path | str) -> Path:
"""Return metadata filename for specified backtest results file."""
filename = Path(filename)
return filename.parent / Path(f"{filename.stem}.meta{filename.suffix}")

View File

@@ -8,7 +8,7 @@ import logging
from collections import defaultdict
from copy import deepcopy
from datetime import datetime, timedelta, timezone
from typing import Any, Optional
from typing import Any
from numpy import nan
from pandas import DataFrame
@@ -110,7 +110,7 @@ class Backtesting:
backtesting.start()
"""
def __init__(self, config: Config, exchange: Optional[Exchange] = None) -> None:
def __init__(self, config: Config, exchange: Exchange | None = None) -> None:
LoggingMixin.show_output = False
self.config = config
self.results: BacktestResultType = get_BacktestResultType_default()
@@ -685,7 +685,7 @@ class Backtesting:
)
def _try_close_open_order(
self, order: Optional[Order], trade: LocalTrade, current_date: datetime, row: tuple
self, order: Order | None, trade: LocalTrade, current_date: datetime, row: tuple
) -> bool:
"""
Check if an order is open and if it should've filled.
@@ -732,7 +732,6 @@ class Backtesting:
trade.close_date = current_time
trade.close(order.ft_price, show_msg=False)
# logger.debug(f"{pair} - Backtesting exit {trade}")
LocalTrade.close_bt_trade(trade)
self.wallets.update()
self.run_protections(pair, current_time, trade.trade_direction)
@@ -743,8 +742,8 @@ class Backtesting:
row: tuple,
exit_: ExitCheckTuple,
current_time: datetime,
amount: Optional[float] = None,
) -> Optional[LocalTrade]:
amount: float | None = None,
) -> LocalTrade | None:
if exit_.exit_flag:
trade.close_date = current_time
exit_reason = exit_.exit_reason
@@ -823,8 +822,8 @@ class Backtesting:
sell_row: tuple,
close_rate: float,
amount: float,
exit_reason: Optional[str],
) -> Optional[LocalTrade]:
exit_reason: str | None,
) -> LocalTrade | None:
self.order_id_counter += 1
exit_candle_time = sell_row[DATE_IDX].to_pydatetime()
order_type = self.strategy.order_types["exit"]
@@ -860,7 +859,7 @@ class Backtesting:
def _check_trade_exit(
self, trade: LocalTrade, row: tuple, current_time: datetime
) -> Optional[LocalTrade]:
) -> LocalTrade | None:
self._run_funding_fees(trade, current_time)
# Check if we need to adjust our current positions
@@ -910,10 +909,10 @@ class Backtesting:
stake_amount: float,
direction: LongShort,
current_time: datetime,
entry_tag: Optional[str],
trade: Optional[LocalTrade],
entry_tag: str | None,
trade: LocalTrade | None,
order_type: str,
price_precision: Optional[float],
price_precision: float | None,
) -> tuple[float, float, float, float]:
if order_type == "limit":
new_rate = strategy_safe_wrapper(
@@ -1005,12 +1004,12 @@ class Backtesting:
pair: str,
row: tuple,
direction: LongShort,
stake_amount: Optional[float] = None,
trade: Optional[LocalTrade] = None,
requested_rate: Optional[float] = None,
requested_stake: Optional[float] = None,
entry_tag1: Optional[str] = None,
) -> Optional[LocalTrade]:
stake_amount: float | None = None,
trade: LocalTrade | None = None,
requested_rate: float | None = None,
requested_stake: float | None = None,
entry_tag1: str | None = None,
) -> LocalTrade | None:
"""
:param trade: Trade to adjust - initial entry if None
:param requested_rate: Adjusted entry rate
@@ -1103,6 +1102,7 @@ class Backtesting:
fee_close=self.fee,
is_open=True,
enter_tag=entry_tag,
timeframe=self.timeframe_min,
exchange=self._exchange_name,
is_short=is_short,
trading_mode=self.trading_mode,
@@ -1139,7 +1139,7 @@ class Backtesting:
amount=amount,
filled=0,
remaining=amount,
cost=amount * propose_rate + trade.fee_open,
cost=amount * propose_rate * (1 + self.fee),
ft_order_tag=entry_tag,
)
order._trade_bt = trade
@@ -1178,7 +1178,7 @@ class Backtesting:
self.rejected_trades += 1
return False
def check_for_trade_entry(self, row) -> Optional[LongShort]:
def check_for_trade_entry(self, row) -> LongShort | None:
enter_long = row[LONG_IDX] == 1
exit_long = row[ELONG_IDX] == 1
enter_short = self._can_short and row[SHORT_IDX] == 1
@@ -1216,7 +1216,7 @@ class Backtesting:
def check_order_cancel(
self, trade: LocalTrade, order: Order, current_time: datetime
) -> Optional[bool]:
) -> bool | None:
"""
Check if current analyzed order has to be canceled.
Returns True if the trade should be Deleted (initial order was canceled),
@@ -1298,7 +1298,7 @@ class Backtesting:
def validate_row(
self, data: dict, pair: str, row_index: int, current_time: datetime
) -> Optional[tuple]:
) -> tuple | None:
try:
# Row is treated as "current incomplete candle".
# entry / exit signals are shifted by 1 to compensate for this.
@@ -1332,19 +1332,45 @@ class Backtesting:
row: tuple,
pair: str,
current_time: datetime,
end_date: datetime,
trade_dir: Optional[LongShort],
is_first: bool = True,
trade_dir: LongShort | None,
can_enter: bool,
) -> None:
"""
Conditionally call backtest_loop_inner a 2nd time if shorting is enabled,
a position was closed, and a new signal in the other direction is available.
"""
if not self._can_short or trade_dir is None:
# No need to reverse position if shorting is disabled or there's no new signal
self.backtest_loop_inner(row, pair, current_time, trade_dir, can_enter)
else:
for _ in (0, 1):
a = self.backtest_loop_inner(row, pair, current_time, trade_dir, can_enter)
if not a or a == trade_dir:
# the trade didn't close or position change is in the same direction
break
def backtest_loop_inner(
self,
row: tuple,
pair: str,
current_time: datetime,
trade_dir: LongShort | None,
can_enter: bool,
) -> LongShort | None:
"""
NOTE: This method is used by Hyperopt at each iteration. Please keep it optimized.
Backtesting processing for one candle/pair.
"""
exiting_dir: LongShort | None = None
if not self._position_stacking and len(LocalTrade.bt_trades_open_pp[pair]) > 0:
# position_stacking not supported for now.
exiting_dir = "short" if LocalTrade.bt_trades_open_pp[pair][0].is_short else "long"
for t in list(LocalTrade.bt_trades_open_pp[pair]):
# 1. Manage currently open orders of active trades
if self.manage_open_orders(t, current_time, row):
# Close trade
# Remove trade (initial open order never filled)
LocalTrade.remove_bt_trade(t)
self.wallets.update()
@@ -1354,13 +1380,12 @@ class Backtesting:
# don't open on the last row
# We only open trades on the main candle, not on detail candles
if (
(self._position_stacking or len(LocalTrade.bt_trades_open_pp[pair]) == 0)
and is_first
and current_time != end_date
can_enter
and trade_dir is not None
and (self._position_stacking or len(LocalTrade.bt_trades_open_pp[pair]) == 0)
and not PairLocks.is_pair_locked(pair, row[DATE_IDX], trade_dir)
):
if self.trade_slot_available(LocalTrade.bt_open_open_trade_count):
if self.trade_slot_available(LocalTrade.bt_open_open_trade_count_candle):
trade = self._enter_trade(pair, row, trade_dir)
if trade:
self.wallets.update()
@@ -1382,6 +1407,10 @@ class Backtesting:
if order:
self._process_exit_order(order, trade, current_time, row, pair)
if exiting_dir and len(LocalTrade.bt_trades_open_pp[pair]) == 0:
return exiting_dir
return None
def time_pair_generator(
self, start_date: datetime, end_date: datetime, increment: timedelta, pairs: list[str]
):
@@ -1429,11 +1458,15 @@ class Backtesting:
indexes: dict = defaultdict(int)
# Loop timerange and get candle for each pair at that point in time
for current_time, pair, is_first in self.time_pair_generator(
for current_time, pair, is_first_call in self.time_pair_generator(
start_date, end_date, self.timeframe_td, list(data.keys())
):
if is_first:
if is_first_call:
self.check_abort()
# Reset open trade count for this candle
# Critical to avoid exceeding max_open_trades in backtesting
# when timeframe-detail is used and trades close within the opening candle.
LocalTrade.bt_open_open_trade_count_candle = LocalTrade.bt_open_open_trade_count
strategy_safe_wrapper(self.strategy.bot_loop_start, supress_error=True)(
current_time=current_time
)
@@ -1444,10 +1477,11 @@ class Backtesting:
row_index += 1
indexes[pair] = row_index
is_last_row = current_time == end_date
self.dataprovider._set_dataframe_max_index(self.required_startup + row_index)
self.dataprovider._set_dataframe_max_date(current_time)
current_detail_time: datetime = row[DATE_IDX].to_pydatetime()
trade_dir: Optional[LongShort] = self.check_for_trade_entry(row)
trade_dir: LongShort | None = self.check_for_trade_entry(row)
if (
(trade_dir is not None or len(LocalTrade.bt_trades_open_pp[pair]) > 0)
@@ -1466,7 +1500,8 @@ class Backtesting:
].copy()
if len(detail_data) == 0:
# Fall back to "regular" data if no detail data was found for this candle
self.backtest_loop(row, pair, current_time, end_date, trade_dir)
self.dataprovider._set_dataframe_max_date(current_time)
self.backtest_loop(row, pair, current_time, trade_dir, not is_last_row)
continue
detail_data.loc[:, "enter_long"] = row[LONG_IDX]
detail_data.loc[:, "exit_long"] = row[ELONG_IDX]
@@ -1482,15 +1517,14 @@ class Backtesting:
det_row,
pair,
current_time_det,
end_date,
trade_dir,
is_first,
is_first and not is_last_row,
)
current_time_det += self.timeframe_detail_td
is_first = False
else:
self.dataprovider._set_dataframe_max_date(current_time)
self.backtest_loop(row, pair, current_time, end_date, trade_dir)
self.backtest_loop(row, pair, current_time, trade_dir, not is_last_row)
self.handle_left_open(LocalTrade.bt_trades_open_pp, data=data)
self.wallets.update()
@@ -1518,12 +1552,6 @@ class Backtesting:
backtest_start_time = datetime.now(timezone.utc)
self._set_strategy(strat)
# Use max_open_trades in backtesting, except --disable-max-market-positions is set
if not self.config.get("use_max_market_positions", True):
logger.info("Ignoring max_open_trades (--disable-max-market-positions was used) ...")
self.strategy.max_open_trades = float("inf")
self.config.update({"max_open_trades": self.strategy.max_open_trades})
# need to reprocess data every time to populate signals
preprocessed = self.strategy.advise_all_indicators(data)

View File

@@ -1,7 +1,7 @@
import logging
from copy import deepcopy
from datetime import datetime, timezone
from typing import Any, Optional
from typing import Any
from pandas import DataFrame
@@ -28,7 +28,7 @@ class BaseAnalysis:
def __init__(self, config: dict[str, Any], strategy_obj: dict):
self.failed_bias_check = True
self.full_varHolder = VarHolder()
self.exchange: Optional[Any] = None
self.exchange: Any | None = None
self._fee = None
# pull variables the scope of the lookahead_analysis-instance

View File

@@ -0,0 +1,5 @@
from freqtrade.optimize.hyperopt.hyperopt import Hyperopt
from freqtrade.optimize.hyperopt_loss.hyperopt_loss_interface import IHyperOptLoss
__all__ = ["Hyperopt", "IHyperOptLoss"]

View File

@@ -0,0 +1,352 @@
# pragma pylint: disable=too-many-instance-attributes, pointless-string-statement
"""
This module contains the hyperopt logic
"""
import logging
import random
import sys
from datetime import datetime
from math import ceil
from multiprocessing import Manager
from pathlib import Path
from typing import Any
import rapidjson
from joblib import Parallel, cpu_count, delayed, wrap_non_picklable_objects
from joblib.externals import cloudpickle
from rich.console import Console
from freqtrade.constants import FTHYPT_FILEVERSION, LAST_BT_RESULT_FN, Config
from freqtrade.enums import HyperoptState
from freqtrade.exceptions import OperationalException
from freqtrade.misc import file_dump_json, plural
from freqtrade.optimize.hyperopt.hyperopt_logger import logging_mp_handle, logging_mp_setup
from freqtrade.optimize.hyperopt.hyperopt_optimizer import HyperOptimizer
from freqtrade.optimize.hyperopt.hyperopt_output import HyperoptOutput
from freqtrade.optimize.hyperopt_tools import (
HyperoptStateContainer,
HyperoptTools,
hyperopt_serializer,
)
from freqtrade.util import get_progress_tracker
logger = logging.getLogger(__name__)
INITIAL_POINTS = 30
# Keep no more than SKOPT_MODEL_QUEUE_SIZE models
# in the skopt model queue, to optimize memory consumption
SKOPT_MODEL_QUEUE_SIZE = 10
log_queue: Any
class Hyperopt:
"""
Hyperopt class, this class contains all the logic to run a hyperopt simulation
To start a hyperopt run:
hyperopt = Hyperopt(config)
hyperopt.start()
"""
def __init__(self, config: Config) -> None:
self._hyper_out: HyperoptOutput = HyperoptOutput(streaming=True)
self.config = config
self.analyze_per_epoch = self.config.get("analyze_per_epoch", False)
HyperoptStateContainer.set_state(HyperoptState.STARTUP)
if self.config.get("hyperopt"):
raise OperationalException(
"Using separate Hyperopt files has been removed in 2021.9. Please convert "
"your existing Hyperopt file to the new Hyperoptable strategy interface"
)
time_now = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
strategy = str(self.config["strategy"])
self.results_file: Path = (
self.config["user_data_dir"]
/ "hyperopt_results"
/ f"strategy_{strategy}_{time_now}.fthypt"
)
self.data_pickle_file = (
self.config["user_data_dir"] / "hyperopt_results" / "hyperopt_tickerdata.pkl"
)
self.total_epochs = config.get("epochs", 0)
self.current_best_loss = 100
self.clean_hyperopt()
self.num_epochs_saved = 0
self.current_best_epoch: dict[str, Any] | None = None
if HyperoptTools.has_space(self.config, "sell"):
# Make sure use_exit_signal is enabled
self.config["use_exit_signal"] = True
self.print_all = self.config.get("print_all", False)
self.hyperopt_table_header = 0
self.print_colorized = self.config.get("print_colorized", False)
self.print_json = self.config.get("print_json", False)
self.hyperopter = HyperOptimizer(self.config)
@staticmethod
def get_lock_filename(config: Config) -> str:
return str(config["user_data_dir"] / "hyperopt.lock")
def clean_hyperopt(self) -> None:
"""
Remove hyperopt pickle files to restart hyperopt.
"""
for f in [self.data_pickle_file, self.results_file]:
p = Path(f)
if p.is_file():
logger.info(f"Removing `{p}`.")
p.unlink()
def hyperopt_pickle_magic(self, bases) -> None:
"""
Hyperopt magic to allow strategy inheritance across files.
For this to work properly, we need to register the module of the imported class
for pickling by value.
"""
for modules in bases:
if modules.__name__ != "IStrategy":
cloudpickle.register_pickle_by_value(sys.modules[modules.__module__])
self.hyperopt_pickle_magic(modules.__bases__)
def _save_result(self, epoch: dict) -> None:
"""
Save hyperopt results to file
Store one line per epoch.
While not a valid json object - this allows appending easily.
:param epoch: result dictionary for this epoch.
"""
epoch[FTHYPT_FILEVERSION] = 2
with self.results_file.open("a") as f:
rapidjson.dump(
epoch,
f,
default=hyperopt_serializer,
number_mode=rapidjson.NM_NATIVE | rapidjson.NM_NAN,
)
f.write("\n")
self.num_epochs_saved += 1
logger.debug(
f"{self.num_epochs_saved} {plural(self.num_epochs_saved, 'epoch')} "
f"saved to '{self.results_file}'."
)
# Store hyperopt filename
latest_filename = Path.joinpath(self.results_file.parent, LAST_BT_RESULT_FN)
file_dump_json(latest_filename, {"latest_hyperopt": str(self.results_file.name)}, log=False)
def print_results(self, results: dict[str, Any]) -> None:
"""
Log results if it is better than any previous evaluation
TODO: this should be moved to HyperoptTools too
"""
is_best = results["is_best"]
if self.print_all or is_best:
self._hyper_out.add_data(
self.config,
[results],
self.total_epochs,
self.print_all,
)
def run_optimizer_parallel(self, parallel: Parallel, asked: list[list]) -> list[dict[str, Any]]:
"""Start optimizer in a parallel way"""
def optimizer_wrapper(*args, **kwargs):
# global log queue. This must happen in the file that initializes Parallel
logging_mp_setup(
log_queue, logging.INFO if self.config["verbosity"] < 1 else logging.DEBUG
)
return self.hyperopter.generate_optimizer(*args, **kwargs)
return parallel(delayed(wrap_non_picklable_objects(optimizer_wrapper))(v) for v in asked)
def _set_random_state(self, random_state: int | None) -> int:
return random_state or random.randint(1, 2**16 - 1) # noqa: S311
def get_asked_points(self, n_points: int) -> tuple[list[list[Any]], list[bool]]:
"""
Enforce that points returned from `self.opt.ask` have not already been evaluated
Steps:
1. Try to get points using `self.opt.ask` first
2. Discard the points that have already been evaluated
3. Retry using `self.opt.ask` up to 3 times
4. If some points are still missing with respect to `n_points`, randomly sample some points
5. Repeat until there are at least `n_points` points in the `asked_non_tried` list
6. Return a list with length truncated at `n_points`
"""
def unique_list(a_list):
new_list = []
for item in a_list:
if item not in new_list:
new_list.append(item)
return new_list
i = 0
asked_non_tried: list[list[Any]] = []
is_random_non_tried: list[bool] = []
while i < 5 and len(asked_non_tried) < n_points:
if i < 3:
self.opt.cache_ = {}
asked = unique_list(self.opt.ask(n_points=n_points * 5 if i > 0 else n_points))
is_random = [False for _ in range(len(asked))]
else:
asked = unique_list(self.opt.space.rvs(n_samples=n_points * 5))
is_random = [True for _ in range(len(asked))]
is_random_non_tried += [
rand
for x, rand in zip(asked, is_random, strict=False)
if x not in self.opt.Xi and x not in asked_non_tried
]
asked_non_tried += [
x for x in asked if x not in self.opt.Xi and x not in asked_non_tried
]
i += 1
if asked_non_tried:
return (
asked_non_tried[: min(len(asked_non_tried), n_points)],
is_random_non_tried[: min(len(asked_non_tried), n_points)],
)
else:
return self.opt.ask(n_points=n_points), [False for _ in range(n_points)]
def evaluate_result(self, val: dict[str, Any], current: int, is_random: bool):
"""
Evaluate results returned from generate_optimizer
"""
val["current_epoch"] = current
val["is_initial_point"] = current <= INITIAL_POINTS
logger.debug("Optimizer epoch evaluated: %s", val)
is_best = HyperoptTools.is_best_loss(val, self.current_best_loss)
# This value is assigned here and not in the optimization method
# to keep proper order in the list of results. That's because
evaluations can take different amounts of time. Here they are aligned in the
# order they will be shown to the user.
val["is_best"] = is_best
val["is_random"] = is_random
self.print_results(val)
if is_best:
self.current_best_loss = val["loss"]
self.current_best_epoch = val
self._save_result(val)
def _setup_logging_mp_workaround(self) -> None:
"""
Workaround for logging in child processes.
log_queue must be a global in the file that initializes Parallel.
"""
global log_queue
m = Manager()
log_queue = m.Queue()
def start(self) -> None:
self.random_state = self._set_random_state(self.config.get("hyperopt_random_state"))
logger.info(f"Using optimizer random state: {self.random_state}")
self.hyperopt_table_header = -1
self.hyperopter.prepare_hyperopt()
cpus = cpu_count()
logger.info(f"Found {cpus} CPU cores. Let's make them scream!")
config_jobs = self.config.get("hyperopt_jobs", -1)
logger.info(f"Number of parallel jobs set as: {config_jobs}")
self.opt = self.hyperopter.get_optimizer(
config_jobs, self.random_state, INITIAL_POINTS, SKOPT_MODEL_QUEUE_SIZE
)
self._setup_logging_mp_workaround()
try:
with Parallel(n_jobs=config_jobs) as parallel:
jobs = parallel._effective_n_jobs()
logger.info(f"Effective number of parallel workers used: {jobs}")
console = Console(
color_system="auto" if self.print_colorized else None,
)
# Define progressbar
with get_progress_tracker(
console=console,
cust_callables=[self._hyper_out],
) as pbar:
task = pbar.add_task("Epochs", total=self.total_epochs)
start = 0
if self.analyze_per_epoch:
# First analysis not in parallel mode when using --analyze-per-epoch.
# This allows the dataprovider to load its informative cache.
asked, is_random = self.get_asked_points(n_points=1)
f_val0 = self.hyperopter.generate_optimizer(asked[0])
self.opt.tell(asked, [f_val0["loss"]])
self.evaluate_result(f_val0, 1, is_random[0])
pbar.update(task, advance=1)
start += 1
evals = ceil((self.total_epochs - start) / jobs)
for i in range(evals):
# Correct the number of epochs to be processed for the last
# iteration (should not exceed self.total_epochs in total)
n_rest = (i + 1) * jobs - (self.total_epochs - start)
current_jobs = jobs - n_rest if n_rest > 0 else jobs
asked, is_random = self.get_asked_points(n_points=current_jobs)
f_val = self.run_optimizer_parallel(parallel, asked)
self.opt.tell(asked, [v["loss"] for v in f_val])
for j, val in enumerate(f_val):
# Use human-friendly indexes here (starting from 1)
current = i * jobs + j + 1 + start
self.evaluate_result(val, current, is_random[j])
pbar.update(task, advance=1)
logging_mp_handle(log_queue)
except KeyboardInterrupt:
print("User interrupted..")
logger.info(
f"{self.num_epochs_saved} {plural(self.num_epochs_saved, 'epoch')} "
f"saved to '{self.results_file}'."
)
if self.current_best_epoch:
HyperoptTools.try_export_params(
self.config,
self.hyperopter.get_strategy_name(),
self.current_best_epoch,
)
HyperoptTools.show_epoch_details(
self.current_best_epoch, self.total_epochs, self.print_json
)
elif self.num_epochs_saved > 0:
print(
f"No good result found for given optimization function in {self.num_epochs_saved} "
f"{plural(self.num_epochs_saved, 'epoch')}."
)
else:
# This is printed when Ctrl+C is pressed quickly, before first epochs have
# a chance to be evaluated.
print("No epochs evaluated yet, no best result.")

View File

@@ -5,8 +5,8 @@ This module implements a convenience auto-hyperopt class, which can be used toge
"""
import logging
from collections.abc import Callable
from contextlib import suppress
from typing import Callable
from freqtrade.exceptions import OperationalException
@@ -14,7 +14,7 @@ from freqtrade.exceptions import OperationalException
with suppress(ImportError):
from skopt.space import Dimension
from freqtrade.optimize.hyperopt_interface import EstimatorType, IHyperOpt
from freqtrade.optimize.hyperopt.hyperopt_interface import EstimatorType, IHyperOpt
logger = logging.getLogger(__name__)

View File

@@ -6,7 +6,7 @@ This module defines the interface to apply for hyperopt
import logging
import math
from abc import ABC
from typing import Union
from typing import TypeAlias
from sklearn.base import RegressorMixin
from skopt.space import Categorical, Dimension, Integer
@@ -20,7 +20,7 @@ from freqtrade.strategy import IStrategy
logger = logging.getLogger(__name__)
EstimatorType = Union[RegressorMixin, str]
EstimatorType: TypeAlias = RegressorMixin | str
class IHyperOpt(ABC):
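With EstimatorType now a TypeAlias over RegressorMixin | str, a user-supplied generate_estimator override may return either a string alias understood by skopt (such as "ET" or "RF") or a configured regressor instance. A hedged sketch, assuming scikit-learn is available; the estimator choice is illustrative and not prescribed by the diff:

# Illustrative only: either return value satisfies the new EstimatorType alias.
from sklearn.base import RegressorMixin
from sklearn.ensemble import ExtraTreesRegressor

EstimatorType = RegressorMixin | str  # mirrors the alias introduced above

def generate_estimator(dimensions, **kwargs) -> EstimatorType:
    # Could also simply return "ET" and let skopt build the estimator.
    return ExtraTreesRegressor(n_estimators=100)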

View File

@@ -0,0 +1,40 @@
import logging
from logging.handlers import QueueHandler
from multiprocessing import Queue, current_process
from queue import Empty
logger = logging.getLogger(__name__)
def logging_mp_setup(log_queue: Queue, verbosity: int):
"""
Set up logging in a child process.
Must be called in the child process before it emits any log messages.
log_queue MUST be passed to the child process via inheritance,
which essentially means that log_queue must be a global, created in the same
file where Parallel is initialized.
"""
current_proc = current_process().name
if current_proc != "MainProcess":
h = QueueHandler(log_queue)
root = logging.getLogger()
root.setLevel(verbosity)
root.addHandler(h)
def logging_mp_handle(q: Queue):
"""
Handle logging from a child process.
Must be called in the parent process to handle log messages from the child process.
"""
try:
while True:
record = q.get(block=False)
if record is None:
break
logger.handle(record)
except Empty:
pass
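Taken together with the Manager()-backed queue created in the orchestrator, the intended wiring looks roughly like the sketch below. This is illustrative, not part of the diff: logging_mp_setup and logging_mp_handle are the helpers defined in this new file, while parent_setup and child_job are hypothetical stand-ins for the orchestrator and the joblib worker function.

# Sketch of the parent/child logging wiring, assuming joblib/loky workers.
import logging
from multiprocessing import Manager

# from <this new module> import logging_mp_setup, logging_mp_handle  # module path not shown in the diff

log_queue = None  # must be a module-level global in the file that creates Parallel

def parent_setup() -> None:
    global log_queue
    m = Manager()
    log_queue = m.Queue()  # proxy queue, inheritable by the worker processes

def child_job(params: dict) -> dict:
    # Inside the worker: attach a QueueHandler so records flow back to the parent.
    logging_mp_setup(log_queue, logging.INFO)
    logging.getLogger(__name__).info("Evaluating %s", params)
    return params

# In the parent, after each parallel batch, drain and re-emit the records:
# logging_mp_handle(log_queue)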

View File

@@ -1,45 +1,33 @@
# pragma pylint: disable=too-many-instance-attributes, pointless-string-statement
"""
This module contains the hyperopt logic
This module contains the hyperopt optimizer class, which needs to be pickled
and will be sent to the hyperopt worker processes.
"""
import logging
import random
import sys
import warnings
from datetime import datetime, timezone
from math import ceil
from pathlib import Path
from typing import Any, Optional
from typing import Any
import rapidjson
from joblib import Parallel, cpu_count, delayed, dump, load, wrap_non_picklable_objects
from joblib import dump, load
from joblib.externals import cloudpickle
from pandas import DataFrame
from rich.console import Console
from freqtrade.constants import DATETIME_PRINT_FORMAT, FTHYPT_FILEVERSION, LAST_BT_RESULT_FN, Config
from freqtrade.constants import DATETIME_PRINT_FORMAT, Config
from freqtrade.data.converter import trim_dataframes
from freqtrade.data.history import get_timerange
from freqtrade.data.metrics import calculate_market_change
from freqtrade.enums import HyperoptState
from freqtrade.exceptions import OperationalException
from freqtrade.misc import deep_merge_dicts, file_dump_json, plural
from freqtrade.misc import deep_merge_dicts
from freqtrade.optimize.backtesting import Backtesting
# Import IHyperOpt and IHyperOptLoss to allow unpickling classes from these modules
from freqtrade.optimize.hyperopt_auto import HyperOptAuto
from freqtrade.optimize.hyperopt_loss_interface import IHyperOptLoss
from freqtrade.optimize.hyperopt_output import HyperoptOutput
from freqtrade.optimize.hyperopt_tools import (
HyperoptStateContainer,
HyperoptTools,
hyperopt_serializer,
)
# Import IHyperOptLoss to allow unpickling classes from these modules
from freqtrade.optimize.hyperopt.hyperopt_auto import HyperOptAuto
from freqtrade.optimize.hyperopt_loss.hyperopt_loss_interface import IHyperOptLoss
from freqtrade.optimize.hyperopt_tools import HyperoptStateContainer, HyperoptTools
from freqtrade.optimize.optimize_reports import generate_strategy_stats
from freqtrade.resolvers.hyperopt_resolver import HyperOptLossResolver
from freqtrade.util import get_progress_tracker
# Suppress scikit-learn FutureWarnings from skopt
@@ -51,22 +39,13 @@ with warnings.catch_warnings():
logger = logging.getLogger(__name__)
INITIAL_POINTS = 30
# Keep no more than SKOPT_MODEL_QUEUE_SIZE models
# in the skopt model queue, to optimize memory consumption
SKOPT_MODEL_QUEUE_SIZE = 10
MAX_LOSS = 100000 # just a big enough number to be bad result in loss optimization
class Hyperopt:
class HyperOptimizer:
"""
Hyperopt class, this class contains all the logic to run a hyperopt simulation
To start a hyperopt run:
hyperopt = Hyperopt(config)
hyperopt.start()
HyperOptimizer class.
This class is sent to the hyperopt worker processes.
"""
def __init__(self, config: Config) -> None:
@@ -79,8 +58,6 @@ class Hyperopt:
self.max_open_trades_space: list[Dimension] = []
self.dimensions: list[Dimension] = []
self._hyper_out: HyperoptOutput = HyperoptOutput(streaming=True)
self.config = config
self.min_date: datetime
self.max_date: datetime
@@ -89,7 +66,6 @@ class Hyperopt:
self.pairlist = self.backtesting.pairlists.whitelist
self.custom_hyperopt: HyperOptAuto
self.analyze_per_epoch = self.config.get("analyze_per_epoch", False)
HyperoptStateContainer.set_state(HyperoptState.STARTUP)
if not self.config.get("hyperopt"):
self.custom_hyperopt = HyperOptAuto(self.config)
@@ -107,54 +83,35 @@ class Hyperopt:
self.config
)
self.calculate_loss = self.custom_hyperoptloss.hyperopt_loss_function
time_now = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
strategy = str(self.config["strategy"])
self.results_file: Path = (
self.config["user_data_dir"]
/ "hyperopt_results"
/ f"strategy_{strategy}_{time_now}.fthypt"
)
self.data_pickle_file = (
self.config["user_data_dir"] / "hyperopt_results" / "hyperopt_tickerdata.pkl"
)
self.total_epochs = config.get("epochs", 0)
self.current_best_loss = 100
self.clean_hyperopt()
self.market_change = 0.0
self.num_epochs_saved = 0
self.current_best_epoch: Optional[dict[str, Any]] = None
# Use max_open_trades for hyperopt as well, except --disable-max-market-positions is set
if not self.config.get("use_max_market_positions", True):
logger.debug("Ignoring max_open_trades (--disable-max-market-positions was used) ...")
self.backtesting.strategy.max_open_trades = float("inf")
config.update({"max_open_trades": self.backtesting.strategy.max_open_trades})
if HyperoptTools.has_space(self.config, "sell"):
# Make sure use_exit_signal is enabled
self.config["use_exit_signal"] = True
self.print_all = self.config.get("print_all", False)
self.hyperopt_table_header = 0
self.print_colorized = self.config.get("print_colorized", False)
self.print_json = self.config.get("print_json", False)
def prepare_hyperopt(self) -> None:
# Initialize spaces ...
self.init_spaces()
@staticmethod
def get_lock_filename(config: Config) -> str:
return str(config["user_data_dir"] / "hyperopt.lock")
self.prepare_hyperopt_data()
def clean_hyperopt(self) -> None:
"""
Remove hyperopt pickle files to restart hyperopt.
"""
for f in [self.data_pickle_file, self.results_file]:
p = Path(f)
if p.is_file():
logger.info(f"Removing `{p}`.")
p.unlink()
# We don't need exchange instance anymore while running hyperopt
self.backtesting.exchange.close()
self.backtesting.exchange._api = None
self.backtesting.exchange._api_async = None
self.backtesting.exchange.loop = None # type: ignore
self.backtesting.exchange._loop_lock = None # type: ignore
self.backtesting.exchange._cache_lock = None # type: ignore
# self.backtesting.exchange = None # type: ignore
self.backtesting.pairlists = None # type: ignore
def get_strategy_name(self) -> str:
return self.backtesting.strategy.get_strategy_name()
def hyperopt_pickle_magic(self, bases) -> None:
"""
@@ -177,33 +134,7 @@ class Hyperopt:
# Return a dict where the keys are the names of the dimensions
# and the values are taken from the list of parameters.
return {d.name: v for d, v in zip(dimensions, raw_params)}
def _save_result(self, epoch: dict) -> None:
"""
Save hyperopt results to file
Store one line per epoch.
While not a valid json object - this allows appending easily.
:param epoch: result dictionary for this epoch.
"""
epoch[FTHYPT_FILEVERSION] = 2
with self.results_file.open("a") as f:
rapidjson.dump(
epoch,
f,
default=hyperopt_serializer,
number_mode=rapidjson.NM_NATIVE | rapidjson.NM_NAN,
)
f.write("\n")
self.num_epochs_saved += 1
logger.debug(
f"{self.num_epochs_saved} {plural(self.num_epochs_saved, 'epoch')} "
f"saved to '{self.results_file}'."
)
# Store hyperopt filename
latest_filename = Path.joinpath(self.results_file.parent, LAST_BT_RESULT_FN)
file_dump_json(latest_filename, {"latest_hyperopt": str(self.results_file.name)}, log=False)
return {d.name: v for d, v in zip(dimensions, raw_params, strict=False)}
def _get_params_details(self, params: dict) -> dict:
"""
@@ -257,21 +188,6 @@ class Hyperopt:
result["max_open_trades"] = {"max_open_trades": strategy.max_open_trades}
return result
def print_results(self, results: dict[str, Any]) -> None:
"""
Log results if it is better than any previous evaluation
TODO: this should be moved to HyperoptTools too
"""
is_best = results["is_best"]
if self.print_all or is_best:
self._hyper_out.add_data(
self.config,
[results],
self.total_epochs,
self.print_all,
)
def init_spaces(self):
"""
Assign the dimensions in the hyperoptimization space.
@@ -458,7 +374,14 @@ class Hyperopt:
"total_profit": total_profit,
}
def get_optimizer(self, dimensions: list[Dimension], cpu_count) -> Optimizer:
def get_optimizer(
self,
cpu_count: int,
random_state: int,
initial_points: int,
model_queue_size: int,
) -> Optimizer:
dimensions = self.dimensions
estimator = self.custom_hyperopt.generate_estimator(dimensions=dimensions)
acq_optimizer = "sampling"
@@ -473,21 +396,12 @@ class Hyperopt:
dimensions,
base_estimator=estimator,
acq_optimizer=acq_optimizer,
n_initial_points=INITIAL_POINTS,
n_initial_points=initial_points,
acq_optimizer_kwargs={"n_jobs": cpu_count},
random_state=self.random_state,
model_queue_size=SKOPT_MODEL_QUEUE_SIZE,
random_state=random_state,
model_queue_size=model_queue_size,
)
def run_optimizer_parallel(self, parallel: Parallel, asked: list[list]) -> list[dict[str, Any]]:
"""Start optimizer in a parallel way"""
return parallel(
delayed(wrap_non_picklable_objects(self.generate_optimizer))(v) for v in asked
)
def _set_random_state(self, random_state: Optional[int]) -> int:
return random_state or random.randint(1, 2**16 - 1) # noqa: S311
def advise_and_trim(self, data: dict[str, DataFrame]) -> dict[str, DataFrame]:
preprocessed = self.backtesting.strategy.advise_all_indicators(data)
@@ -523,173 +437,3 @@ class Hyperopt:
dump(preprocessed, self.data_pickle_file)
else:
dump(data, self.data_pickle_file)
def get_asked_points(self, n_points: int) -> tuple[list[list[Any]], list[bool]]:
"""
Enforce that points returned from `self.opt.ask` have not already been evaluated
Steps:
1. Try to get points using `self.opt.ask` first
2. Discard the points that have already been evaluated
3. Retry using `self.opt.ask` up to 3 times
4. If some points are still missing with respect to `n_points`, randomly sample new points
5. Repeat until there are at least `n_points` points in the `asked_non_tried` list
6. Return a list with its length truncated at `n_points`
"""
def unique_list(a_list):
new_list = []
for item in a_list:
if item not in new_list:
new_list.append(item)
return new_list
i = 0
asked_non_tried: list[list[Any]] = []
is_random_non_tried: list[bool] = []
while i < 5 and len(asked_non_tried) < n_points:
if i < 3:
self.opt.cache_ = {}
asked = unique_list(self.opt.ask(n_points=n_points * 5 if i > 0 else n_points))
is_random = [False for _ in range(len(asked))]
else:
asked = unique_list(self.opt.space.rvs(n_samples=n_points * 5))
is_random = [True for _ in range(len(asked))]
is_random_non_tried += [
rand
for x, rand in zip(asked, is_random)
if x not in self.opt.Xi and x not in asked_non_tried
]
asked_non_tried += [
x for x in asked if x not in self.opt.Xi and x not in asked_non_tried
]
i += 1
if asked_non_tried:
return (
asked_non_tried[: min(len(asked_non_tried), n_points)],
is_random_non_tried[: min(len(asked_non_tried), n_points)],
)
else:
return self.opt.ask(n_points=n_points), [False for _ in range(n_points)]
def evaluate_result(self, val: dict[str, Any], current: int, is_random: bool):
"""
Evaluate results returned from generate_optimizer
"""
val["current_epoch"] = current
val["is_initial_point"] = current <= INITIAL_POINTS
logger.debug("Optimizer epoch evaluated: %s", val)
is_best = HyperoptTools.is_best_loss(val, self.current_best_loss)
# This value is assigned here and not in the optimization method
# to keep proper order in the list of results. That's because
# evaluations can take different amounts of time. Here they are aligned in the
# order they will be shown to the user.
val["is_best"] = is_best
val["is_random"] = is_random
self.print_results(val)
if is_best:
self.current_best_loss = val["loss"]
self.current_best_epoch = val
self._save_result(val)
def start(self) -> None:
self.random_state = self._set_random_state(self.config.get("hyperopt_random_state"))
logger.info(f"Using optimizer random state: {self.random_state}")
self.hyperopt_table_header = -1
# Initialize spaces ...
self.init_spaces()
self.prepare_hyperopt_data()
# We don't need exchange instance anymore while running hyperopt
self.backtesting.exchange.close()
self.backtesting.exchange._api = None
self.backtesting.exchange._api_async = None
self.backtesting.exchange.loop = None # type: ignore
self.backtesting.exchange._loop_lock = None # type: ignore
self.backtesting.exchange._cache_lock = None # type: ignore
# self.backtesting.exchange = None # type: ignore
self.backtesting.pairlists = None # type: ignore
cpus = cpu_count()
logger.info(f"Found {cpus} CPU cores. Let's make them scream!")
config_jobs = self.config.get("hyperopt_jobs", -1)
logger.info(f"Number of parallel jobs set as: {config_jobs}")
self.opt = self.get_optimizer(self.dimensions, config_jobs)
try:
with Parallel(n_jobs=config_jobs) as parallel:
jobs = parallel._effective_n_jobs()
logger.info(f"Effective number of parallel workers used: {jobs}")
console = Console(
color_system="auto" if self.print_colorized else None,
)
# Define progressbar
with get_progress_tracker(
console=console,
cust_callables=[self._hyper_out],
) as pbar:
task = pbar.add_task("Epochs", total=self.total_epochs)
start = 0
if self.analyze_per_epoch:
# First analysis is not run in parallel mode when using --analyze-per-epoch.
# This allows the dataprovider to load its informative cache.
asked, is_random = self.get_asked_points(n_points=1)
f_val0 = self.generate_optimizer(asked[0])
self.opt.tell(asked, [f_val0["loss"]])
self.evaluate_result(f_val0, 1, is_random[0])
pbar.update(task, advance=1)
start += 1
evals = ceil((self.total_epochs - start) / jobs)
for i in range(evals):
# Correct the number of epochs to be processed for the last
# iteration (should not exceed self.total_epochs in total)
n_rest = (i + 1) * jobs - (self.total_epochs - start)
current_jobs = jobs - n_rest if n_rest > 0 else jobs
asked, is_random = self.get_asked_points(n_points=current_jobs)
f_val = self.run_optimizer_parallel(parallel, asked)
self.opt.tell(asked, [v["loss"] for v in f_val])
for j, val in enumerate(f_val):
# Use human-friendly indexes here (starting from 1)
current = i * jobs + j + 1 + start
self.evaluate_result(val, current, is_random[j])
pbar.update(task, advance=1)
except KeyboardInterrupt:
print("User interrupted..")
logger.info(
f"{self.num_epochs_saved} {plural(self.num_epochs_saved, 'epoch')} "
f"saved to '{self.results_file}'."
)
if self.current_best_epoch:
HyperoptTools.try_export_params(
self.config, self.backtesting.strategy.get_strategy_name(), self.current_best_epoch
)
HyperoptTools.show_epoch_details(
self.current_best_epoch, self.total_epochs, self.print_json
)
elif self.num_epochs_saved > 0:
print(
f"No good result found for given optimization function in {self.num_epochs_saved} "
f"{plural(self.num_epochs_saved, 'epoch')}."
)
else:
# This is printed when Ctrl+C is pressed quickly, before the first epochs have
# a chance to be evaluated.
print("No epochs evaluated yet, no best result.")

View File

@@ -1,6 +1,6 @@
import sys
from os import get_terminal_size
from typing import Any, Optional
from typing import Any
from rich.align import Align
from rich.console import Console
@@ -37,7 +37,7 @@ class HyperoptOutput:
self.table.add_column("Objective", justify="right")
self.table.add_column("Max Drawdown (Acct)", justify="right")
def print(self, console: Optional[Console] = None, *, print_colorized=True):
def print(self, console: Console | None = None, *, print_colorized=True):
if not console:
console = Console(
color_system="auto" if print_colorized else None,
@@ -57,7 +57,7 @@ class HyperoptOutput:
stake_currency = config["stake_currency"]
self._results.extend(results)
max_rows: Optional[int] = None
max_rows: int | None = None
if self._streaming:
try:

Some files were not shown because too many files have changed in this diff.