Merge pull request #11011 from freqtrade/new_release

New release 2024.11
This commit is contained in:
Matthias
2024-12-01 20:11:57 +01:00
committed by GitHub
202 changed files with 12823 additions and 4114 deletions

View File

@@ -46,7 +46,7 @@ jobs:
uses: actions/cache@v4
with:
path: ~/.cache/pip
key: test-${{ matrix.os }}-${{ matrix.python-version }}-pip
key: pip-${{ matrix.python-version }}-ubuntu
- name: TA binary *nix
if: steps.cache.outputs.cache-hit != 'true'
@@ -167,7 +167,7 @@ jobs:
uses: actions/cache@v4
with:
path: ~/Library/Caches/pip
key: ${{ matrix.os }}-${{ matrix.python-version }}-pip
key: pip-${{ matrix.os }}-${{ matrix.python-version }}
- name: TA binary *nix
if: steps.cache.outputs.cache-hit != 'true'
@@ -196,7 +196,7 @@ jobs:
rm /usr/local/bin/python3.11-config || true
rm /usr/local/bin/python3.12-config || true
brew install hdf5 c-blosc libomp
brew install libomp
- name: Installation (python)
run: |
@@ -280,7 +280,7 @@ jobs:
uses: actions/cache@v4
with:
path: ~\AppData\Local\pip\Cache
key: ${{ matrix.os }}-${{ matrix.python-version }}-pip
key: pip-${{ matrix.os }}-${{ matrix.python-version }}
- name: Installation
run: |
@@ -420,7 +420,7 @@ jobs:
uses: actions/cache@v4
with:
path: ~/.cache/pip
key: test-${{ matrix.os }}-${{ matrix.python-version }}-pip
key: pip-3.12-ubuntu
- name: TA binary *nix
if: steps.cache.outputs.cache-hit != 'true'
@@ -540,12 +540,12 @@ jobs:
- name: Publish to PyPI (Test)
uses: pypa/gh-action-pypi-publish@v1.10.3
uses: pypa/gh-action-pypi-publish@v1.12.2
with:
repository-url: https://test.pypi.org/legacy/
- name: Publish to PyPI
uses: pypa/gh-action-pypi-publish@v1.10.3
uses: pypa/gh-action-pypi-publish@v1.12.2
deploy-docker:

View File

@@ -31,7 +31,7 @@ repos:
- repo: https://github.com/charliermarsh/ruff-pre-commit
# Ruff version.
rev: 'v0.7.1'
rev: 'v0.8.0'
hooks:
- id: ruff
- id: ruff-format

View File

@@ -33,6 +33,7 @@ Please read the [exchange specific notes](docs/exchanges.md) to learn about even
- [X] [Bybit](https://bybit.com/)
- [X] [Gate.io](https://www.gate.io/ref/6266643)
- [X] [HTX](https://www.htx.com/) (Former Huobi)
- [X] [Hyperliquid](https://hyperliquid.xyz/) (A decentralized exchange, or DEX)
- [X] [Kraken](https://kraken.com/)
- [X] [OKX](https://okx.com/) (Former OKEX)
- [ ] [potentially many others](https://github.com/ccxt/ccxt/). _(We cannot guarantee they will work)_
@@ -41,6 +42,7 @@ Please read the [exchange specific notes](docs/exchanges.md) to learn about even
- [X] [Binance](https://www.binance.com/)
- [X] [Gate.io](https://www.gate.io/ref/6266643)
- [X] [Hyperliquid](https://hyperliquid.xyz/) (A decentralized exchange, or DEX)
- [X] [OKX](https://okx.com/)
- [X] [Bybit](https://bybit.com/)

View File

@@ -682,12 +682,18 @@
},
"exit_fill": {
"description": "Telegram setting for exit fill signals.",
"type": "string",
"enum": [
"on",
"off",
"silent"
"type": [
"string",
"object"
],
"additionalProperties": {
"type": "string",
"enum": [
"on",
"off",
"silent"
]
},
"default": "on"
},
"exit_cancel": {

View File

@@ -37,8 +37,8 @@ class SuperDuperHyperOptLoss(IHyperOptLoss):
min_date: datetime,
max_date: datetime,
config: Config,
processed: Dict[str, DataFrame],
backtest_stats: Dict[str, Any],
processed: dict[str, DataFrame],
backtest_stats: dict[str, Any],
**kwargs,
) -> float:
"""
@@ -103,7 +103,7 @@ class MyAwesomeStrategy(IStrategy):
SKDecimal(0.01, 0.20, decimals=3, name='roi_p3'),
]
def generate_roi_table(params: Dict) -> Dict[int, float]:
def generate_roi_table(params: Dict) -> dict[int, float]:
roi_table = {}
roi_table[0] = params['roi_p1'] + params['roi_p2'] + params['roi_p3']

View File

@@ -10,12 +10,14 @@ To learn how to get data for the pairs and exchange you're interested in, head o
```
usage: freqtrade backtesting [-h] [-v] [--logfile FILE] [-V] [-c PATH]
[-d PATH] [--userdir PATH] [-s NAME]
[--strategy-path PATH] [-i TIMEFRAME]
[--timerange TIMERANGE]
[--data-format-ohlcv {json,jsongz,hdf5}]
[--strategy-path PATH]
[--recursive-strategy-search]
[--freqaimodel NAME] [--freqaimodel-path PATH]
[-i TIMEFRAME] [--timerange TIMERANGE]
[--data-format-ohlcv {json,jsongz,hdf5,feather,parquet}]
[--max-open-trades INT]
[--stake-amount STAKE_AMOUNT] [--fee FLOAT]
[-p PAIRS [PAIRS ...]] [--eps] [--dmmp]
[-p PAIRS [PAIRS ...]] [--eps]
[--enable-protections]
[--dry-run-wallet DRY_RUN_WALLET]
[--timeframe-detail TIMEFRAME_DETAIL]
@@ -24,8 +26,9 @@ usage: freqtrade backtesting [-h] [-v] [--logfile FILE] [-V] [-c PATH]
[--export-filename PATH]
[--breakdown {day,week,month} [{day,week,month} ...]]
[--cache {none,day,week,month}]
[--freqai-backtest-live-models]
optional arguments:
options:
-h, --help show this help message and exit
-i TIMEFRAME, --timeframe TIMEFRAME
Specify timeframe (`1m`, `5m`, `30m`, `1h`, `1d`).
@@ -48,10 +51,6 @@ optional arguments:
--eps, --enable-position-stacking
Allow buying the same pair multiple times (position
stacking).
--dmmp, --disable-max-market-positions
Disable applying `max_open_trades` during backtest
(same as setting `max_open_trades` to a very high
number).
--enable-protections, --enableprotections
Enable protections for backtesting. Will slow
backtesting down by a considerable amount, but will
@@ -80,10 +79,13 @@ optional arguments:
--cache {none,day,week,month}
Load a cached backtest result no older than specified
age (default: day).
--freqai-backtest-live-models
Run backtest with ready models.
Common arguments:
-v, --verbose Verbose mode (-vv for more, -vvv to get all messages).
--logfile FILE Log to the file specified. Special values are:
--logfile FILE, --log-file FILE
Log to the file specified. Special values are:
'syslog', 'journald'. See the documentation for more
details.
-V, --version show program's version number and exit
@@ -92,7 +94,7 @@ Common arguments:
`userdir/config.json` or `config.json` whichever
exists). Multiple --config options may be used. Can be
set to `-` to read config from stdin.
-d PATH, --datadir PATH
-d PATH, --datadir PATH, --data-dir PATH
Path to directory with historical backtesting data.
--userdir PATH, --user-data-dir PATH
Path to userdata directory.
@@ -102,6 +104,12 @@ Strategy arguments:
Specify strategy class name which will be used by the
bot.
--strategy-path PATH Specify additional strategy lookup path.
--recursive-strategy-search
Recursively search for a strategy in the strategies
folder.
--freqaimodel NAME Specify a custom freqaimodel.
--freqaimodel-path PATH
Specify additional lookup path for freqaimodels.
```
@@ -558,6 +566,7 @@ Since backtesting lacks some detailed information about what happens within a ca
- Stoploss
- ROI
- Trailing stoploss
- Position reversals (futures only) happen if an entry signal in the opposite direction triggers on the candle where the existing trade closes.
Taking these assumptions, backtesting tries to mirror real trading as closely as possible. However, backtesting will **never** replace running a strategy in dry-run mode.
Also, keep in mind that past results don't guarantee future success.
@@ -572,7 +581,7 @@ These limits are usually listed in the exchange documentation as "trading rules"
Backtesting (as well as live and dry-run) does honor these limits, and will ensure that a stoploss can be placed below this value - so the value will be slightly higher than what the exchange specifies.
Freqtrade has however no information about historic limits.
This can lead to situations where trading-limits are inflated by using a historic price, resulting in minimum amounts > 50$.
This can lead to situations where trading-limits are inflated by using a historic price, resulting in minimum amounts > 50\$.
For example:

View File

@@ -146,10 +146,10 @@ Freqtrade can also load many options via command line (CLI) arguments (check out
The precedence for all options is as follows:
- CLI arguments override any other option
- [Environment Variables](#environment-variables)
- Configuration files are used in sequence (the last file wins) and override Strategy configurations.
- Strategy configurations are only used if they are not set via configuration or command-line arguments. These options are marked with [Strategy Override](#parameters-in-the-strategy) in the below table.
* CLI arguments override any other option
* [Environment Variables](#environment-variables)
* Configuration files are used in sequence (the last file wins) and override Strategy configurations.
* Strategy configurations are only used if they are not set via configuration or command-line arguments. These options are marked with [Strategy Override](#parameters-in-the-strategy) in the below table.
### Parameters table
@@ -183,7 +183,7 @@ Mandatory parameters are marked as **Required**, which means that they are requi
| `margin_mode` | When trading with leverage, this determines if the collateral owned by the trader will be shared or isolated to each trading pair [leverage documentation](leverage.md). <br> **Datatype:** String
| `liquidation_buffer` | A ratio specifying how large of a safety net to place between the liquidation price and the stoploss to prevent a position from reaching the liquidation price [leverage documentation](leverage.md). <br>*Defaults to `0.05`.* <br> **Datatype:** Float
| | **Unfilled timeout**
| `unfilledtimeout.entry` | **Required.** How long (in minutes or seconds) the bot will wait for an unfilled entry order to complete, after which the order will be cancelled and repeated at current (new) price, as long as there is a signal. [Strategy Override](#parameters-in-the-strategy).<br> **Datatype:** Integer
| `unfilledtimeout.entry` | **Required.** How long (in minutes or seconds) the bot will wait for an unfilled entry order to complete, after which the order will be cancelled. [Strategy Override](#parameters-in-the-strategy).<br> **Datatype:** Integer
| `unfilledtimeout.exit` | **Required.** How long (in minutes or seconds) the bot will wait for an unfilled exit order to complete, after which the order will be cancelled and repeated at current (new) price, as long as there is a signal. [Strategy Override](#parameters-in-the-strategy).<br> **Datatype:** Integer
| `unfilledtimeout.unit` | Unit to use in unfilledtimeout setting. Note: If you set unfilledtimeout.unit to "seconds", "internals.process_throttle_secs" must be less than or equal to the timeout [Strategy Override](#parameters-in-the-strategy). <br> *Defaults to `"minutes"`.* <br> **Datatype:** String
| `unfilledtimeout.exit_timeout_count` | How many times can exit orders time out. Once this number of timeouts is reached, an emergency exit is triggered. 0 to disable and allow unlimited order cancels. [Strategy Override](#parameters-in-the-strategy).<br>*Defaults to `0`.* <br> **Datatype:** Integer
@@ -295,10 +295,10 @@ Values set in the configuration file always overwrite values set in the strategy
* `order_time_in_force`
* `unfilledtimeout`
* `disable_dataframe_checks`
- `use_exit_signal`
* `use_exit_signal`
* `exit_profit_only`
- `exit_profit_offset`
- `ignore_roi_if_entry_signal`
* `exit_profit_offset`
* `ignore_roi_if_entry_signal`
* `ignore_buying_expired_candle_after`
* `position_adjustment_enable`
* `max_entry_position_adjustment`
@@ -311,12 +311,12 @@ There are several methods to configure how much of the stake currency the bot wi
The minimum stake amount will depend on exchange and pair and is usually listed in the exchange support pages.
Assuming the minimum tradable amount for XRP/USD is 20 XRP (given by the exchange), and the price is 0.6$, the minimum stake amount to buy this pair is `20 * 0.6 ~= 12`.
This exchange has also a limit on USD - where all orders must be > 10$ - which however does not apply in this case.
Assuming the minimum tradable amount for XRP/USD is 20 XRP (given by the exchange), and the price is 0.6\$, the minimum stake amount to buy this pair is `20 * 0.6 ~= 12`.
This exchange has also a limit on USD - where all orders must be > 10\$ - which however does not apply in this case.
To guarantee safe execution, freqtrade will not allow buying with a stake-amount of 10.1$, instead, it'll make sure that there's enough space to place a stoploss below the pair (+ an offset, defined by `amount_reserve_percent`, which defaults to 5%).
To guarantee safe execution, freqtrade will not allow buying with a stake-amount of 10.1\$, instead, it'll make sure that there's enough space to place a stoploss below the pair (+ an offset, defined by `amount_reserve_percent`, which defaults to 5%).
With a reserve of 5%, the minimum stake amount would be ~12.6$ (`12 * (1 + 0.05)`). If we take into account a stoploss of 10% on top of that - we'd end up with a value of ~14$ (`12.6 / (1 - 0.1)`).
With a reserve of 5%, the minimum stake amount would be ~12.6\$ (`12 * (1 + 0.05)`). If we take into account a stoploss of 10% on top of that - we'd end up with a value of ~14\$ (`12.6 / (1 - 0.1)`).
To limit this calculation in case of large stoploss values, the calculated minimum stake-limit will never be more than 50% above the real limit.
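As a rough illustration of the calculation above, here is a minimal sketch using the XRP/USD numbers from this example (the variable names are illustrative only, not freqtrade internals):
``` python
# Minimal sketch of the minimum-stake calculation described above.
min_amount = 20          # minimum tradable amount given by the exchange
price = 0.6              # current price in USD
amount_reserve = 0.05    # amount_reserve_percent, defaults to 5%
stoploss = 0.10          # strategy stoploss of 10%

min_stake = min_amount * price           # 12 USD
min_stake *= 1 + amount_reserve          # ~12.6 USD
min_stake /= 1 - stoploss                # ~14 USD
# the calculated limit is capped at 50% above the real exchange limit
min_stake = min(min_stake, min_amount * price * 1.5)
print(round(min_stake, 2))               # 14.0
```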
@@ -362,9 +362,9 @@ To overcome this, the option `amend_last_stake_amount` can be set to `True`, whi
In the example above this would mean:
- Trade1: 400 USDT
- Trade2: 400 USDT
- Trade3: 200 USDT
* Trade1: 400 USDT
* Trade2: 400 USDT
* Trade3: 200 USDT
!!! Note
This option only applies with [Static stake amount](#static-stake-amount) - since [Dynamic stake amount](#dynamic-stake-amount) divides the balances evenly.
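To make the arithmetic of the example above explicit, here is a minimal sketch - assuming a total balance of 1000 USDT and a static `stake_amount` of 400 USDT (the wallet size is assumed for illustration):
``` python
# Sketch of the `amend_last_stake_amount` behaviour: the last trade is reduced
# to whatever balance is left instead of being skipped.
available = 1000.0
stake_amount = 400.0
stakes = []
for _ in range(3):
    stake = min(stake_amount, available)
    if stake <= 0:
        break
    stakes.append(stake)
    available -= stake
print(stakes)  # [400.0, 400.0, 200.0]
```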

View File

@@ -11,9 +11,8 @@ Without provided configuration, `--exchange` becomes mandatory.
You can use a relative timerange (`--days 20`) or an absolute starting point (`--timerange 20200101-`). For incremental downloads, the relative approach should be used.
!!! Tip "Tip: Updating existing data"
If you already have backtesting data available in your data-directory and would like to refresh this data up to today, freqtrade will automatically calculate the data missing for the existing pairs and the download will occur from the latest available point until "now", neither --days or --timerange parameters are required. Freqtrade will keep the available data and only download the missing data.
If you are updating existing data after inserting new pairs that you have no data for, use `--new-pairs-days xx` parameter. Specified number of days will be downloaded for new pairs while old pairs will be updated with missing data only.
If you use `--days xx` parameter alone - data for specified number of days will be downloaded for _all_ pairs. Be careful, if specified number of days is smaller than gap between now and last downloaded candle - freqtrade will delete all existing data to avoid gaps in candle data.
If you already have backtesting data available in your data-directory and would like to refresh this data up to today, freqtrade will automatically calculate the missing timerange for the existing pairs and the download will occur from the latest available point until "now"; neither the `--days` nor the `--timerange` parameter is required. Freqtrade will keep the available data and only download the missing data.
If you are updating existing data after inserting new pairs that you have no data for, use the `--new-pairs-days xx` parameter. Specified number of days will be downloaded for new pairs while old pairs will be updated with missing data only.
### Usage
@@ -90,7 +89,7 @@ Common arguments:
!!! Tip "Downloading all data for one quote currency"
Often, you'll want to download data for all pairs of a specific quote-currency. In such cases, you can use the following shorthand:
`freqtrade download-data --exchange binance --pairs .*/USDT <...>`. The provided "pairs" string will be expanded to contain all active pairs on the exchange.
`freqtrade download-data --exchange binance --pairs ".*/USDT" <...>`. The provided "pairs" string will be expanded to contain all active pairs on the exchange.
To also download data for inactive (delisted) pairs, add `--include-inactive-pairs` to the command.
!!! Note "Startup period"
@@ -117,16 +116,17 @@ freqtrade download-data --exchange binance --pairs ETH/USDT XRP/USDT BTC/USDT
or as regex (in this case, to download all active USDT pairs)
```bash
freqtrade download-data --exchange binance --pairs .*/USDT
freqtrade download-data --exchange binance --pairs ".*/USDT"
```
### Other Notes
* To use a different directory than the exchange specific default, use `--datadir user_data/data/some_directory`.
* To change the exchange used to download the historical data from, please use a different configuration file (you'll probably need to adjust rate limits etc.)
* To change the exchange used to download the historical data from, either use `--exchange <exchange>` - or specify a different configuration file.
* To use `pairs.json` from some other directory, use `--pairs-file some_other_dir/pairs.json`.
* To download historical candle (OHLCV) data for only 10 days, use `--days 10` (defaults to 30 days).
* To download historical candle (OHLCV) data from a fixed starting point, use `--timerange 20200101-` - which will download all data from January 1st, 2020.
* Given starting points are ignored if data is already available, downloading only missing data up to today.
* Use `--timeframes` to specify which timeframes to download historical candle (OHLCV) data for. Default is `--timeframes 1m 5m`, which will download 1-minute and 5-minute data.
* To use exchange, timeframe and list of pairs as defined in your configuration file, use the `-c/--config` option. With this, the script uses the whitelist defined in the config as the list of currency pairs to download data for and does not require the pairs.json file. You can combine `-c/--config` with most other options.

View File

@@ -162,7 +162,7 @@ Hopefully you also want to contribute this back upstream.
Whatever your motivations are - this should get you off the ground in developing a new Pairlist Handler.
First of all, have a look at the [VolumePairList](https://github.com/freqtrade/freqtrade/blob/develop/freqtrade/pairlist/VolumePairList.py) Handler, and best copy this file with a name of your new Pairlist Handler.
First of all, have a look at the [VolumePairList](https://github.com/freqtrade/freqtrade/blob/develop/freqtrade/plugins/pairlist/VolumePairList.py) Handler, and best copy this file with a name of your new Pairlist Handler.
This is a simple Handler, which however serves as a good example on how to start developing.
@@ -226,7 +226,7 @@ In `VolumePairList`, this implements different methods of sorting, does early va
##### sample
``` python
def filter_pairlist(self, pairlist: List[str], tickers: Dict) -> List[str]:
def filter_pairlist(self, pairlist: list[str], tickers: dict) -> List[str]:
# Generate dynamic whitelist
pairs = self._calculate_pairlist(pairlist, tickers)
return pairs

View File

@@ -252,6 +252,14 @@ OKX requires a passphrase for each api key, you will therefore need to add this
Gate.io allows the use of `POINT` to pay for fees. As this is not a tradable currency (no regular market available), automatic fee calculations will fail (and default to a fee of 0).
The configuration parameter `exchange.unknown_fee_rate` can be used to specify the exchange rate between Point and the stake currency. Obviously, changing the stake-currency will also require changes to this value.
Gate API keys require the following permissions on top of the market type you want to trade:
* "Spot Trade" _or_ "Perpetual Futures" (Read and Write) (either select both, or the one matching the market you want to trade)
* "Wallet" (read only)
* "Account" (read only)
Without these permissions, the bot will not start correctly and show errors like "permission missing".
## Bybit
Futures trading on bybit is currently supported for USDT markets, and will use isolated futures mode.
@@ -261,6 +269,7 @@ On startup, freqtrade will set the position mode to "One-way Mode" for the whole
As bybit doesn't provide funding rate history, the dry-run calculation is used for live trades as well.
API Keys for live futures trading must have the following permissions:
* Read-write
* Contract - Orders
* Contract - Positions
@@ -295,6 +304,41 @@ It's therefore required to pass the UID as well.
!!! Warning "Necessary Verification"
Bitmart requires Verification Lvl2 to successfully trade on the spot market through the API - even though trading via UI works just fine with just Lvl1 verification.
## Hyperliquid
!!! Tip "Stoploss on Exchange"
Hyperliquid supports `stoploss_on_exchange` and uses `stop-loss-limit` orders. It provides great advantages, so we recommend using it.
Hyperliquid is a Decentralized Exchange (DEX). Decentralized exchanges work a bit differently compared to normal exchanges. Instead of authenticating private API calls with an API key, private API calls need to be signed with the private key of your wallet (we recommend using an API wallet for this, generated either on Hyperliquid or in your wallet of choice).
This needs to be configured like this:
```json
"exchange": {
"name": "hyperliquid",
"walletAddress": "your_eth_wallet_address",
"privateKey": "your_api_private_key",
// ...
}
```
* walletAddress in hex format: `0x<40 hex characters>` - Can be easily copied from your wallet - and should be your wallet address, not your API Wallet Address.
* privateKey in hex format: `0x<64 hex characters>` - Use the key the API Wallet shows on creation.
Hyperliquid handles deposits and withdrawals on the Arbitrum One chain, a Layer 2 scaling solution built on top of Ethereum. Hyperliquid uses USDC as quote / collateral. The process of depositing USDC on Hyperliquid requires a couple of steps, see [how to start trading](https://hyperliquid.gitbook.io/hyperliquid-docs/onboarding/how-to-start-trading) for details on what steps are needed.
!!! Note "Hyperliquid general usage Notes"
Hyperliquid does not support market orders, however ccxt will simulate market orders by placing limit orders with a maximum slippage of 5%.
Unfortunately, hyperliquid only offers 5000 historic candles, so backtesting will either need to build candles historically (by waiting and downloading the data incrementally over time) - or will be limited to the last 5000 candles.
!!! Info "Some general best practices (non exhaustive)"
* Beware of supply chain attacks, like pip package poisoning etcetera. Whenever you use your private key, make sure your environment is safe.
* Don't use your actual wallet private key for trading. Use the Hyperliquid [API generator](https://app.hyperliquid.xyz/API) to create a separate API wallet.
* Don't store your actual wallet private key on the server you use for freqtrade. Use the API wallet private key instead. This key won't allow withdrawals, only trading.
* Always keep your mnemonic phrase and private key private.
* Don't use the same mnemonic as the one you had to back up when initializing a hardware wallet; reusing that mnemonic effectively defeats the security of your hardware wallet.
* Create a different software wallet, only transfer the funds you want to trade with to that wallet, and use that wallet to trade on Hyperliquid.
* If you have funds you don't want to use for trading (after making a profit for example), transfer them back to your hardware wallet.
## All exchanges
Should you experience constant errors with Nonce (like `InvalidNonce`), it is best to regenerate the API keys. Resetting Nonce is difficult and it's usually easier to regenerate the API keys.
@@ -304,7 +348,7 @@ Should you experience constant errors with Nonce (like `InvalidNonce`), it is be
* The Ocean (exchange id: `theocean`) exchange uses Web3 functionality and requires `web3` python package to be installed:
```shell
$ pip3 install web3
pip3 install web3
```
### Getting latest price / Incomplete candles

View File

@@ -100,6 +100,19 @@ You can use the `/stopentry` command in Telegram to prevent future trade entry,
Please look at the [advanced setup documentation Page](advanced-setup.md#running-multiple-instances-of-freqtrade).
### I'm getting "Impossible to load Strategy" when starting the bot
This error message is shown when the bot cannot load the strategy.
Usually, you can use `freqtrade list-strategies` to list all available strategies.
The output of this command will also include a status column, showing if the strategy can be loaded.
Please check the following:
* Are you using the correct strategy name? The strategy name is case-sensitive and must correspond to the Strategy class name (not the filename!).
* Is the strategy in the `user_data/strategies` directory, and has the file-ending `.py`?
* Does the bot show other warnings before this error? Maybe you're missing some dependencies for the strategy - which would be highlighted in the log.
* In case of docker - is the strategy directory mounted correctly (check the volumes part of the docker-compose file)?
### I'm getting "Missing data fillup" messages in the log
This message is just a warning that the most recently downloaded candle data contained gaps (missing candles).
@@ -146,9 +159,9 @@ The same fix should be applied in the configuration file, if order types are def
### I'm trying to start the bot live, but get an API permission error
Errors like `Invalid API-key, IP, or permissions for action` mean exactly what they actually say.
Your API key is either invalid (copy/paste error? check for leading/trailing spaces in the config), expired, or the IP you're running the bot from is not enabled in the Exchange's API console.
Usually, the permission "Spot Trading" (or the equivalent in the exchange you use) will be necessary.
Futures will usually have to be enabled specifically.
### How do I search the bot logs for something?

View File

@@ -293,10 +293,10 @@ class MyCoolPyTorchClassifier(BasePyTorchClassifier):
super().__init__(**kwargs)
config = self.freqai_info.get("model_training_parameters", {})
self.learning_rate: float = config.get("learning_rate", 3e-4)
self.model_kwargs: Dict[str, Any] = config.get("model_kwargs", {})
self.trainer_kwargs: Dict[str, Any] = config.get("trainer_kwargs", {})
self.model_kwargs: dict[str, Any] = config.get("model_kwargs", {})
self.trainer_kwargs: dict[str, Any] = config.get("trainer_kwargs", {})
def fit(self, data_dictionary: Dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
def fit(self, data_dictionary: dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
"""
User sets up the training and test data to fit their desired model here
:param data_dictionary: the dictionary holding all data for train, test,
@@ -359,10 +359,10 @@ class PyTorchMLPRegressor(BasePyTorchRegressor):
super().__init__(**kwargs)
config = self.freqai_info.get("model_training_parameters", {})
self.learning_rate: float = config.get("learning_rate", 3e-4)
self.model_kwargs: Dict[str, Any] = config.get("model_kwargs", {})
self.trainer_kwargs: Dict[str, Any] = config.get("trainer_kwargs", {})
self.model_kwargs: dict[str, Any] = config.get("model_kwargs", {})
self.trainer_kwargs: dict[str, Any] = config.get("trainer_kwargs", {})
def fit(self, data_dictionary: Dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
def fit(self, data_dictionary: dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
n_features = data_dictionary["train_features"].shape[-1]
model = PyTorchMLPModel(
input_dim=n_features,
@@ -393,7 +393,7 @@ Here we create a `PyTorchMLPRegressor` class that implements the `fit` method. T
For example, if you are using a binary classifier to predict price movements as up or down, you can set the class names as follows:
```python
def set_freqai_targets(self, dataframe: DataFrame, metadata: Dict, **kwargs) -> DataFrame:
def set_freqai_targets(self, dataframe: DataFrame, metadata: dict, **kwargs) -> DataFrame:
self.freqai.class_names = ["down", "up"]
dataframe['&s-up_or_down'] = np.where(dataframe["close"].shift(-100) >
dataframe["close"], 'up', 'down')

View File

@@ -42,11 +42,11 @@ usage: freqtrade hyperopt [-h] [-v] [--logfile FILE] [-V] [-c PATH] [-d PATH]
[--recursive-strategy-search] [--freqaimodel NAME]
[--freqaimodel-path PATH] [-i TIMEFRAME]
[--timerange TIMERANGE]
[--data-format-ohlcv {json,jsongz,hdf5}]
[--data-format-ohlcv {json,jsongz,hdf5,feather,parquet}]
[--max-open-trades INT]
[--stake-amount STAKE_AMOUNT] [--fee FLOAT]
[-p PAIRS [PAIRS ...]] [--hyperopt-path PATH]
[--eps] [--dmmp] [--enable-protections]
[--eps] [--enable-protections]
[--dry-run-wallet DRY_RUN_WALLET]
[--timeframe-detail TIMEFRAME_DETAIL] [-e INT]
[--spaces {all,buy,sell,roi,stoploss,trailing,protection,trades,default} [{all,buy,sell,roi,stoploss,trailing,protection,trades,default} ...]]
@@ -55,15 +55,15 @@ usage: freqtrade hyperopt [-h] [-v] [--logfile FILE] [-V] [-c PATH] [-d PATH]
[--hyperopt-loss NAME] [--disable-param-export]
[--ignore-missing-spaces] [--analyze-per-epoch]
optional arguments:
options:
-h, --help show this help message and exit
-i TIMEFRAME, --timeframe TIMEFRAME
Specify timeframe (`1m`, `5m`, `30m`, `1h`, `1d`).
--timerange TIMERANGE
Specify what timerange of data to use.
--data-format-ohlcv {json,jsongz,hdf5}
--data-format-ohlcv {json,jsongz,hdf5,feather,parquet}
Storage format for downloaded candle (OHLCV) data.
(default: `json`).
(default: `feather`).
--max-open-trades INT
Override the value of the `max_open_trades`
configuration setting.
@@ -80,10 +80,6 @@ optional arguments:
--eps, --enable-position-stacking
Allow buying the same pair multiple times (position
stacking).
--dmmp, --disable-max-market-positions
Disable applying `max_open_trades` during backtest
(same as setting `max_open_trades` to a very high
number).
--enable-protections, --enableprotections
Enable protections for backtesting. Will slow
backtesting down by a considerable amount, but will
@@ -133,7 +129,8 @@ optional arguments:
Common arguments:
-v, --verbose Verbose mode (-vv for more, -vvv to get all messages).
--logfile FILE Log to the file specified. Special values are:
--logfile FILE, --log-file FILE
Log to the file specified. Special values are:
'syslog', 'journald'. See the documentation for more
details.
-V, --version show program's version number and exit
@@ -142,7 +139,7 @@ Common arguments:
`userdir/config.json` or `config.json` whichever
exists). Multiple --config options may be used. Can be
set to `-` to read config from stdin.
-d PATH, --datadir PATH
-d PATH, --datadir PATH, --data-dir PATH
Path to directory with historical backtesting data.
--userdir PATH, --user-data-dir PATH
Path to userdata directory.
@@ -867,18 +864,15 @@ You can use the `--print-all` command line option if you would like to see all r
## Position stacking and disabling max market positions
In some situations, you may need to run Hyperopt (and Backtesting) with the
`--eps`/`--enable-position-staking` and `--dmmp`/`--disable-max-market-positions` arguments.
In some situations, you may need to run Hyperopt (and Backtesting) with the `--eps`/`--enable-position-stacking` argument, or you may need to set `max_open_trades` to a very high number to disable the limit on the number of open trades.
By default, hyperopt emulates the behavior of the Freqtrade Live Run/Dry Run, where only one
open trade is allowed for every traded pair. The total number of trades open for all pairs
open trade per pair is allowed. The total number of trades open for all pairs
is also limited by the `max_open_trades` setting. During Hyperopt/Backtesting this may lead to
some potential trades to be hidden (or masked) by previously open trades.
potential trades being hidden (or masked) by already open trades.
The `--eps`/`--enable-position-stacking` argument allows emulation of buying the same pair multiple times,
while `--dmmp`/`--disable-max-market-positions` disables applying `max_open_trades`
during Hyperopt/Backtesting (which is equal to setting `max_open_trades` to a very high
number).
The `--eps`/`--enable-position-stacking` argument allows emulation of buying the same pair multiple times.
Using `--max-open-trades` with a very high number will disable the limit on the number of open trades.
!!! Note
Dry/live runs will **NOT** use position stacking - therefore it does make sense to also validate the strategy without this as it's closer to reality.
@@ -919,13 +913,39 @@ Your epochs should therefore be aligned to the possible values - or you should b
After you run Hyperopt for the desired amount of epochs, you can later list all results for analysis, select only best or profitable once, and show the details for any of the epochs previously evaluated. This can be done with the `hyperopt-list` and `hyperopt-show` sub-commands. The usage of these sub-commands is described in the [Utils](utils.md#list-hyperopt-results) chapter.
## Output debug messages from your strategy
If you want to output debug messages from your strategy, you can use the `logging` module. By default, Freqtrade will output all messages with a level of `INFO` or higher.
``` python
import logging
logger = logging.getLogger(__name__)
class MyAwesomeStrategy(IStrategy):
...
def populate_entry_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
logger.info("This is a debug message")
...
```
!!! Note "using print"
Messages printed via `print()` will not be shown in the hyperopt output unless parallelism is disabled (`-j 1`).
It is recommended to use the `logging` module instead.
## Validate backtesting results
Once the optimized strategy has been implemented into your strategy, you should backtest this strategy to make sure everything is working as expected.
To achieve same the results (number of trades, their durations, profit, etc.) as during Hyperopt, please use the same configuration and parameters (timerange, timeframe, ...) used for hyperopt `--dmmp`/`--disable-max-market-positions` and `--eps`/`--enable-position-stacking` for Backtesting.
To achieve the same results (number of trades, their durations, profit, etc.) as during Hyperopt, please use the same configuration and parameters (timerange, timeframe, ...) for Backtesting as you used for hyperopt.
### Why do my backtest results not match my hyperopt results?
Should results not match, check the following factors:
* You may have added parameters to hyperopt in `populate_indicators()` where they will be calculated only once **for all epochs**. If you are, for example, trying to optimise multiple SMA timeperiod values, the hyperoptable timeperiod parameter should be placed in `populate_entry_trend()` which is calculated every epoch. See [Optimizing an indicator parameter](https://www.freqtrade.io/en/stable/hyperopt/#optimizing-an-indicator-parameter).
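As a minimal sketch of the correct placement (strategy and parameter names are illustrative, not part of this release): the hyperoptable value lives on the strategy class, while the indicator that reads its `.value` is calculated in `populate_entry_trend()`, so it is re-evaluated for every epoch.
``` python
import talib.abstract as ta
from pandas import DataFrame

from freqtrade.strategy import IStrategy, IntParameter


class SmaPeriodExample(IStrategy):
    timeframe = "5m"
    minimal_roi = {"0": 0.1}
    stoploss = -0.10

    # hyperoptable parameter - the optimizer varies this between epochs
    buy_sma_period = IntParameter(10, 50, default=20, space="buy")

    def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        # nothing parameter-dependent here - this runs only once per hyperopt run
        return dataframe

    def populate_entry_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        # recalculated every epoch, so the optimizer actually "sees" the parameter
        sma = ta.SMA(dataframe, timeperiod=self.buy_sma_period.value)
        dataframe.loc[dataframe["close"] > sma, "enter_long"] = 1
        return dataframe

    def populate_exit_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        return dataframe
```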

View File

@@ -352,7 +352,7 @@ The optional `bearer_token` will be included in the requests Authorization Heade
#### MarketCapPairList
`MarketCapPairList` employs sorting/filtering of pairs by their marketcap rank based of CoinGecko. It will only recognize coins up to the coin placed at rank 250. The returned pairlist will be sorted based of their marketcap ranks.
`MarketCapPairList` employs sorting/filtering of pairs by their marketcap rank based on CoinGecko data. The returned pairlist will be sorted based on their marketcap ranks.
```json
"pairlists": [
@@ -366,7 +366,8 @@ The optional `bearer_token` will be included in the requests Authorization Heade
]
```
`number_assets` defines the maximum number of pairs returned by the pairlist. `max_rank` will determine the maximum rank used in creating/filtering the pairlist. It's expected that some coins within the top `max_rank` marketcap will not be included in the resulting pairlist since not all pairs will have active trading pairs in your preferred market/stake/exchange combination.
While using a `max_rank` bigger than 250 is supported, it's not recommended, as it'll cause multiple API calls to CoinGecko, which can lead to rate limit issues.
The `refresh_period` setting defines the interval (in seconds) at which the marketcap rank data will be refreshed. The default is 86,400 seconds (1 day). The pairlist cache (`refresh_period`) applies to both generating pairlists (when in the first position in the list) and filtering instances (when not in the first position in the list).

View File

@@ -28,7 +28,7 @@ Freqtrade is a free and open source crypto trading bot written in Python. It is
- Develop your Strategy: Write your strategy in python, using [pandas](https://pandas.pydata.org/). Example strategies to inspire you are available in the [strategy repository](https://github.com/freqtrade/freqtrade-strategies).
- Download market data: Download historical data of the exchange and the markets you may want to trade with.
- Backtest: Test your strategy on downloaded historical data.
- Optimize: Find the best parameters for your strategy using hyperoptimization which employs machining learning methods. You can optimize buy, sell, take profit (ROI), stop-loss and trailing stop-loss parameters for your strategy.
- Optimize: Find the best parameters for your strategy using hyperoptimization which employs machine learning methods. You can optimize buy, sell, take profit (ROI), stop-loss and trailing stop-loss parameters for your strategy.
- Select markets: Create your static list or use an automatic one based on top traded volumes and/or prices (not available during backtesting). You can also explicitly blacklist markets you don't want to trade.
- Run: Test your strategy with simulated money (Dry-Run mode) or deploy it with real money (Live-Trade mode).
- Run using Edge (optional module): The concept is to find the best historical [trade expectancy](edge.md#expectancy) by markets based on variation of the stop-loss and then allow/reject markets to trade. The sizing of the trade is based on a risk of a percentage of your capital.
@@ -40,11 +40,12 @@ Freqtrade is a free and open source crypto trading bot written in Python. It is
Please read the [exchange specific notes](exchanges.md) to learn about eventual, special configurations needed for each exchange.
- [X] [Binance](https://www.binance.com/)
- [X] [Bitmart](https://bitmart.com/)
- [X] [BingX](https://bingx.com/invite/0EM9RX)
- [X] [Bitmart](https://bitmart.com/)
- [X] [Bybit](https://bybit.com/)
- [X] [Gate.io](https://www.gate.io/ref/6266643)
- [X] [HTX](https://www.htx.com/) (Former Huobi)
- [X] [Hyperliquid](https://hyperliquid.xyz/) (A decentralized exchange, or DEX)
- [X] [Kraken](https://kraken.com/)
- [X] [OKX](https://okx.com/) (Former OKEX)
- [ ] [potentially many others through <img alt="ccxt" width="30px" src="assets/ccxt-logo.svg" />](https://github.com/ccxt/ccxt/). _(We cannot guarantee they will work)_
@@ -52,9 +53,10 @@ Please read the [exchange specific notes](exchanges.md) to learn about eventual,
### Supported Futures Exchanges (experimental)
- [X] [Binance](https://www.binance.com/)
- [X] [Gate.io](https://www.gate.io/ref/6266643)
- [X] [OKX](https://okx.com/)
- [X] [Bybit](https://bybit.com/)
- [X] [Gate.io](https://www.gate.io/ref/6266643)
- [X] [Hyperliquid](https://hyperliquid.xyz/) (A decentralized exchange, or DEX)
- [X] [OKX](https://okx.com/)
Please make sure to read the [exchange specific notes](exchanges.md), as well as the [trading with leverage](leverage.md) documentation before diving in.

View File

@@ -67,6 +67,18 @@ OS Specific steps are listed first, the common section below is necessary for al
sudo apt install -y python3-pip python3-venv python3-dev python3-pandas git curl
```
=== "MacOS"
#### Install necessary dependencies
Install [Homebrew](https://brew.sh/) if you don't have it already.
```bash
# install packages
brew install gettext libomp
```
!!! Note
The `setup.sh` script will install these dependencies for you - assuming brew is installed on your system.
=== "RaspberryPi/Raspbian"
The following assumes the latest [Raspbian Buster lite image](https://www.raspberrypi.org/downloads/raspbian/).
This image comes with python3.11 preinstalled, making it easy to get freqtrade up and running.
@@ -76,7 +88,7 @@ OS Specific steps are listed first, the common section below is necessary for al
```bash
sudo apt-get install python3-venv libatlas-base-dev cmake curl
# Use pywheels.org to speed up installation
# Use piwheels.org to speed up installation
sudo echo "[global]\nextra-index-url=https://www.piwheels.org/simple" > tee /etc/pip.conf
git clone https://github.com/freqtrade/freqtrade.git
@@ -150,9 +162,7 @@ Each time you open a new terminal, you must run `source .venv/bin/activate` to a
source ./.venv/bin/activate
```
### Congratulations
[You are ready](#you-are-ready), and run the bot
[You are now ready](#you-are-ready) to run the bot.
### Other options of /setup.sh script
@@ -220,7 +230,7 @@ cd ..
rm -rf ./ta-lib*
```
#### Setup Python virtual environment (virtualenv)
### Setup Python virtual environment (virtualenv)
You will run freqtrade in a separate `virtual environment`.
@@ -232,19 +242,18 @@ python3 -m venv .venv
source .venv/bin/activate
```
#### Install python dependencies
### Install python dependencies
```bash
python3 -m pip install --upgrade pip
python3 -m pip install -r requirements.txt
# install freqtrade
python3 -m pip install -e .
```
### Congratulations
[You are now ready](#you-are-ready) to run the bot.
[You are ready](#you-are-ready), and run the bot
#### (Optional) Post-installation Tasks
### (Optional) Post-installation Tasks
!!! Note
If you run the bot on a server, you should consider using [Docker](docker_quickstart.md) or a terminal multiplexer like `screen` or [`tmux`](https://en.wikipedia.org/wiki/Tmux) to avoid the bot being stopped on logout.
@@ -333,9 +342,7 @@ cd build_helpers
bash install_ta-lib.sh ${CONDA_PREFIX} nosudo
```
### Congratulations
[You are ready](#you-are-ready), and run the bot
[You are now ready](#you-are-ready) to run the bot.
### Important shortcuts

View File

@@ -1,7 +1,7 @@
markdown==3.7
mkdocs==1.6.1
mkdocs-material==9.5.42
mkdocs-material==9.5.45
mdx_truly_sane_lists==1.3
pymdown-extensions==10.11.2
pymdown-extensions==10.12
jinja2==3.1.4
mike==2.1.3

View File

@@ -36,6 +36,7 @@ The Order-type will be ignored if only one mode is available.
| Gate | limit |
| Okx | limit |
| Kucoin | stop-limit, stop-market|
| Hyperliquid (futures only) | limit |
!!! Note "Tight stoploss"
<ins>Do not set too low/tight stoploss value when using stop loss on exchange!</ins>

View File

@@ -38,9 +38,9 @@ class AwesomeStrategy(IStrategy):
trade.set_custom_data(key='entry_type', value=trade_entry_type)
return super().bot_loop_start(**kwargs)
def adjust_entry_price(self, trade: Trade, order: Optional[Order], pair: str,
def adjust_entry_price(self, trade: Trade, order: Order | None, pair: str,
current_time: datetime, proposed_rate: float, current_order_rate: float,
entry_tag: Optional[str], side: str, **kwargs) -> float:
entry_tag: str | None, side: str, **kwargs) -> float:
# Limit orders to use and follow SMA200 as price target for the first 10 minutes since entry trigger for BTC/USDT pair.
if (
pair == 'BTC/USDT'

View File

@@ -90,8 +90,8 @@ Called before entering a trade, makes it possible to manage your position size w
class AwesomeStrategy(IStrategy):
def custom_stake_amount(self, pair: str, current_time: datetime, current_rate: float,
proposed_stake: float, min_stake: Optional[float], max_stake: float,
leverage: float, entry_tag: Optional[str], side: str,
proposed_stake: float, min_stake: float | None, max_stake: float,
leverage: float, entry_tag: str | None, side: str,
**kwargs) -> float:
dataframe, _ = self.dp.get_analyzed_dataframe(pair=pair, timeframe=self.timeframe)
@@ -165,7 +165,8 @@ Called for open trade every iteration (roughly every 5 seconds) until a trade is
The usage of the custom stoploss method must be enabled by setting `use_custom_stoploss=True` on the strategy object.
The stoploss price can only ever move upwards - if the stoploss value returned from `custom_stoploss` would result in a lower stoploss price than was previously set, it will be ignored. The traditional `stoploss` value serves as an absolute lower level and will be instated as the initial stoploss (before this method is called for the first time for a trade), and is still mandatory.
As the custom stoploss acts as a regular, adjustable stoploss, it will behave similarly to `trailing_stop` - and trades exiting due to this will have the exit_reason of `"trailing_stop_loss"`.
The method must return a stoploss value (float / number) as a percentage of the current price.
E.g. If the `current_rate` is 200 USD, then returning `0.02` will set the stoploss price 2% lower, at 196 USD.
@@ -212,7 +213,7 @@ class AwesomeStrategy(IStrategy):
def custom_stoploss(self, pair: str, trade: Trade, current_time: datetime,
current_rate: float, current_profit: float, after_fill: bool,
**kwargs) -> Optional[float]:
**kwargs) -> float | None:
"""
Custom stoploss logic, returning the new distance relative to current_rate (as ratio).
e.g. returning -0.05 would create a stoploss 5% below current_rate.
@@ -250,7 +251,7 @@ class AwesomeStrategy(IStrategy):
def custom_stoploss(self, pair: str, trade: Trade, current_time: datetime,
current_rate: float, current_profit: float, after_fill: bool,
**kwargs) -> Optional[float]:
**kwargs) -> float | None:
# Make sure you have the longest interval first - these conditions are evaluated from top to bottom.
if current_time - timedelta(minutes=120) > trade.open_date_utc:
@@ -276,7 +277,7 @@ class AwesomeStrategy(IStrategy):
def custom_stoploss(self, pair: str, trade: Trade, current_time: datetime,
current_rate: float, current_profit: float, after_fill: bool,
**kwargs) -> Optional[float]:
**kwargs) -> float | None:
if after_fill:
# After an additional order, start with a stoploss of 10% below the new open rate
@@ -305,7 +306,7 @@ class AwesomeStrategy(IStrategy):
def custom_stoploss(self, pair: str, trade: Trade, current_time: datetime,
current_rate: float, current_profit: float, after_fill: bool,
**kwargs) -> Optional[float]:
**kwargs) -> float | None:
if pair in ("ETH/BTC", "XRP/BTC"):
return -0.10
@@ -331,7 +332,7 @@ class AwesomeStrategy(IStrategy):
def custom_stoploss(self, pair: str, trade: Trade, current_time: datetime,
current_rate: float, current_profit: float, after_fill: bool,
**kwargs) -> Optional[float]:
**kwargs) -> float | None:
if current_profit < 0.04:
return None # return None to keep using the initial stoploss
@@ -363,7 +364,7 @@ class AwesomeStrategy(IStrategy):
def custom_stoploss(self, pair: str, trade: Trade, current_time: datetime,
current_rate: float, current_profit: float, after_fill: bool,
**kwargs) -> Optional[float]:
**kwargs) -> float | None:
# evaluate highest to lowest, so that highest possible stop is used
if current_profit > 0.40:
@@ -394,7 +395,7 @@ class AwesomeStrategy(IStrategy):
def custom_stoploss(self, pair: str, trade: Trade, current_time: datetime,
current_rate: float, current_profit: float, after_fill: bool,
**kwargs) -> Optional[float]:
**kwargs) -> float | None:
dataframe, _ = self.dp.get_analyzed_dataframe(pair, self.timeframe)
last_candle = dataframe.iloc[-1].squeeze()
@@ -439,7 +440,7 @@ Stoploss values returned from `custom_stoploss()` must specify a percentage rela
def custom_stoploss(self, pair: str, trade: Trade, current_time: datetime,
current_rate: float, current_profit: float, after_fill: bool,
**kwargs) -> Optional[float]:
**kwargs) -> float | None:
# once the profit has risen above 10%, keep the stoploss at 7% above the open price
if current_profit > 0.10:
@@ -482,7 +483,7 @@ The helper function `stoploss_from_absolute()` can be used to convert from an ab
def custom_stoploss(self, pair: str, trade: Trade, current_time: datetime,
current_rate: float, current_profit: float, after_fill: bool,
**kwargs) -> Optional[float]:
**kwargs) -> float | None:
dataframe, _ = self.dp.get_analyzed_dataframe(pair, self.timeframe)
trade_date = timeframe_to_prev_date(self.timeframe, trade.open_date_utc)
candle = dataframe.iloc[-1].squeeze()
@@ -519,8 +520,8 @@ class AwesomeStrategy(IStrategy):
# ... populate_* methods
def custom_entry_price(self, pair: str, trade: Optional[Trade], current_time: datetime, proposed_rate: float,
entry_tag: Optional[str], side: str, **kwargs) -> float:
def custom_entry_price(self, pair: str, trade: Trade | None, current_time: datetime, proposed_rate: float,
entry_tag: str | None, side: str, **kwargs) -> float:
dataframe, last_updated = self.dp.get_analyzed_dataframe(pair=pair,
timeframe=self.timeframe)
@@ -530,7 +531,7 @@ class AwesomeStrategy(IStrategy):
def custom_exit_price(self, pair: str, trade: Trade,
current_time: datetime, proposed_rate: float,
current_profit: float, exit_tag: Optional[str], **kwargs) -> float:
current_profit: float, exit_tag: str | None, **kwargs) -> float:
dataframe, last_updated = self.dp.get_analyzed_dataframe(pair=pair,
timeframe=self.timeframe)
@@ -662,7 +663,7 @@ class AwesomeStrategy(IStrategy):
# ... populate_* methods
def confirm_trade_entry(self, pair: str, order_type: str, amount: float, rate: float,
time_in_force: str, current_time: datetime, entry_tag: Optional[str],
time_in_force: str, current_time: datetime, entry_tag: str | None,
side: str, **kwargs) -> bool:
"""
Called right before placing an entry order.
@@ -820,8 +821,8 @@ class DigDeeperStrategy(IStrategy):
# This is called when placing the initial order (opening trade)
def custom_stake_amount(self, pair: str, current_time: datetime, current_rate: float,
proposed_stake: float, min_stake: Optional[float], max_stake: float,
leverage: float, entry_tag: Optional[str], side: str,
proposed_stake: float, min_stake: float | None, max_stake: float,
leverage: float, entry_tag: str | None, side: str,
**kwargs) -> float:
# We need to leave most of the funds for possible further DCA orders
@@ -830,11 +831,11 @@ class DigDeeperStrategy(IStrategy):
def adjust_trade_position(self, trade: Trade, current_time: datetime,
current_rate: float, current_profit: float,
min_stake: Optional[float], max_stake: float,
min_stake: float | None, max_stake: float,
current_entry_rate: float, current_exit_rate: float,
current_entry_profit: float, current_exit_profit: float,
**kwargs
) -> Union[Optional[float], Tuple[Optional[float], Optional[str]]]:
) -> float | None | tuple[float | None, str | None]:
"""
Custom trade adjustment logic, returning the stake amount that a trade should be
increased or decreased.
@@ -890,7 +891,7 @@ class DigDeeperStrategy(IStrategy):
# Hope you have a deep wallet!
try:
# This returns first order stake size
stake_amount = filled_entries[0].stake_amount
stake_amount = filled_entries[0].stake_amount_filled
# This then calculates current safety order size
stake_amount = stake_amount * (1 + (count_of_entries * 0.25))
return stake_amount, "1/3rd_increase"
@@ -946,9 +947,9 @@ class AwesomeStrategy(IStrategy):
# ... populate_* methods
def adjust_entry_price(self, trade: Trade, order: Optional[Order], pair: str,
def adjust_entry_price(self, trade: Trade, order: Order | None, pair: str,
current_time: datetime, proposed_rate: float, current_order_rate: float,
entry_tag: Optional[str], side: str, **kwargs) -> float:
entry_tag: str | None, side: str, **kwargs) -> float:
"""
Entry price re-adjustment logic, returning the user desired limit price.
This only executes when an order was already placed, still open (unfilled fully or partially)
@@ -1003,7 +1004,7 @@ For markets / exchanges that don't support leverage, this method is ignored.
class AwesomeStrategy(IStrategy):
def leverage(self, pair: str, current_time: datetime, current_rate: float,
proposed_leverage: float, max_leverage: float, entry_tag: Optional[str], side: str,
proposed_leverage: float, max_leverage: float, entry_tag: str | None, side: str,
**kwargs) -> float:
"""
Customize leverage for each new trade. This method is only called in futures mode.

View File

@@ -4,7 +4,7 @@ This page explains how to customize your strategies, add new indicators and set
If you haven't already, please familiarize yourself with:
- the [Freqtrade strategy 101](freqtrade-101.md), which provides a quick start to strategy development
- the [Freqtrade strategy 101](strategy-101.md), which provides a quick start to strategy development
- the [Freqtrade bot basics](bot-basics.md), which provides overall info on how the bot operates
## Develop your own strategy
@@ -582,11 +582,14 @@ When hyperopting, use of the hyperoptable parameter `.value` attribute is not su
??? info "Full documentation"
``` python
def informative(timeframe: str, asset: str = '',
fmt: Optional[Union[str, Callable[[KwArg(str)], str]]] = None,
*,
candle_type: Optional[CandleType] = None,
ffill: bool = True) -> Callable[[PopulateIndicators], PopulateIndicators]:
def informative(
timeframe: str,
asset: str = "",
fmt: str | Callable[[Any], str] | None = None,
*,
candle_type: CandleType | str | None = None,
ffill: bool = True,
) -> Callable[[PopulateIndicators], PopulateIndicators]:
"""
A decorator for populate_indicators_Nn(self, dataframe, metadata), allowing these functions to
define informative indicators.

View File

@@ -215,7 +215,7 @@ trades.groupby("pair")["exit_reason"].value_counts()
```
## Analyze the loaded trades for trade parallelism
This can be useful to find the best `max_open_trades` parameter, when used with backtesting in conjunction with `--disable-max-market-positions`.
This can be useful to find the best `max_open_trades` parameter, when used with backtesting in conjunction with a very high `max_open_trades` setting.
`analyze_trade_parallelism()` returns a timeseries dataframe with an "open_trades" column, specifying the number of open trades for each candle.
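A short sketch of how this might look in a notebook - the results path is an assumption and needs to point at your own backtest results:
``` python
from freqtrade.data.btanalysis import analyze_trade_parallelism, load_backtest_data

# path to your backtest results - adjust to your setup
trades = load_backtest_data("user_data/backtest_results")

# the timeframe must match the one the backtest was run with
parallel_trades = analyze_trade_parallelism(trades, "5m")

# peak number of concurrently open trades over the backtest period
print(parallel_trades["open_trades"].max())
```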

View File

@@ -214,8 +214,8 @@ class AwesomeStrategy(IStrategy):
``` python hl_lines="4"
class AwesomeStrategy(IStrategy):
def custom_stake_amount(self, pair: str, current_time: datetime, current_rate: float,
proposed_stake: float, min_stake: Optional[float], max_stake: float,
entry_tag: Optional[str], side: str, **kwargs) -> float:
proposed_stake: float, min_stake: float | None, max_stake: float,
entry_tag: str | None, side: str, **kwargs) -> float:
# ...
return proposed_stake
```
@@ -237,7 +237,7 @@ After:
``` python hl_lines="4"
class AwesomeStrategy(IStrategy):
def confirm_trade_entry(self, pair: str, order_type: str, amount: float, rate: float,
time_in_force: str, current_time: datetime, entry_tag: Optional[str],
time_in_force: str, current_time: datetime, entry_tag: str | None,
side: str, **kwargs) -> bool:
return True
```
@@ -280,8 +280,8 @@ After:
``` python hl_lines="3"
class AwesomeStrategy(IStrategy):
def custom_entry_price(self, pair: str, trade: Optional[Trade], current_time: datetime, proposed_rate: float,
entry_tag: Optional[str], side: str, **kwargs) -> float:
def custom_entry_price(self, pair: str, trade: Trade | None, current_time: datetime, proposed_rate: float,
entry_tag: str | None, side: str, **kwargs) -> float:
return proposed_rate
```
@@ -312,7 +312,7 @@ After:
``` python hl_lines="5 7"
def custom_stoploss(self, pair: str, trade: 'Trade', current_time: datetime,
current_rate: float, current_profit: float, after_fill: bool,
**kwargs) -> Optional[float]:
**kwargs) -> float | None:
# once the profit has risen above 10%, keep the stoploss at 7% above the open price
if current_profit > 0.10:
return stoploss_from_open(0.07, current_profit, is_short=trade.is_short)
@@ -329,7 +329,7 @@ After:
`order_time_in_force` attributes changed from `"buy"` to `"entry"` and `"sell"` to `"exit"`.
``` python
order_time_in_force: Dict = {
order_time_in_force: dict = {
"buy": "gtc",
"sell": "gtc",
}
@@ -338,7 +338,7 @@ After:
After:
``` python hl_lines="2 3"
order_time_in_force: Dict = {
order_time_in_force: dict = {
"entry": "GTC",
"exit": "GTC",
}
@@ -780,7 +780,7 @@ class MyCoolFreqaiModel(BaseRegressionModel):
def predict(
self, unfiltered_df: DataFrame, dk: FreqaiDataKitchen, **kwargs
) -> Tuple[DataFrame, npt.NDArray[np.int_]]:
) -> tuple[DataFrame, npt.NDArray[np.int_]]:
# ... your custom stuff

View File

@@ -58,6 +58,7 @@ For the Freqtrade configuration, you can then use the full value (including `-`
```json
"chat_id": "-1001332619709"
```
!!! Warning "Using telegram groups"
When using telegram groups, you're giving every member of the telegram group access to your freqtrade bot and to all commands possible via telegram. Please make sure that you can trust everyone in the telegram group to avoid unpleasant surprises.
@@ -93,9 +94,12 @@ Example configuration showing the different settings:
"trailing_stop_loss": "on",
"stop_loss": "on",
"stoploss_on_exchange": "on",
"custom_exit": "silent",
"partial_exit": "on"
"custom_exit": "silent", // custom_exit without specifying an exit reason
"partial_exit": "on",
// "custom_exit_message": "silent", // Disable individual custom exit reasons
"*": "off" // Disable all other exit reasons
},
// "exit": "off", // Simplistic configuration to disable all exit messages
"exit_cancel": "on",
"exit_fill": "off",
"protection_trigger": "off",
@@ -108,16 +112,16 @@ Example configuration showing the different settings:
},
```
`entry` notifications are sent when the order is placed, while `entry_fill` notifications are sent when the order is filled on the exchange.
`exit` notifications are sent when the order is placed, while `exit_fill` notifications are sent when the order is filled on the exchange.
`*_fill` notifications are off by default and must be explicitly enabled.
`protection_trigger` notifications are sent when a protection triggers and `protection_trigger_global` notifications trigger when global protections are triggered.
`strategy_msg` - Receive notifications from the strategy, sent via `self.dp.send_msg()` from the strategy [more details](strategy-customization.md#send-notification).
`show_candle` - show candle values as part of entry/exit messages. Only possible values are `"ohlc"` or `"off"`.
`balance_dust_level` will define what the `/balance` command takes as "dust" - Currencies with a balance below this will be shown.
`allow_custom_messages` completely disable strategy messages.
`reload` allows you to disable reload-buttons on selected messages.
* `entry` notifications are sent when the order is placed, while `entry_fill` notifications are sent when the order is filled on the exchange.
* `exit` notifications are sent when the order is placed, while `exit_fill` notifications are sent when the order is filled on the exchange.
Exit messages (`exit` and `exit_fill`) can be further controlled at the individual exit reason level, with the specific exit reason as the key. The default for all exit reasons is `on`, but this can be configured via the special `*` key, which acts as a wildcard for all exit reasons that are not explicitly defined (see the example below this list).
* `*_fill` notifications are off by default and must be explicitly enabled.
* `protection_trigger` notifications are sent when a protection triggers, and `protection_trigger_global` notifications are sent when global protections are triggered.
* `strategy_msg` - Receive notifications from the strategy, sent via `self.dp.send_msg()` from the strategy [more details](strategy-customization.md#send-notification).
* `show_candle` - show candle values as part of entry/exit messages. Only possible values are `"ohlc"` or `"off"`.
* `balance_dust_level` will define what the `/balance` command takes as "dust" - Currencies with a balance below this will not be shown individually, but grouped together as dust.
* `allow_custom_messages` allows you to completely disable strategy messages.
* `reload` allows you to disable reload-buttons on selected messages.
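
As a rough illustration of the per-exit-reason control described above, `exit` and `exit_fill` can be given as objects keyed by exit reason (the reasons and values below are examples only, not a complete configuration):

```json
"exit": {
    "roi": "silent",
    "exit_signal": "on",
    "*": "off"
},
"exit_fill": {
    "stop_loss": "on",
    "*": "off"
},
```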
## Create a custom keyboard (command shortcut buttons)
@@ -238,16 +242,16 @@ Once all positions are sold, run `/stop` to completely stop the bot.
For each open trade, the bot will send you the following message.
Enter Tag is configurable via Strategy.
> **Trade ID:** `123` `(since 1 days ago)`
> **Current Pair:** CVC/BTC
> **Direction:** Long
> **Leverage:** 1.0
> **Amount:** `26.64180098`
> **Enter Tag:** Awesome Long Signal
> **Open Rate:** `0.00007489`
> **Current Rate:** `0.00007489`
> **Unrealized Profit:** `12.95%`
> **Stoploss:** `0.00007389 (-0.02%)`
> **Trade ID:** `123` `(since 1 days ago)`
> **Current Pair:** CVC/BTC
> **Direction:** Long
> **Leverage:** 1.0
> **Amount:** `26.64180098`
> **Enter Tag:** Awesome Long Signal
> **Open Rate:** `0.00007489`
> **Current Rate:** `0.00007489`
> **Unrealized Profit:** `12.95%`
> **Stoploss:** `0.00007389 (-0.02%)`
### /status table
@@ -274,34 +278,34 @@ current max
Return a summary of your profit/loss and performance.
> **ROI:** Close trades
> ∙ `0.00485701 BTC (2.2%) (15.2 Σ%)`
> ∙ `62.968 USD`
> **ROI:** All trades
> ∙ `0.00255280 BTC (1.5%) (6.43 Σ%)`
> ∙ `33.095 EUR`
>
> **Total Trade Count:** `138`
> **Bot started:** `2022-07-11 18:40:44`
> **First Trade opened:** `3 days ago`
> **Latest Trade opened:** `2 minutes ago`
> **Avg. Duration:** `2:33:45`
> **Best Performing:** `PAY/BTC: 50.23%`
> **Trading volume:** `0.5 BTC`
> **Profit factor:** `1.04`
> **Win / Loss:** `102 / 36`
> **Winrate:** `73.91%`
> **Expectancy (Ratio):** `4.87 (1.66)`
> **Max Drawdown:** `9.23% (0.01255 BTC)`
> **ROI:** Close trades
> ∙ `0.00485701 BTC (2.2%) (15.2 Σ%)`
> ∙ `62.968 USD`
> **ROI:** All trades
> ∙ `0.00255280 BTC (1.5%) (6.43 Σ%)`
> ∙ `33.095 EUR`
>
> **Total Trade Count:** `138`
> **Bot started:** `2022-07-11 18:40:44`
> **First Trade opened:** `3 days ago`
> **Latest Trade opened:** `2 minutes ago`
> **Avg. Duration:** `2:33:45`
> **Best Performing:** `PAY/BTC: 50.23%`
> **Trading volume:** `0.5 BTC`
> **Profit factor:** `1.04`
> **Win / Loss:** `102 / 36`
> **Winrate:** `73.91%`
> **Expectancy (Ratio):** `4.87 (1.66)`
> **Max Drawdown:** `9.23% (0.01255 BTC)`
The relative profit of `1.2%` is the average profit per trade.
The relative profit of `15.2 Σ%` is based on the starting capital - so in this case, the starting capital was `0.00485701 * 1.152 = 0.00738 BTC`.
Starting capital is either taken from the `available_capital` setting, or calculated by using current wallet size - profits.
Profit Factor is calculated as gross profits / gross losses - and should serve as an overall metric for the strategy.
Expectancy corresponds to the average return per currency unit at risk, i.e. the winrate and the risk-reward ratio (the average gain of winning trades compared to the average loss of losing trades).
Expectancy Ratio is expected profit or loss of a subsequent trade based on the performance of all past trades.
Max drawdown corresponds to the backtesting metric `Absolute Drawdown (Account)` - calculated as `(Absolute Drawdown) / (DrawdownHigh + startingBalance)`.
Bot started date will refer to the date the bot was first started. For older bots, this will default to the first trade's open date.
The relative profit of `1.2%` is the average profit per trade.
The relative profit of `15.2 Σ%` is based on the starting capital - so in this case, the starting capital was `0.00485701 * 1.152 = 0.00738 BTC`.
**Starting capital** is either taken from the `available_capital` setting, or calculated by using current wallet size - profits.
**Profit Factor** is calculated as gross profits / gross losses - and should serve as an overall metric for the strategy.
**Expectancy** corresponds to the average return per currency unit at risk, i.e. the winrate and the risk-reward ratio (the average gain of winning trades compared to the average loss of losing trades).
**Expectancy Ratio** is the expected profit or loss of a subsequent trade based on the performance of all past trades.
**Max drawdown** corresponds to the backtesting metric `Absolute Drawdown (Account)` - calculated as `(Absolute Drawdown) / (DrawdownHigh + startingBalance)`.
**Bot started date** will refer to the date the bot was first started. For older bots, this will default to the first trade's open date.
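
To make these definitions concrete, here is a minimal sketch of how such aggregate metrics could be computed from per-trade absolute profits (a simplified illustration, not necessarily freqtrade's exact implementation):

``` python
def summary_metrics(profits: list[float]) -> dict[str, float]:
    """Toy calculation of profit factor, expectancy and expectancy ratio.

    Assumes at least one trade in `profits`.
    """
    wins = [p for p in profits if p > 0]
    losses = [abs(p) for p in profits if p < 0]
    winrate = len(wins) / len(profits)
    avg_win = sum(wins) / len(wins) if wins else 0.0
    avg_loss = sum(losses) / len(losses) if losses else 0.0

    # Gross profits divided by gross losses
    profit_factor = sum(wins) / sum(losses) if losses else float("inf")
    # Average return per trade: winrate * average win - loss rate * average loss
    expectancy = winrate * avg_win - (1 - winrate) * avg_loss
    # Expectancy expressed relative to the average loss (risk) per trade
    expectancy_ratio = ((1 + avg_win / avg_loss) * winrate) - 1 if avg_loss else float("inf")
    return {
        "profit_factor": profit_factor,
        "expectancy": expectancy,
        "expectancy_ratio": expectancy_ratio,
    }
```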
### /forceexit <trade_id>
@@ -329,33 +333,34 @@ Note that for this to work, `force_entry_enable` needs to be set to true.
### /performance
Return the performance of each crypto-currency the bot has sold.
> Performance:
> 1. `RCN/BTC 0.003 BTC (57.77%) (1)`
> 2. `PAY/BTC 0.0012 BTC (56.91%) (1)`
> 3. `VIB/BTC 0.0011 BTC (47.07%) (1)`
> 4. `SALT/BTC 0.0010 BTC (30.24%) (1)`
> 5. `STORJ/BTC 0.0009 BTC (27.24%) (1)`
> ...
> Performance:
> 1. `RCN/BTC 0.003 BTC (57.77%) (1)`
> 2. `PAY/BTC 0.0012 BTC (56.91%) (1)`
> 3. `VIB/BTC 0.0011 BTC (47.07%) (1)`
> 4. `SALT/BTC 0.0010 BTC (30.24%) (1)`
> 5. `STORJ/BTC 0.0009 BTC (27.24%) (1)`
> ...
### /balance
Return the balance of all crypto-currencies you have on the exchange.
> **Currency:** BTC
> **Available:** 3.05890234
> **Balance:** 3.05890234
> **Pending:** 0.0
> **Currency:** CVC
> **Available:** 86.64180098
> **Balance:** 86.64180098
> **Pending:** 0.0
> **Currency:** BTC
> **Available:** 3.05890234
> **Balance:** 3.05890234
> **Pending:** 0.0
>
> **Currency:** CVC
> **Available:** 86.64180098
> **Balance:** 86.64180098
> **Pending:** 0.0
### /daily <n>
By default, `/daily` will return the last 7 days. The example below is for `/daily 3`:
> **Daily Profit over the last 3 days:**
```
Day (count) USDT USD Profit %
-------------- ------------ ---------- ----------
@@ -370,6 +375,7 @@ Per default `/weekly` will return the 8 last weeks, including the current week.
from Monday. The example below is for `/weekly 3`:
> **Weekly Profit over the last 3 weeks (starting from Monday):**
```
Monday (count) Profit BTC Profit USD Profit %
------------- -------------- ------------ ----------
@@ -396,18 +402,18 @@ Month (count) Profit BTC Profit USD Profit %
Shows the current whitelist
> Using whitelist `StaticPairList` with 22 pairs
> Using whitelist `StaticPairList` with 22 pairs
> `IOTA/BTC, NEO/BTC, TRX/BTC, VET/BTC, ADA/BTC, ETC/BTC, NCASH/BTC, DASH/BTC, XRP/BTC, XVG/BTC, EOS/BTC, LTC/BTC, OMG/BTC, BTG/BTC, LSK/BTC, ZEC/BTC, HOT/BTC, IOTX/BTC, XMR/BTC, AST/BTC, XLM/BTC, NANO/BTC`
### /blacklist [pair]
Shows the current blacklist.
If Pair is set, then this pair will be added to the blacklist.
Also supports multiple pairs, separated by a space.
Also supports multiple pairs, separated by a space.
Use `/reload_config` to reset the blacklist.
> Using blacklist `StaticPairList` with 2 pairs
>`DODGE/BTC`, `HOT/BTC`.
> Using blacklist `StaticPairList` with 2 pairs
>`DODGE/BTC`, `HOT/BTC`.
### /edge

View File

@@ -143,6 +143,7 @@ Most properties here can be None as they are dependent on the exchange response.
| `remaining` | float | Remaining amount |
| `cost` | float | Cost of the order - usually average * filled (*Exchange dependent on futures, may contain the cost with or without leverage and may be in contracts.*) |
| `stake_amount` | float | Stake amount used for this order. *Added in 2023.7.* |
| `stake_amount_filled` | float | Filled Stake amount used for this order. *Added in 2024.11.* |
| `order_date` | datetime | Order creation date **use `order_date_utc` instead** |
| `order_date_utc` | datetime | Order creation date (in UTC) |
| `order_fill_date` | datetime | Order fill date **use `order_fill_utc` instead** |

View File

@@ -1,6 +1,6 @@
"""Freqtrade bot"""
__version__ = "2024.10"
__version__ = "2024.11"
if "dev" in __version__:
from pathlib import Path

View File

@@ -5,7 +5,7 @@ This module contains the argument manager class
from argparse import ArgumentParser, Namespace, _ArgumentGroup
from functools import partial
from pathlib import Path
from typing import Any, Optional, Union
from typing import Any
from freqtrade.commands.cli_options import AVAILABLE_CLI_OPTIONS
from freqtrade.constants import DEFAULT_CONFIG
@@ -37,7 +37,6 @@ ARGS_COMMON_OPTIMIZE = [
ARGS_BACKTEST = ARGS_COMMON_OPTIMIZE + [
"position_stacking",
"use_max_market_positions",
"enable_protections",
"dry_run_wallet",
"timeframe_detail",
@@ -53,7 +52,6 @@ ARGS_HYPEROPT = ARGS_COMMON_OPTIMIZE + [
"hyperopt",
"hyperopt_path",
"position_stacking",
"use_max_market_positions",
"enable_protections",
"dry_run_wallet",
"timeframe_detail",
@@ -117,7 +115,7 @@ ARGS_CREATE_USERDIR = ["user_data_dir", "reset"]
ARGS_BUILD_CONFIG = ["config"]
ARGS_SHOW_CONFIG = ["user_data_dir", "config", "show_sensitive"]
ARGS_BUILD_STRATEGY = ["user_data_dir", "strategy", "template"]
ARGS_BUILD_STRATEGY = ["user_data_dir", "strategy", "strategy_path", "template"]
ARGS_CONVERT_DATA_TRADES = ["pairs", "format_from_trades", "format_to", "erase", "exchange"]
ARGS_CONVERT_DATA = ["pairs", "format_from", "format_to", "erase", "exchange"]
@@ -242,8 +240,7 @@ ARGS_STRATEGY_UPDATER = ["strategy_list", "strategy_path", "recursive_strategy_s
ARGS_LOOKAHEAD_ANALYSIS = [
a
for a in ARGS_BACKTEST
if a
not in ("position_stacking", "use_max_market_positions", "backtest_cache", "backtest_breakdown")
if a not in ("position_stacking", "backtest_cache", "backtest_breakdown")
] + ["minimum_trade_amount", "targeted_trade_amount", "lookahead_analysis_exportfilename"]
ARGS_RECURSIVE_ANALYSIS = ["timeframe", "timerange", "dataformat_ohlcv", "pairs", "startup_candle"]
@@ -278,9 +275,9 @@ class Arguments:
Arguments Class. Manage the arguments received by the cli
"""
def __init__(self, args: Optional[list[str]]) -> None:
def __init__(self, args: list[str] | None) -> None:
self.args = args
self._parsed_arg: Optional[Namespace] = None
self._parsed_arg: Namespace | None = None
def get_parsed_arg(self) -> dict[str, Any]:
"""
@@ -322,9 +319,7 @@ class Arguments:
return parsed_arg
def _build_args(
self, optionlist: list[str], parser: Union[ArgumentParser, _ArgumentGroup]
) -> None:
def _build_args(self, optionlist: list[str], parser: ArgumentParser | _ArgumentGroup) -> None:
for val in optionlist:
opt = AVAILABLE_CLI_OPTIONS[val]
parser.add_argument(*opt.cli, dest=val, **opt.kwargs)

View File

@@ -168,14 +168,6 @@ AVAILABLE_CLI_OPTIONS = {
action="store_true",
default=False,
),
"use_max_market_positions": Arg(
"--dmmp",
"--disable-max-market-positions",
help="Disable applying `max_open_trades` during backtest "
"(same as setting `max_open_trades` to a very high number).",
action="store_false",
default=True,
),
"backtest_show_pair_list": Arg(
"--show-pair-list",
help="Show backtesting pairlist sorted by profit.",

View File

@@ -86,7 +86,14 @@ def start_new_strategy(args: dict[str, Any]) -> None:
config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)
if "strategy" in args and args["strategy"]:
new_path = config["user_data_dir"] / USERPATH_STRATEGIES / (args["strategy"] + ".py")
if "strategy_path" in args and args["strategy_path"]:
strategy_dir = Path(args["strategy_path"])
else:
strategy_dir = config["user_data_dir"] / USERPATH_STRATEGIES
if not strategy_dir.is_dir():
logger.info(f"Creating strategy directory {strategy_dir}")
strategy_dir.mkdir(parents=True)
new_path = strategy_dir / (args["strategy"] + ".py")
if new_path.exists():
raise OperationalException(

View File

@@ -1,6 +1,5 @@
import logging
from pathlib import Path
from typing import Optional
import requests
@@ -24,7 +23,7 @@ def clean_ui_subdir(directory: Path):
p.rmdir()
def read_ui_version(dest_folder: Path) -> Optional[str]:
def read_ui_version(dest_folder: Path) -> str | None:
file = dest_folder / ".uiversion"
if not file.is_file():
return None
@@ -52,7 +51,7 @@ def download_and_install_ui(dest_folder: Path, dl_url: str, version: str):
f.write(version)
def get_ui_download_url(version: Optional[str] = None) -> tuple[str, str]:
def get_ui_download_url(version: str | None = None) -> tuple[str, str]:
base_url = "https://api.github.com/repos/freqtrade/frequi/"
# Get base UI Repo path

View File

@@ -15,7 +15,7 @@ def start_hyperopt_list(args: dict[str, Any]) -> None:
"""
from freqtrade.configuration import setup_utils_configuration
from freqtrade.data.btanalysis import get_latest_hyperopt_file
from freqtrade.optimize.hyperopt_output import HyperoptOutput
from freqtrade.optimize.hyperopt.hyperopt_output import HyperoptOutput
from freqtrade.optimize.hyperopt_tools import HyperoptTools
config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)

View File

@@ -1,7 +1,7 @@
import csv
import logging
import sys
from typing import Any, Union
from typing import Any
from freqtrade.enums import RunMode
from freqtrade.exceptions import ConfigurationError, OperationalException
@@ -87,7 +87,7 @@ def _print_objs_tabular(objs: list, print_colorized: bool) -> None:
from rich.text import Text
names = [s["name"] for s in objs]
objs_to_print: list[dict[str, Union[Text, str]]] = [
objs_to_print: list[dict[str, Text | str]] = [
{
"name": Text(s["name"] if s["name"] else "--"),
"location": s["location_rel"],

View File

@@ -517,8 +517,11 @@ CONF_SCHEMA = {
},
"exit_fill": {
"description": "Telegram setting for exit fill signals.",
"type": "string",
"enum": TELEGRAM_SETTING_OPTIONS,
"type": ["string", "object"],
"additionalProperties": {
"type": "string",
"enum": TELEGRAM_SETTING_OPTIONS,
},
"default": "on",
},
"exit_cancel": {

View File

@@ -5,9 +5,10 @@ This module contains the configuration class
import ast
import logging
import warnings
from collections.abc import Callable
from copy import deepcopy
from pathlib import Path
from typing import Any, Callable, Optional
from typing import Any
from freqtrade import constants
from freqtrade.configuration.deprecated_settings import process_temporary_deprecated_settings
@@ -37,9 +38,9 @@ class Configuration:
Reuse this class for the bot, backtesting, hyperopt and every script that required configuration
"""
def __init__(self, args: dict[str, Any], runmode: Optional[RunMode] = None) -> None:
def __init__(self, args: dict[str, Any], runmode: RunMode | None = None) -> None:
self.args = args
self.config: Optional[Config] = None
self.config: Config | None = None
self.runmode = runmode
def get_config(self) -> Config:
@@ -241,11 +242,7 @@ class Configuration:
logstring="Parameter --enable-protections detected, enabling Protections. ...",
)
if "use_max_market_positions" in self.args and not self.args["use_max_market_positions"]:
config.update({"use_max_market_positions": False})
logger.info("Parameter --disable-max-market-positions detected ...")
logger.info("max_open_trades set to unlimited ...")
elif "max_open_trades" in self.args and self.args["max_open_trades"]:
if "max_open_trades" in self.args and self.args["max_open_trades"]:
config.update({"max_open_trades": self.args["max_open_trades"]})
logger.info(
"Parameter --max-open-trades detected, overriding max_open_trades to: %s ...",
@@ -455,8 +452,8 @@ class Configuration:
config: Config,
argname: str,
logstring: str,
logfun: Optional[Callable] = None,
deprecated_msg: Optional[str] = None,
logfun: Callable | None = None,
deprecated_msg: str | None = None,
) -> None:
"""
:param config: Configuration dictionary

View File

@@ -3,7 +3,6 @@ Functions to handle deprecated settings
"""
import logging
from typing import Optional
from freqtrade.constants import Config
from freqtrade.exceptions import ConfigurationError, OperationalException
@@ -14,9 +13,9 @@ logger = logging.getLogger(__name__)
def check_conflicting_settings(
config: Config,
section_old: Optional[str],
section_old: str | None,
name_old: str,
section_new: Optional[str],
section_new: str | None,
name_new: str,
) -> None:
section_new_config = config.get(section_new, {}) if section_new else config
@@ -34,7 +33,7 @@ def check_conflicting_settings(
def process_removed_setting(
config: Config, section1: str, name1: str, section2: Optional[str], name2: str
config: Config, section1: str, name1: str, section2: str | None, name2: str
) -> None:
"""
:param section1: Removed section
@@ -54,9 +53,9 @@ def process_removed_setting(
def process_deprecated_setting(
config: Config,
section_old: Optional[str],
section_old: str | None,
name_old: str,
section_new: Optional[str],
section_new: str | None,
name_new: str,
) -> None:
check_conflicting_settings(config, section_old, name_old, section_new, name_new)

View File

@@ -1,7 +1,6 @@
import logging
import shutil
from pathlib import Path
from typing import Optional
from freqtrade.configuration.detect_environment import running_in_docker
from freqtrade.constants import (
@@ -18,7 +17,7 @@ from freqtrade.exceptions import OperationalException
logger = logging.getLogger(__name__)
def create_datadir(config: Config, datadir: Optional[str] = None) -> Path:
def create_datadir(config: Config, datadir: str | None = None) -> Path:
folder = Path(datadir) if datadir else Path(f"{config['user_data_dir']}/data")
if not datadir:
# set datadir

View File

@@ -7,7 +7,7 @@ import re
import sys
from copy import deepcopy
from pathlib import Path
from typing import Any, Optional
from typing import Any
import rapidjson
@@ -78,7 +78,7 @@ def load_config_file(path: str) -> dict[str, Any]:
def load_from_files(
files: list[str], base_path: Optional[Path] = None, level: int = 0
files: list[str], base_path: Path | None = None, level: int = 0
) -> dict[str, Any]:
"""
Recursively load configuration files if specified.

View File

@@ -5,7 +5,6 @@ This module contains the argument manager class
import logging
import re
from datetime import datetime, timezone
from typing import Optional
from typing_extensions import Self
@@ -25,24 +24,24 @@ class TimeRange:
def __init__(
self,
starttype: Optional[str] = None,
stoptype: Optional[str] = None,
starttype: str | None = None,
stoptype: str | None = None,
startts: int = 0,
stopts: int = 0,
):
self.starttype: Optional[str] = starttype
self.stoptype: Optional[str] = stoptype
self.starttype: str | None = starttype
self.stoptype: str | None = stoptype
self.startts: int = startts
self.stopts: int = stopts
@property
def startdt(self) -> Optional[datetime]:
def startdt(self) -> datetime | None:
if self.startts:
return datetime.fromtimestamp(self.startts, tz=timezone.utc)
return None
@property
def stopdt(self) -> Optional[datetime]:
def stopdt(self) -> datetime | None:
if self.stopts:
return datetime.fromtimestamp(self.stopts, tz=timezone.utc)
return None
@@ -120,7 +119,7 @@ class TimeRange:
self.starttype = "date"
@classmethod
def parse_timerange(cls, text: Optional[str]) -> Self:
def parse_timerange(cls, text: str | None) -> Self:
"""
Parse the value of the argument --timerange to determine what is the range desired
:param text: value from --timerange

View File

@@ -4,7 +4,7 @@
bot constants
"""
from typing import Any, Literal, Optional
from typing import Any, Literal
from freqtrade.enums import CandleType, PriceType
@@ -38,6 +38,7 @@ HYPEROPT_LOSS_BUILTIN = [
"MaxDrawDownHyperOptLoss",
"MaxDrawDownRelativeHyperOptLoss",
"ProfitDrawDownHyperOptLoss",
"MultiMetricHyperOptLoss",
]
AVAILABLE_PAIRLISTS = [
"StaticPairList",
@@ -97,7 +98,7 @@ DL_DATA_TIMEFRAMES = ["1m", "5m"]
ENV_VAR_PREFIX = "FREQTRADE__"
CANCELED_EXCHANGE_STATES = ("cancelled", "canceled", "expired")
CANCELED_EXCHANGE_STATES = ("cancelled", "canceled", "expired", "rejected")
NON_OPEN_EXCHANGE_STATES = CANCELED_EXCHANGE_STATES + ("closed",)
# Define decimals per coin for outputs
@@ -193,7 +194,7 @@ ListPairsWithTimeframes = list[PairWithTimeframe]
# Type for trades list
TradeList = list[list]
# ticks, pair, timeframe, CandleType
TickWithTimeframe = tuple[str, str, CandleType, Optional[int], Optional[int]]
TickWithTimeframe = tuple[str, str, CandleType, int | None, int | None]
ListTicksWithTimeframes = list[TickWithTimeframe]
LongShort = Literal["long", "short"]

View File

@@ -6,7 +6,7 @@ import logging
from copy import copy
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Literal, Optional, Union
from typing import Any, Literal
import numpy as np
import pandas as pd
@@ -53,7 +53,7 @@ BT_DATA_COLUMNS = [
]
def get_latest_optimize_filename(directory: Union[Path, str], variant: str) -> str:
def get_latest_optimize_filename(directory: Path | str, variant: str) -> str:
"""
Get latest backtest export based on '.last_result.json'.
:param directory: Directory to search for last result
@@ -84,7 +84,7 @@ def get_latest_optimize_filename(directory: Union[Path, str], variant: str) -> s
return data[f"latest_{variant}"]
def get_latest_backtest_filename(directory: Union[Path, str]) -> str:
def get_latest_backtest_filename(directory: Path | str) -> str:
"""
Get latest backtest export based on '.last_result.json'.
:param directory: Directory to search for last result
@@ -97,7 +97,7 @@ def get_latest_backtest_filename(directory: Union[Path, str]) -> str:
return get_latest_optimize_filename(directory, "backtest")
def get_latest_hyperopt_filename(directory: Union[Path, str]) -> str:
def get_latest_hyperopt_filename(directory: Path | str) -> str:
"""
Get latest hyperopt export based on '.last_result.json'.
:param directory: Directory to search for last result
@@ -114,9 +114,7 @@ def get_latest_hyperopt_filename(directory: Union[Path, str]) -> str:
return "hyperopt_results.pickle"
def get_latest_hyperopt_file(
directory: Union[Path, str], predef_filename: Optional[str] = None
) -> Path:
def get_latest_hyperopt_file(directory: Path | str, predef_filename: str | None = None) -> Path:
"""
Get latest hyperopt export based on '.last_result.json'.
:param directory: Directory to search for last result
@@ -137,7 +135,7 @@ def get_latest_hyperopt_file(
return directory / get_latest_hyperopt_filename(directory)
def load_backtest_metadata(filename: Union[Path, str]) -> dict[str, Any]:
def load_backtest_metadata(filename: Path | str) -> dict[str, Any]:
"""
Read metadata dictionary from backtest results file without reading and deserializing entire
file.
@@ -154,7 +152,7 @@ def load_backtest_metadata(filename: Union[Path, str]) -> dict[str, Any]:
raise OperationalException("Unexpected error while loading backtest metadata.") from e
def load_backtest_stats(filename: Union[Path, str]) -> BacktestResultType:
def load_backtest_stats(filename: Path | str) -> BacktestResultType:
"""
Load backtest statistics file.
:param filename: pathlib.Path object, or string pointing to the file.
@@ -276,7 +274,7 @@ def get_backtest_market_change(filename: Path, include_ts: bool = True) -> pd.Da
def find_existing_backtest_stats(
dirname: Union[Path, str], run_ids: dict[str, str], min_backtest_date: Optional[datetime] = None
dirname: Path | str, run_ids: dict[str, str], min_backtest_date: datetime | None = None
) -> dict[str, Any]:
"""
Find existing backtest stats that match specified run IDs and load them.
@@ -345,7 +343,7 @@ def _load_backtest_data_df_compatibility(df: pd.DataFrame) -> pd.DataFrame:
return df
def load_backtest_data(filename: Union[Path, str], strategy: Optional[str] = None) -> pd.DataFrame:
def load_backtest_data(filename: Path | str, strategy: str | None = None) -> pd.DataFrame:
"""
Load backtest data file.
:param filename: pathlib.Path object, or string pointing to a file or directory
@@ -439,7 +437,7 @@ def evaluate_result_multi(
return df_final[df_final["open_trades"] > max_open_trades]
def trade_list_to_dataframe(trades: Union[list[Trade], list[LocalTrade]]) -> pd.DataFrame:
def trade_list_to_dataframe(trades: list[Trade] | list[LocalTrade]) -> pd.DataFrame:
"""
Convert list of Trade objects to pandas Dataframe
:param trades: List of trade objects
@@ -453,7 +451,7 @@ def trade_list_to_dataframe(trades: Union[list[Trade], list[LocalTrade]]) -> pd.
return df
def load_trades_from_db(db_url: str, strategy: Optional[str] = None) -> pd.DataFrame:
def load_trades_from_db(db_url: str, strategy: str | None = None) -> pd.DataFrame:
"""
Load trades from a DB (using dburl)
:param db_url: Sqlite url (default format sqlite:///tradesv3.dry-run.sqlite)
@@ -476,7 +474,7 @@ def load_trades(
db_url: str,
exportfilename: Path,
no_trades: bool = False,
strategy: Optional[str] = None,
strategy: str | None = None,
) -> pd.DataFrame:
"""
Based on configuration option 'trade_source':

View File

@@ -8,7 +8,7 @@ Common Interface for bot and strategy to access data.
import logging
from collections import deque
from datetime import datetime, timezone
from typing import Any, Optional
from typing import Any
from pandas import DataFrame, Timedelta, Timestamp, to_timedelta
@@ -40,17 +40,17 @@ class DataProvider:
def __init__(
self,
config: Config,
exchange: Optional[Exchange],
exchange: Exchange | None,
pairlists=None,
rpc: Optional[RPCManager] = None,
rpc: RPCManager | None = None,
) -> None:
self._config = config
self._exchange = exchange
self._pairlists = pairlists
self.__rpc = rpc
self.__cached_pairs: dict[PairWithTimeframe, tuple[DataFrame, datetime]] = {}
self.__slice_index: Optional[int] = None
self.__slice_date: Optional[datetime] = None
self.__slice_index: int | None = None
self.__slice_date: datetime | None = None
self.__cached_pairs_backtesting: dict[PairWithTimeframe, DataFrame] = {}
self.__producer_pairs_df: dict[
@@ -255,8 +255,8 @@ class DataProvider:
def get_producer_df(
self,
pair: str,
timeframe: Optional[str] = None,
candle_type: Optional[CandleType] = None,
timeframe: str | None = None,
candle_type: CandleType | None = None,
producer_name: str = "default",
) -> tuple[DataFrame, datetime]:
"""
@@ -349,7 +349,7 @@ class DataProvider:
return total_candles
def get_pair_dataframe(
self, pair: str, timeframe: Optional[str] = None, candle_type: str = ""
self, pair: str, timeframe: str | None = None, candle_type: str = ""
) -> DataFrame:
"""
Return pair candle (OHLCV) data, either live or cached historical -- depending
@@ -437,7 +437,7 @@ class DataProvider:
def refresh(
self,
pairlist: ListPairsWithTimeframes,
helping_pairs: Optional[ListPairsWithTimeframes] = None,
helping_pairs: ListPairsWithTimeframes | None = None,
) -> None:
"""
Refresh data, called with each cycle
@@ -471,7 +471,7 @@ class DataProvider:
return list(self._exchange._klines.keys())
def ohlcv(
self, pair: str, timeframe: Optional[str] = None, copy: bool = True, candle_type: str = ""
self, pair: str, timeframe: str | None = None, copy: bool = True, candle_type: str = ""
) -> DataFrame:
"""
Get candle (OHLCV) data for the given pair as DataFrame
@@ -497,7 +497,7 @@ class DataProvider:
return DataFrame()
def trades(
self, pair: str, timeframe: Optional[str] = None, copy: bool = True, candle_type: str = ""
self, pair: str, timeframe: str | None = None, copy: bool = True, candle_type: str = ""
) -> DataFrame:
"""
Get candle (TRADES) data for the given pair as DataFrame
@@ -529,7 +529,7 @@ class DataProvider:
)
return trades_df
def market(self, pair: str) -> Optional[dict[str, Any]]:
def market(self, pair: str) -> dict[str, Any] | None:
"""
Return market data for the pair
:param pair: Pair to get the data for

View File

@@ -1,5 +1,4 @@
import logging
from typing import Optional
from pandas import DataFrame, read_feather, to_datetime
@@ -37,7 +36,7 @@ class FeatherDataHandler(IDataHandler):
)
def _ohlcv_load(
self, pair: str, timeframe: str, timerange: Optional[TimeRange], candle_type: CandleType
self, pair: str, timeframe: str, timerange: TimeRange | None, candle_type: CandleType
) -> DataFrame:
"""
Internal method used to load data for one pair from disk.
@@ -59,20 +58,25 @@ class FeatherDataHandler(IDataHandler):
)
if not filename.exists():
return DataFrame(columns=self._columns)
pairdata = read_feather(filename)
pairdata.columns = self._columns
pairdata = pairdata.astype(
dtype={
"open": "float",
"high": "float",
"low": "float",
"close": "float",
"volume": "float",
}
)
pairdata["date"] = to_datetime(pairdata["date"], unit="ms", utc=True)
return pairdata
try:
pairdata = read_feather(filename)
pairdata.columns = self._columns
pairdata = pairdata.astype(
dtype={
"open": "float",
"high": "float",
"low": "float",
"close": "float",
"volume": "float",
}
)
pairdata["date"] = to_datetime(pairdata["date"], unit="ms", utc=True)
return pairdata
except Exception as e:
logger.exception(
f"Error loading data from {filename}. Exception: {e}. Returning empty dataframe."
)
return DataFrame(columns=self._columns)
def ohlcv_append(
self, pair: str, timeframe: str, data: DataFrame, candle_type: CandleType
@@ -108,7 +112,7 @@ class FeatherDataHandler(IDataHandler):
raise NotImplementedError()
def _trades_load(
self, pair: str, trading_mode: TradingMode, timerange: Optional[TimeRange] = None
self, pair: str, trading_mode: TradingMode, timerange: TimeRange | None = None
) -> DataFrame:
"""
Load a pair from file, either .json.gz or .json

View File

@@ -1,5 +1,4 @@
import logging
from typing import Optional
import numpy as np
import pandas as pd
@@ -45,7 +44,7 @@ class HDF5DataHandler(IDataHandler):
)
def _ohlcv_load(
self, pair: str, timeframe: str, timerange: Optional[TimeRange], candle_type: CandleType
self, pair: str, timeframe: str, timerange: TimeRange | None, candle_type: CandleType
) -> pd.DataFrame:
"""
Internal method used to load data for one pair from disk.
@@ -69,28 +68,36 @@ class HDF5DataHandler(IDataHandler):
)
if not filename.exists():
return pd.DataFrame(columns=self._columns)
where = []
if timerange:
if timerange.starttype == "date":
where.append(f"date >= Timestamp({timerange.startts * 1e9})")
if timerange.stoptype == "date":
where.append(f"date <= Timestamp({timerange.stopts * 1e9})")
try:
where = []
if timerange:
if timerange.starttype == "date":
where.append(f"date >= Timestamp({timerange.startts * 1e9})")
if timerange.stoptype == "date":
where.append(f"date <= Timestamp({timerange.stopts * 1e9})")
pairdata = pd.read_hdf(filename, key=key, mode="r", where=where)
pairdata = pd.read_hdf(filename, key=key, mode="r", where=where)
if list(pairdata.columns) != self._columns:
raise ValueError("Wrong dataframe format")
pairdata = pairdata.astype(
dtype={
"open": "float",
"high": "float",
"low": "float",
"close": "float",
"volume": "float",
}
)
pairdata = pairdata.reset_index(drop=True)
return pairdata
if list(pairdata.columns) != self._columns:
raise ValueError("Wrong dataframe format")
pairdata = pairdata.astype(
dtype={
"open": "float",
"high": "float",
"low": "float",
"close": "float",
"volume": "float",
}
)
pairdata = pairdata.reset_index(drop=True)
return pairdata
except ValueError:
raise
except Exception as e:
logger.exception(
f"Error loading data from {filename}. Exception: {e}. Returning empty dataframe."
)
return pd.DataFrame(columns=self._columns)
def ohlcv_append(
self, pair: str, timeframe: str, data: pd.DataFrame, candle_type: CandleType
@@ -134,7 +141,7 @@ class HDF5DataHandler(IDataHandler):
raise NotImplementedError()
def _trades_load(
self, pair: str, trading_mode: TradingMode, timerange: Optional[TimeRange] = None
self, pair: str, trading_mode: TradingMode, timerange: TimeRange | None = None
) -> pd.DataFrame:
"""
Load a pair from h5 file.

View File

@@ -10,7 +10,6 @@ from abc import ABC, abstractmethod
from copy import deepcopy
from datetime import datetime, timezone
from pathlib import Path
from typing import Optional
from pandas import DataFrame, to_datetime
@@ -126,7 +125,7 @@ class IDataHandler(ABC):
@abstractmethod
def _ohlcv_load(
self, pair: str, timeframe: str, timerange: Optional[TimeRange], candle_type: CandleType
self, pair: str, timeframe: str, timerange: TimeRange | None, candle_type: CandleType
) -> DataFrame:
"""
Internal method used to load data for one pair from disk.
@@ -247,7 +246,7 @@ class IDataHandler(ABC):
@abstractmethod
def _trades_load(
self, pair: str, trading_mode: TradingMode, timerange: Optional[TimeRange] = None
self, pair: str, trading_mode: TradingMode, timerange: TimeRange | None = None
) -> DataFrame:
"""
Load a pair from file, either .json.gz or .json
@@ -282,7 +281,7 @@ class IDataHandler(ABC):
return False
def trades_load(
self, pair: str, trading_mode: TradingMode, timerange: Optional[TimeRange] = None
self, pair: str, trading_mode: TradingMode, timerange: TimeRange | None = None
) -> DataFrame:
"""
Load a pair from file, either .json.gz or .json
@@ -370,7 +369,7 @@ class IDataHandler(ABC):
timeframe: str,
candle_type: CandleType,
*,
timerange: Optional[TimeRange] = None,
timerange: TimeRange | None = None,
fill_missing: bool = True,
drop_incomplete: bool = False,
startup_candles: int = 0,
@@ -566,7 +565,7 @@ def get_datahandlerclass(datatype: str) -> type[IDataHandler]:
def get_datahandler(
datadir: Path, data_format: Optional[str] = None, data_handler: Optional[IDataHandler] = None
datadir: Path, data_format: str | None = None, data_handler: IDataHandler | None = None
) -> IDataHandler:
"""
:param datadir: Folder to save data

View File

@@ -1,5 +1,4 @@
import logging
from typing import Optional
import numpy as np
from pandas import DataFrame, read_json, to_datetime
@@ -45,7 +44,7 @@ class JsonDataHandler(IDataHandler):
)
def _ohlcv_load(
self, pair: str, timeframe: str, timerange: Optional[TimeRange], candle_type: CandleType
self, pair: str, timeframe: str, timerange: TimeRange | None, candle_type: CandleType
) -> DataFrame:
"""
Internal method used to load data for one pair from disk.
@@ -119,7 +118,7 @@ class JsonDataHandler(IDataHandler):
raise NotImplementedError()
def _trades_load(
self, pair: str, trading_mode: TradingMode, timerange: Optional[TimeRange] = None
self, pair: str, trading_mode: TradingMode, timerange: TimeRange | None = None
) -> DataFrame:
"""
Load a pair from file, either .json.gz or .json

View File

@@ -1,5 +1,4 @@
import logging
from typing import Optional
from pandas import DataFrame, read_parquet, to_datetime
@@ -35,7 +34,7 @@ class ParquetDataHandler(IDataHandler):
data.reset_index(drop=True).loc[:, self._columns].to_parquet(filename)
def _ohlcv_load(
self, pair: str, timeframe: str, timerange: Optional[TimeRange], candle_type: CandleType
self, pair: str, timeframe: str, timerange: TimeRange | None, candle_type: CandleType
) -> DataFrame:
"""
Internal method used to load data for one pair from disk.
@@ -57,20 +56,25 @@ class ParquetDataHandler(IDataHandler):
)
if not filename.exists():
return DataFrame(columns=self._columns)
pairdata = read_parquet(filename)
pairdata.columns = self._columns
pairdata = pairdata.astype(
dtype={
"open": "float",
"high": "float",
"low": "float",
"close": "float",
"volume": "float",
}
)
pairdata["date"] = to_datetime(pairdata["date"], unit="ms", utc=True)
return pairdata
try:
pairdata = read_parquet(filename)
pairdata.columns = self._columns
pairdata = pairdata.astype(
dtype={
"open": "float",
"high": "float",
"low": "float",
"close": "float",
"volume": "float",
}
)
pairdata["date"] = to_datetime(pairdata["date"], unit="ms", utc=True)
return pairdata
except Exception as e:
logger.exception(
f"Error loading data from {filename}. Exception: {e}. Returning empty dataframe."
)
return DataFrame(columns=self._columns)
def ohlcv_append(
self, pair: str, timeframe: str, data: DataFrame, candle_type: CandleType
@@ -106,7 +110,7 @@ class ParquetDataHandler(IDataHandler):
raise NotImplementedError()
def _trades_load(
self, pair: str, trading_mode: TradingMode, timerange: Optional[TimeRange] = None
self, pair: str, trading_mode: TradingMode, timerange: TimeRange | None = None
) -> DataFrame:
"""
Load a pair from file, either .json.gz or .json

View File

@@ -2,18 +2,11 @@ import logging
import operator
from datetime import datetime, timedelta
from pathlib import Path
from typing import Optional
from pandas import DataFrame, concat
from freqtrade.configuration import TimeRange
from freqtrade.constants import (
DATETIME_PRINT_FORMAT,
DEFAULT_DATAFRAME_COLUMNS,
DL_DATA_TIMEFRAMES,
DOCS_LINK,
Config,
)
from freqtrade.constants import DATETIME_PRINT_FORMAT, DL_DATA_TIMEFRAMES, DOCS_LINK, Config
from freqtrade.data.converter import (
clean_ohlcv_dataframe,
convert_trades_to_ohlcv,
@@ -25,8 +18,9 @@ from freqtrade.enums import CandleType, TradingMode
from freqtrade.exceptions import OperationalException
from freqtrade.exchange import Exchange
from freqtrade.plugins.pairlist.pairlist_helpers import dynamic_expand_pairlist
from freqtrade.util import dt_now, dt_ts, format_ms_time, get_progress_tracker
from freqtrade.util import dt_now, dt_ts, format_ms_time
from freqtrade.util.migrations import migrate_data
from freqtrade.util.progress_tracker import CustomProgress, retrieve_progress_tracker
logger = logging.getLogger(__name__)
@@ -37,12 +31,12 @@ def load_pair_history(
timeframe: str,
datadir: Path,
*,
timerange: Optional[TimeRange] = None,
timerange: TimeRange | None = None,
fill_up_missing: bool = True,
drop_incomplete: bool = False,
startup_candles: int = 0,
data_format: Optional[str] = None,
data_handler: Optional[IDataHandler] = None,
data_format: str | None = None,
data_handler: IDataHandler | None = None,
candle_type: CandleType = CandleType.SPOT,
) -> DataFrame:
"""
@@ -79,13 +73,13 @@ def load_data(
timeframe: str,
pairs: list[str],
*,
timerange: Optional[TimeRange] = None,
timerange: TimeRange | None = None,
fill_up_missing: bool = True,
startup_candles: int = 0,
fail_without_data: bool = False,
data_format: str = "feather",
candle_type: CandleType = CandleType.SPOT,
user_futures_funding_rate: Optional[int] = None,
user_futures_funding_rate: int | None = None,
) -> dict[str, DataFrame]:
"""
Load ohlcv history data for a list of pairs.
@@ -137,8 +131,8 @@ def refresh_data(
timeframe: str,
pairs: list[str],
exchange: Exchange,
data_format: Optional[str] = None,
timerange: Optional[TimeRange] = None,
data_format: str | None = None,
timerange: TimeRange | None = None,
candle_type: CandleType,
) -> None:
"""
@@ -168,11 +162,11 @@ def refresh_data(
def _load_cached_data_for_updating(
pair: str,
timeframe: str,
timerange: Optional[TimeRange],
timerange: TimeRange | None,
data_handler: IDataHandler,
candle_type: CandleType,
prepend: bool = False,
) -> tuple[DataFrame, Optional[int], Optional[int]]:
) -> tuple[DataFrame, int | None, int | None]:
"""
Load cached data to download more data.
If timerange is passed in, checks whether data from an before the stored data will be
@@ -200,14 +194,21 @@ def _load_cached_data_for_updating(
candle_type=candle_type,
)
if not data.empty:
if not prepend and start and start < data.iloc[0]["date"]:
# Earlier data than existing data requested, redownload all
data = DataFrame(columns=DEFAULT_DATAFRAME_COLUMNS)
if prepend:
end = data.iloc[0]["date"]
else:
if prepend:
end = data.iloc[0]["date"]
else:
start = data.iloc[-1]["date"]
if start and start < data.iloc[0]["date"]:
# Earlier data than existing data requested, Update start date
logger.info(
f"{pair}, {timeframe}, {candle_type}: "
f"Requested start date {start:{DATETIME_PRINT_FORMAT}} earlier than local "
f"data start date {data.iloc[0]['date']:{DATETIME_PRINT_FORMAT}}. "
f"Use `--prepend` to download data prior "
f"to {data.iloc[0]['date']:{DATETIME_PRINT_FORMAT}}, or "
"`--erase` to redownload all data."
)
start = data.iloc[-1]["date"]
start_ms = int(start.timestamp() * 1000) if start else None
end_ms = int(end.timestamp() * 1000) if end else None
return data, start_ms, end_ms
@@ -220,8 +221,8 @@ def _download_pair_history(
exchange: Exchange,
timeframe: str = "5m",
new_pairs_days: int = 30,
data_handler: Optional[IDataHandler] = None,
timerange: Optional[TimeRange] = None,
data_handler: IDataHandler | None = None,
timerange: TimeRange | None = None,
candle_type: CandleType,
erase: bool = False,
prepend: bool = False,
@@ -322,21 +323,24 @@ def refresh_backtest_ohlcv_data(
timeframes: list[str],
datadir: Path,
trading_mode: str,
timerange: Optional[TimeRange] = None,
timerange: TimeRange | None = None,
new_pairs_days: int = 30,
erase: bool = False,
data_format: Optional[str] = None,
data_format: str | None = None,
prepend: bool = False,
progress_tracker: CustomProgress | None = None,
) -> list[str]:
"""
Refresh stored ohlcv data for backtesting and hyperopt operations.
Used by freqtrade download-data subcommand.
:return: List of pairs that are not available.
"""
progress_tracker = retrieve_progress_tracker(progress_tracker)
pairs_not_available = []
data_handler = get_datahandler(datadir, data_format)
candle_type = CandleType.get_default(trading_mode)
with get_progress_tracker() as progress:
with progress_tracker as progress:
tf_length = len(timeframes) if trading_mode != "futures" else len(timeframes) + 2
timeframe_task = progress.add_task("Timeframe", total=tf_length)
pair_task = progress.add_task("Downloading data...", total=len(pairs))
@@ -346,7 +350,7 @@ def refresh_backtest_ohlcv_data(
progress.update(timeframe_task, completed=0)
if pair not in exchange.markets:
pairs_not_available.append(pair)
pairs_not_available.append(f"{pair}: Pair not available on exchange.")
logger.info(f"Skipping pair {pair}...")
continue
for timeframe in timeframes:
@@ -404,7 +408,7 @@ def _download_trades_history(
pair: str,
*,
new_pairs_days: int = 30,
timerange: Optional[TimeRange] = None,
timerange: TimeRange | None = None,
data_handler: IDataHandler,
trading_mode: TradingMode,
) -> bool:
@@ -412,79 +416,74 @@ def _download_trades_history(
Download trade history from the exchange.
Appends to previously downloaded trades data.
"""
try:
until = None
since = 0
if timerange:
if timerange.starttype == "date":
since = timerange.startts * 1000
if timerange.stoptype == "date":
until = timerange.stopts * 1000
until = None
since = 0
if timerange:
if timerange.starttype == "date":
since = timerange.startts * 1000
if timerange.stoptype == "date":
until = timerange.stopts * 1000
trades = data_handler.trades_load(pair, trading_mode)
trades = data_handler.trades_load(pair, trading_mode)
# TradesList columns are defined in constants.DEFAULT_TRADES_COLUMNS
# DEFAULT_TRADES_COLUMNS: 0 -> timestamp
# DEFAULT_TRADES_COLUMNS: 1 -> id
# TradesList columns are defined in constants.DEFAULT_TRADES_COLUMNS
# DEFAULT_TRADES_COLUMNS: 0 -> timestamp
# DEFAULT_TRADES_COLUMNS: 1 -> id
if not trades.empty and since > 0 and since < trades.iloc[0]["timestamp"]:
# since is before the first trade
logger.info(
f"Start ({trades.iloc[0]['date']:{DATETIME_PRINT_FORMAT}}) earlier than "
f"available data. Redownloading trades for {pair}..."
)
trades = trades_list_to_df([])
from_id = trades.iloc[-1]["id"] if not trades.empty else None
if not trades.empty and since < trades.iloc[-1]["timestamp"]:
# Reset since to the last available point
# - 5 seconds (to ensure we're getting all trades)
since = trades.iloc[-1]["timestamp"] - (5 * 1000)
logger.info(
f"Using last trade date -5s - Downloading trades for {pair} "
f"since: {format_ms_time(since)}."
)
if not since:
since = dt_ts(dt_now() - timedelta(days=new_pairs_days))
logger.debug(
"Current Start: %s",
"None" if trades.empty else f"{trades.iloc[0]['date']:{DATETIME_PRINT_FORMAT}}",
if not trades.empty and since > 0 and since < trades.iloc[0]["timestamp"]:
# since is before the first trade
raise ValueError(
f"Start {format_ms_time(since)} earlier than "
f"available data ({trades.iloc[0]['date']:{DATETIME_PRINT_FORMAT}}). "
f"Please use `--erase` if you'd like to redownload {pair}."
)
logger.debug(
"Current End: %s",
"None" if trades.empty else f"{trades.iloc[-1]['date']:{DATETIME_PRINT_FORMAT}}",
)
logger.info(f"Current Amount of trades: {len(trades)}")
# Default since_ms to 30 days if nothing is given
new_trades = exchange.get_historic_trades(
pair=pair,
since=since,
until=until,
from_id=from_id,
from_id = trades.iloc[-1]["id"] if not trades.empty else None
if not trades.empty and since < trades.iloc[-1]["timestamp"]:
# Reset since to the last available point
# - 5 seconds (to ensure we're getting all trades)
since = trades.iloc[-1]["timestamp"] - (5 * 1000)
logger.info(
f"Using last trade date -5s - Downloading trades for {pair} "
f"since: {format_ms_time(since)}."
)
new_trades_df = trades_list_to_df(new_trades[1])
trades = concat([trades, new_trades_df], axis=0)
# Remove duplicates to make sure we're not storing data we don't need
trades = trades_df_remove_duplicates(trades)
data_handler.trades_store(pair, trades, trading_mode)
logger.debug(
"New Start: %s",
"None" if trades.empty else f"{trades.iloc[0]['date']:{DATETIME_PRINT_FORMAT}}",
)
logger.debug(
"New End: %s",
"None" if trades.empty else f"{trades.iloc[-1]['date']:{DATETIME_PRINT_FORMAT}}",
)
logger.info(f"New Amount of trades: {len(trades)}")
return True
if not since:
since = dt_ts(dt_now() - timedelta(days=new_pairs_days))
except Exception:
logger.exception(f'Failed to download and store historic trades for pair: "{pair}". ')
return False
logger.debug(
"Current Start: %s",
"None" if trades.empty else f"{trades.iloc[0]['date']:{DATETIME_PRINT_FORMAT}}",
)
logger.debug(
"Current End: %s",
"None" if trades.empty else f"{trades.iloc[-1]['date']:{DATETIME_PRINT_FORMAT}}",
)
logger.info(f"Current Amount of trades: {len(trades)}")
# Default since_ms to 30 days if nothing is given
new_trades = exchange.get_historic_trades(
pair=pair,
since=since,
until=until,
from_id=from_id,
)
new_trades_df = trades_list_to_df(new_trades[1])
trades = concat([trades, new_trades_df], axis=0)
# Remove duplicates to make sure we're not storing data we don't need
trades = trades_df_remove_duplicates(trades)
data_handler.trades_store(pair, trades, trading_mode)
logger.debug(
"New Start: %s",
"None" if trades.empty else f"{trades.iloc[0]['date']:{DATETIME_PRINT_FORMAT}}",
)
logger.debug(
"New End: %s",
"None" if trades.empty else f"{trades.iloc[-1]['date']:{DATETIME_PRINT_FORMAT}}",
)
logger.info(f"New Amount of trades: {len(trades)}")
return True
def refresh_backtest_trades_data(
@@ -496,20 +495,22 @@ def refresh_backtest_trades_data(
new_pairs_days: int = 30,
erase: bool = False,
data_format: str = "feather",
progress_tracker: CustomProgress | None = None,
) -> list[str]:
"""
Refresh stored trades data for backtesting and hyperopt operations.
Used by freqtrade download-data subcommand.
:return: List of pairs that are not available.
"""
progress_tracker = retrieve_progress_tracker(progress_tracker)
pairs_not_available = []
data_handler = get_datahandler(datadir, data_format=data_format)
with get_progress_tracker() as progress:
with progress_tracker as progress:
pair_task = progress.add_task("Downloading data...", total=len(pairs))
for pair in pairs:
progress.update(pair_task, description=f"Downloading trades [{pair}]")
if pair not in exchange.markets:
pairs_not_available.append(pair)
pairs_not_available.append(f"{pair}: Pair not available on exchange.")
logger.info(f"Skipping pair {pair}...")
continue
@@ -518,14 +519,22 @@ def refresh_backtest_trades_data(
logger.info(f"Deleting existing data for pair {pair}.")
logger.info(f"Downloading trades for pair {pair}.")
_download_trades_history(
exchange=exchange,
pair=pair,
new_pairs_days=new_pairs_days,
timerange=timerange,
data_handler=data_handler,
trading_mode=trading_mode,
)
try:
_download_trades_history(
exchange=exchange,
pair=pair,
new_pairs_days=new_pairs_days,
timerange=timerange,
data_handler=data_handler,
trading_mode=trading_mode,
)
except ValueError as e:
pairs_not_available.append(f"{pair}: {str(e)}")
except Exception:
logger.exception(
f'Failed to download and store historic trades for pair: "{pair}". '
)
progress.update(pair_task, advance=1)
return pairs_not_available
@@ -577,6 +586,22 @@ def validate_backtest_data(
def download_data_main(config: Config) -> None:
from freqtrade.resolvers.exchange_resolver import ExchangeResolver
exchange = ExchangeResolver.load_exchange(config, validate=False)
download_data(config, exchange)
def download_data(
config: Config,
exchange: Exchange,
*,
progress_tracker: CustomProgress | None = None,
) -> None:
"""
Download data function. Used from both cli and API.
"""
timerange = TimeRange()
if "days" in config:
time_since = (datetime.now() - timedelta(days=config["days"])).strftime("%Y%m%d")
@@ -590,10 +615,6 @@ def download_data_main(config: Config) -> None:
pairs_not_available: list[str] = []
# Init exchange
from freqtrade.resolvers.exchange_resolver import ExchangeResolver
exchange = ExchangeResolver.load_exchange(config, validate=False)
available_pairs = [
p
for p in exchange.get_markets(
@@ -605,17 +626,18 @@ def download_data_main(config: Config) -> None:
if "timeframes" not in config:
config["timeframes"] = DL_DATA_TIMEFRAMES
logger.info(
f"About to download pairs: {expanded_pairs}, "
f"intervals: {config['timeframes']} to {config['datadir']}"
)
if len(expanded_pairs) == 0:
logger.warning(
"No pairs available for download. "
"Please make sure you're using the correct Pair naming for your selected trade mode. \n"
f"More info: {DOCS_LINK}/bot-basics/#pair-naming"
)
return
logger.info(
f"About to download pairs: {expanded_pairs}, "
f"intervals: {config['timeframes']} to {config['datadir']}"
)
for timeframe in config["timeframes"]:
exchange.validate_timeframes(timeframe)
@@ -637,6 +659,7 @@ def download_data_main(config: Config) -> None:
erase=bool(config.get("erase")),
data_format=config["dataformat_trades"],
trading_mode=config.get("trading_mode", TradingMode.SPOT),
progress_tracker=progress_tracker,
)
if config.get("convert_trades") or not exchange.get_option("ohlcv_has_history", True):
@@ -672,10 +695,12 @@ def download_data_main(config: Config) -> None:
data_format=config["dataformat_ohlcv"],
trading_mode=config.get("trading_mode", "spot"),
prepend=config.get("prepend_data", False),
progress_tracker=progress_tracker,
)
finally:
if pairs_not_available:
logger.info(
f"Pairs [{','.join(pairs_not_available)}] not available "
f"on exchange {exchange.name}."
errors = "\n" + ("\n".join(pairs_not_available))
logger.warning(
f"Encountered a problem downloading the following pairs from {exchange.name}: "
f"{errors}"
)

View File

@@ -3,7 +3,6 @@
import logging
from datetime import datetime, timezone
from pathlib import Path
from typing import Optional
import ccxt
@@ -53,12 +52,12 @@ class Binance(Exchange):
(TradingMode.FUTURES, MarginMode.ISOLATED)
]
def get_tickers(self, symbols: Optional[list[str]] = None, cached: bool = False) -> Tickers:
def get_tickers(self, symbols: list[str] | None = None, *, cached: bool = False) -> Tickers:
tickers = super().get_tickers(symbols=symbols, cached=cached)
if self.trading_mode == TradingMode.FUTURES:
# Binance's future result has no bid/ask values.
# Therefore we must fetch that from fetch_bids_asks and combine the two results.
bidsasks = self.fetch_bids_asks(symbols, cached)
bidsasks = self.fetch_bids_asks(symbols, cached=cached)
tickers = deep_merge_dicts(bidsasks, tickers, allow_null_overrides=False)
return tickers
@@ -106,7 +105,7 @@ class Binance(Exchange):
candle_type: CandleType,
is_new_pair: bool = False,
raise_: bool = False,
until_ms: Optional[int] = None,
until_ms: int | None = None,
) -> OHLCVResponse:
"""
Overwrite to introduce "fast new pair" functionality by detecting the pair's listing date
@@ -144,9 +143,7 @@ class Binance(Exchange):
"""
return open_date.minute == 0 and open_date.second < 15
def fetch_funding_rates(
self, symbols: Optional[list[str]] = None
) -> dict[str, dict[str, float]]:
def fetch_funding_rates(self, symbols: list[str] | None = None) -> dict[str, dict[str, float]]:
"""
Fetch funding rates for the given symbols.
:param symbols: List of symbols to fetch funding rates for
@@ -177,7 +174,7 @@ class Binance(Exchange):
leverage: float,
wallet_balance: float, # Or margin balance
open_trades: list,
) -> Optional[float]:
) -> float | None:
"""
Important: Must be fetching data from cached values as this is used by backtesting!
MARGIN: https://www.binance.com/en/support/faq/f6b010588e55413aa58b7d63ee0125ed
@@ -263,3 +260,19 @@ class Binance(Exchange):
return self.get_leverage_tiers()
else:
return {}
async def _async_get_trade_history_id_startup(
self, pair: str, since: int | None
) -> tuple[list[list], str]:
"""
override for initial call
Binance only provides a limited set of historic trades data.
Using from_id=0, we can get the earliest available trades.
So if we don't get any data with the provided "since", we can assume to
download all available data.
"""
t, from_id = await self._async_fetch_trades(pair, since=since)
if not t:
return [], "0"
return t, from_id

File diff suppressed because it is too large Load Diff

View File

@@ -2,7 +2,6 @@
import logging
from datetime import datetime, timezone
from typing import Optional
from freqtrade.exchange import Exchange
@@ -17,7 +16,7 @@ class Bitpanda(Exchange):
"""
def get_trades_for_order(
self, order_id: str, pair: str, since: datetime, params: Optional[dict] = None
self, order_id: str, pair: str, since: datetime, params: dict | None = None
) -> list:
"""
Fetch Orders using the "fetch_my_trades" endpoint and filter them by order-id.

View File

@@ -2,16 +2,16 @@
import logging
from datetime import datetime, timedelta
from typing import Any, Optional
from typing import Any
import ccxt
from freqtrade.constants import BuySell
from freqtrade.enums import CandleType, MarginMode, PriceType, TradingMode
from freqtrade.enums import MarginMode, PriceType, TradingMode
from freqtrade.exceptions import DDosProtection, ExchangeError, OperationalException, TemporaryError
from freqtrade.exchange import Exchange
from freqtrade.exchange.common import retrier
from freqtrade.exchange.exchange_types import FtHas
from freqtrade.exchange.exchange_types import CcxtOrder, FtHas
from freqtrade.util.datetime_helpers import dt_now, dt_ts
@@ -47,6 +47,7 @@ class Bybit(Exchange):
"ohlcv_has_history": True,
"mark_ohlcv_timeframe": "4h",
"funding_fee_timeframe": "8h",
"funding_fee_candle_limit": 200,
"stoploss_on_exchange": True,
"stoploss_order_types": {"limit": "limit", "market": "market"},
# bybit response parsing fails to populate stopLossPrice
@@ -114,14 +115,6 @@ class Bybit(Exchange):
except ccxt.BaseError as e:
raise OperationalException(e) from e
def ohlcv_candle_limit(
self, timeframe: str, candle_type: CandleType, since_ms: Optional[int] = None
) -> int:
if candle_type == CandleType.FUNDING_RATE:
return 200
return super().ohlcv_candle_limit(timeframe, candle_type, since_ms)
def _lev_prep(self, pair: str, leverage: float, side: BuySell, accept_fail: bool = False):
if self.trading_mode != TradingMode.SPOT:
params = {"leverage": leverage}
@@ -147,6 +140,17 @@ class Bybit(Exchange):
params["position_idx"] = 0
return params
def _order_needs_price(self, side: BuySell, ordertype: str) -> bool:
# Bybit requires price for market orders - but only for classic accounts,
# and only in spot mode
return (
ordertype != "market"
or (
side == "buy" and not self.unified_account and self.trading_mode == TradingMode.SPOT
)
or self._ft_has.get("marketOrderRequiresPrice", False)
)
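As a quick sanity check of the condition above, here is a standalone sketch that mirrors the boolean expression (the optional marketOrderRequiresPrice override from _ft_has is left out for brevity; this is an illustration, not Bybit's API):
# Limit orders always need a price; market buys need one only on classic (non-unified) spot accounts.
def order_needs_price(side: str, ordertype: str, unified_account: bool, spot: bool) -> bool:
    return ordertype != "market" or (side == "buy" and not unified_account and spot)

print(order_needs_price("buy", "market", unified_account=False, spot=True))   # True
print(order_needs_price("buy", "market", unified_account=True, spot=True))    # False
print(order_needs_price("sell", "limit", unified_account=True, spot=False))   # True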
def dry_run_liquidation_price(
self,
pair: str,
@@ -157,7 +161,7 @@ class Bybit(Exchange):
leverage: float,
wallet_balance: float, # Or margin balance
open_trades: list,
) -> Optional[float]:
) -> float | None:
"""
Important: Must be fetching data from cached values as this is used by backtesting!
PERPETUAL:
@@ -229,7 +233,9 @@ class Bybit(Exchange):
logger.warning(f"Could not update funding fees for {pair}.")
return 0.0
def fetch_orders(self, pair: str, since: datetime, params: Optional[dict] = None) -> list[dict]:
def fetch_orders(
self, pair: str, since: datetime, params: dict | None = None
) -> list[CcxtOrder]:
"""
Fetch all orders for a pair "since"
:param pair: Pair for the query
@@ -246,7 +252,7 @@ class Bybit(Exchange):
return orders
def fetch_order(self, order_id: str, pair: str, params: Optional[dict] = None) -> dict:
def fetch_order(self, order_id: str, pair: str, params: dict | None = None) -> CcxtOrder:
if self.exchange_has("fetchOrder"):
# Set acknowledged to True to avoid ccxt exception
params = {"acknowledged": True}

View File

@@ -1,8 +1,9 @@
import asyncio
import logging
import time
from collections.abc import Callable
from functools import wraps
from typing import Any, Callable, Optional, TypeVar, cast, overload
from typing import Any, TypeVar, cast, overload
from freqtrade.constants import ExchangeConfig
from freqtrade.exceptions import DDosProtection, RetryableOrderError, TemporaryError
@@ -57,6 +58,7 @@ SUPPORTED_EXCHANGES = [
"bybit",
"gate",
"htx",
"hyperliquid",
"kraken",
"okx",
]
@@ -172,7 +174,7 @@ def retrier(_func: F, *, retries=API_RETRY_COUNT) -> F: ...
def retrier(*, retries=API_RETRY_COUNT) -> Callable[[F], F]: ...
def retrier(_func: Optional[F] = None, *, retries=API_RETRY_COUNT):
def retrier(_func: F | None = None, *, retries=API_RETRY_COUNT):
def decorator(f: F) -> F:
@wraps(f)
def wrapper(*args, **kwargs):
@@ -185,7 +187,7 @@ def retrier(_func: Optional[F] = None, *, retries=API_RETRY_COUNT):
logger.warning(msg + f"Retrying still for {count} times.")
count -= 1
kwargs.update({"count": count})
if isinstance(ex, (DDosProtection, RetryableOrderError)):
if isinstance(ex, DDosProtection | RetryableOrderError):
# increasing backoff
backoff_delay = calculate_backoff(count + 1, retries)
logger.info(f"Applying DDosProtection backoff delay: {backoff_delay}")

View File

@@ -12,7 +12,7 @@ from copy import deepcopy
from datetime import datetime, timedelta, timezone
from math import floor, isnan
from threading import Lock
from typing import Any, Literal, Optional, Union
from typing import Any, Literal, TypeGuard
import ccxt
import ccxt.pro as ccxt_pro
@@ -70,6 +70,7 @@ from freqtrade.exchange.common import (
)
from freqtrade.exchange.exchange_types import (
CcxtBalances,
CcxtOrder,
CcxtPosition,
FtHas,
OHLCVResponse,
@@ -169,7 +170,7 @@ class Exchange:
self,
config: Config,
*,
exchange_config: Optional[ExchangeConfig] = None,
exchange_config: ExchangeConfig | None = None,
validate: bool = True,
load_leverage_tiers: bool = False,
) -> None:
@@ -181,7 +182,7 @@ class Exchange:
self._api: ccxt.Exchange
self._api_async: ccxt_pro.Exchange
self._ws_async: ccxt_pro.Exchange = None
self._exchange_ws: Optional[ExchangeWS] = None
self._exchange_ws: ExchangeWS | None = None
self._markets: dict = {}
self._trading_fees: dict[str, Any] = {}
self._leverage_tiers: dict[str, list[dict]] = {}
@@ -198,8 +199,8 @@ class Exchange:
# Timestamp of last markets refresh
self._last_markets_refresh: int = 0
# Cache for 10 minutes ...
self._cache_lock = Lock()
# Cache for 10 minutes ...
self._fetch_tickers_cache: TTLCache = TTLCache(maxsize=2, ttl=60 * 10)
# Cache values for 300 seconds to avoid frequent polling of the exchange for prices
# Caching only applies to RPC methods, so prices for open trades are still
@@ -378,7 +379,7 @@ class Exchange:
logger.info("Applying additional ccxt config: %s", ccxt_kwargs)
if self._ccxt_params:
# Inject static options after the above output to not confuse users.
ccxt_kwargs = deep_merge_dicts(self._ccxt_params, ccxt_kwargs)
ccxt_kwargs = deep_merge_dicts(self._ccxt_params, deepcopy(ccxt_kwargs))
if ccxt_kwargs:
ex_config.update(ccxt_kwargs)
try:
@@ -452,28 +453,31 @@ class Exchange:
logger.info(f"API {endpoint}: {add_info_str}{response}")
def ohlcv_candle_limit(
self, timeframe: str, candle_type: CandleType, since_ms: Optional[int] = None
self, timeframe: str, candle_type: CandleType, since_ms: int | None = None
) -> int:
"""
Exchange ohlcv candle limit
Uses ohlcv_candle_limit_per_timeframe if the exchange has different limits
per timeframe (e.g. bittrex), otherwise falls back to ohlcv_candle_limit
TODO: this is most likely no longer needed since only bittrex needed this.
:param timeframe: Timeframe to check
:param candle_type: Candle-type
:param since_ms: Starting timestamp
:return: Candle limit as integer
"""
fallback_val = self._ft_has.get("ohlcv_candle_limit")
if candle_type == CandleType.FUNDING_RATE:
fallback_val = self._ft_has.get("funding_fee_candle_limit", fallback_val)
return int(
self._ft_has.get("ohlcv_candle_limit_per_timeframe", {}).get(
timeframe, str(self._ft_has.get("ohlcv_candle_limit"))
timeframe, str(fallback_val)
)
)
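The rewritten lookup resolves in three steps: a per-timeframe override wins if present, otherwise funding-rate candles fall back to funding_fee_candle_limit, and everything else falls back to ohlcv_candle_limit. A small sketch of that resolution order with made-up values (not real exchange limits):
ft_has = {
    "ohlcv_candle_limit": 1000,
    "funding_fee_candle_limit": 200,
    "ohlcv_candle_limit_per_timeframe": {"1m": 720},
}

def candle_limit(timeframe: str, is_funding_rate: bool) -> int:
    fallback = ft_has.get("ohlcv_candle_limit")
    if is_funding_rate:
        fallback = ft_has.get("funding_fee_candle_limit", fallback)
    return int(ft_has.get("ohlcv_candle_limit_per_timeframe", {}).get(timeframe, str(fallback)))

print(candle_limit("1m", False))  # 720  - per-timeframe override wins
print(candle_limit("8h", True))   # 200  - funding-rate fallback
print(candle_limit("5m", False))  # 1000 - plain fallback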
def get_markets(
self,
base_currencies: Optional[list[str]] = None,
quote_currencies: Optional[list[str]] = None,
base_currencies: list[str] | None = None,
quote_currencies: list[str] | None = None,
spot_only: bool = False,
margin_only: bool = False,
futures_only: bool = False,
@@ -522,6 +526,7 @@ class Exchange:
def market_is_future(self, market: dict[str, Any]) -> bool:
return (
market.get(self._ft_has["ccxt_futures_name"], False) is True
and market.get("type", False) == "swap"
and market.get("linear", False) is True
)
@@ -566,7 +571,7 @@ class Exchange:
else:
return DataFrame(columns=DEFAULT_TRADES_COLUMNS)
def get_contract_size(self, pair: str) -> Optional[float]:
def get_contract_size(self, pair: str) -> float | None:
if self.trading_mode == TradingMode.FUTURES:
market = self.markets.get(pair, {})
contract_size: float = 1.0
@@ -587,7 +592,7 @@ class Exchange:
trade["amount"] = trade["amount"] * contract_size
return trades
def _order_contracts_to_amount(self, order: dict) -> dict:
def _order_contracts_to_amount(self, order: CcxtOrder) -> CcxtOrder:
if "symbol" in order and order["symbol"] is not None:
contract_size = self.get_contract_size(order["symbol"])
if contract_size != 1:
@@ -709,7 +714,7 @@ class Exchange:
return pair
raise ValueError(f"Could not combine {curr_1} and {curr_2} to get a valid pair.")
def validate_timeframes(self, timeframe: Optional[str]) -> None:
def validate_timeframes(self, timeframe: str | None) -> None:
"""
Check if timeframe from config is a supported timeframe on the exchange
"""
@@ -839,7 +844,7 @@ class Exchange:
def validate_trading_mode_and_margin_mode(
self,
trading_mode: TradingMode,
margin_mode: Optional[MarginMode], # Only None when trading_mode = TradingMode.SPOT
margin_mode: MarginMode | None, # Only None when trading_mode = TradingMode.SPOT
):
"""
Checks if freqtrade can perform trades using the configured
@@ -855,7 +860,7 @@ class Exchange:
f"Freqtrade does not support {mm_value} {trading_mode} on {self.name}"
)
def get_option(self, param: str, default: Optional[Any] = None) -> Any:
def get_option(self, param: str, default: Any | None = None) -> Any:
"""
Get parameter value from _ft_has
"""
@@ -872,7 +877,7 @@ class Exchange:
return self._ft_has["exchange_has_overrides"][endpoint]
return endpoint in self._api_async.has and self._api_async.has[endpoint]
def get_precision_amount(self, pair: str) -> Optional[float]:
def get_precision_amount(self, pair: str) -> float | None:
"""
Returns the amount precision of the exchange.
:param pair: Pair to get precision for
@@ -880,7 +885,7 @@ class Exchange:
"""
return self.markets.get(pair, {}).get("precision", {}).get("amount", None)
def get_precision_price(self, pair: str) -> Optional[float]:
def get_precision_price(self, pair: str) -> float | None:
"""
Returns the price precision of the exchange.
:param pair: Pair to get precision for
@@ -920,8 +925,8 @@ class Exchange:
return 1 / pow(10, precision)
def get_min_pair_stake_amount(
self, pair: str, price: float, stoploss: float, leverage: Optional[float] = 1.0
) -> Optional[float]:
self, pair: str, price: float, stoploss: float, leverage: float | None = 1.0
) -> float | None:
return self._get_stake_amount_limit(pair, price, stoploss, "min", leverage)
def get_max_pair_stake_amount(self, pair: str, price: float, leverage: float = 1.0) -> float:
@@ -939,8 +944,8 @@ class Exchange:
price: float,
stoploss: float,
limit: Literal["min", "max"],
leverage: Optional[float] = 1.0,
) -> Optional[float]:
leverage: float | None = 1.0,
) -> float | None:
isMin = limit == "min"
try:
@@ -997,20 +1002,20 @@ class Exchange:
self,
pair: str,
ordertype: str,
side: str,
side: BuySell,
amount: float,
rate: float,
leverage: float,
params: Optional[dict] = None,
params: dict | None = None,
stop_loss: bool = False,
) -> dict[str, Any]:
) -> CcxtOrder:
now = dt_now()
order_id = f"dry_run_{side}_{pair}_{now.timestamp()}"
# Rounding here must respect to contract sizes
_amount = self._contracts_to_amount(
pair, self.amount_to_precision(pair, self._amount_to_contracts(pair, amount))
)
dry_order: dict[str, Any] = {
dry_order: CcxtOrder = {
"id": order_id,
"symbol": pair,
"price": rate,
@@ -1026,14 +1031,13 @@ class Exchange:
"status": "open",
"fee": None,
"info": {},
"leverage": leverage,
}
if stop_loss:
dry_order["info"] = {"stopPrice": dry_order["price"]}
dry_order[self._ft_has["stop_price_prop"]] = dry_order["price"]
# Workaround to avoid filling stoploss orders immediately
dry_order["ft_order_type"] = "stoploss"
orderbook: Optional[OrderBook] = None
orderbook: OrderBook | None = None
if self.exchange_has("fetchL2OrderBook"):
orderbook = self.fetch_l2_order_book(pair, 20)
if ordertype == "limit" and orderbook:
@@ -1055,7 +1059,7 @@ class Exchange:
"filled": _amount,
"remaining": 0.0,
"status": "closed",
"cost": (dry_order["amount"] * average),
"cost": (_amount * average),
}
)
# market orders will always incur taker fees
@@ -1072,9 +1076,9 @@ class Exchange:
def add_dry_order_fee(
self,
pair: str,
dry_order: dict[str, Any],
dry_order: CcxtOrder,
taker_or_maker: MakerTaker,
) -> dict[str, Any]:
) -> CcxtOrder:
fee = self.get_fee(pair, taker_or_maker=taker_or_maker)
dry_order.update(
{
@@ -1088,7 +1092,7 @@ class Exchange:
return dry_order
def get_dry_market_fill_price(
self, pair: str, side: str, amount: float, rate: float, orderbook: Optional[OrderBook]
self, pair: str, side: str, amount: float, rate: float, orderbook: OrderBook | None
) -> float:
"""
Get the market order fill price based on orderbook interpolation
@@ -1136,7 +1140,7 @@ class Exchange:
pair: str,
side: str,
limit: float,
orderbook: Optional[OrderBook] = None,
orderbook: OrderBook | None = None,
offset: float = 0.0,
) -> bool:
if not self.exchange_has("fetchL2OrderBook"):
@@ -1158,8 +1162,8 @@ class Exchange:
return False
def check_dry_limit_order_filled(
self, order: dict[str, Any], immediate: bool = False, orderbook: Optional[OrderBook] = None
) -> dict[str, Any]:
self, order: CcxtOrder, immediate: bool = False, orderbook: OrderBook | None = None
) -> CcxtOrder:
"""
Check dry-run limit order fill and update fee (if it filled).
"""
@@ -1186,7 +1190,7 @@ class Exchange:
return order
def fetch_dry_run_order(self, order_id) -> dict[str, Any]:
def fetch_dry_run_order(self, order_id) -> CcxtOrder:
"""
Return dry-run order
Only call if running in dry-run mode.
@@ -1248,7 +1252,7 @@ class Exchange:
leverage: float,
reduceOnly: bool = False,
time_in_force: str = "GTC",
) -> dict:
) -> CcxtOrder:
if self._config["dry_run"]:
dry_order = self.create_dry_run_order(
pair, ordertype, side, amount, self.price_to_precision(pair, rate), leverage
@@ -1306,7 +1310,7 @@ class Exchange:
except ccxt.BaseError as e:
raise OperationalException(e) from e
def stoploss_adjust(self, stop_loss: float, order: dict, side: str) -> bool:
def stoploss_adjust(self, stop_loss: float, order: CcxtOrder, side: str) -> bool:
"""
Verify stop_loss against stoploss-order value (limit or price)
Returns True if adjustment is necessary.
@@ -1367,7 +1371,7 @@ class Exchange:
order_types: dict,
side: BuySell,
leverage: float,
) -> dict:
) -> CcxtOrder:
"""
creates a stoploss order.
requires `_ft_has['stoploss_order_types']` to be set as a dict mapping limit and market
@@ -1460,7 +1464,7 @@ class Exchange:
except ccxt.BaseError as e:
raise OperationalException(e) from e
def fetch_order_emulated(self, order_id: str, pair: str, params: dict) -> dict:
def fetch_order_emulated(self, order_id: str, pair: str, params: dict) -> CcxtOrder:
"""
Emulated fetch_order if the exchange doesn't support fetch_order, but requires separate
calls for open and closed orders.
@@ -1494,7 +1498,7 @@ class Exchange:
raise OperationalException(e) from e
@retrier(retries=API_FETCH_ORDER_RETRY_COUNT)
def fetch_order(self, order_id: str, pair: str, params: Optional[dict] = None) -> dict:
def fetch_order(self, order_id: str, pair: str, params: dict | None = None) -> CcxtOrder:
if self._config["dry_run"]:
return self.fetch_dry_run_order(order_id)
if params is None:
@@ -1523,12 +1527,14 @@ class Exchange:
except ccxt.BaseError as e:
raise OperationalException(e) from e
def fetch_stoploss_order(self, order_id: str, pair: str, params: Optional[dict] = None) -> dict:
def fetch_stoploss_order(
self, order_id: str, pair: str, params: dict | None = None
) -> CcxtOrder:
return self.fetch_order(order_id, pair, params)
def fetch_order_or_stoploss_order(
self, order_id: str, pair: str, stoploss_order: bool = False
) -> dict:
) -> CcxtOrder:
"""
Simple wrapper calling either fetch_order or fetch_stoploss_order depending on
the stoploss_order parameter
@@ -1540,7 +1546,7 @@ class Exchange:
return self.fetch_stoploss_order(order_id, pair)
return self.fetch_order(order_id, pair)
def check_order_canceled_empty(self, order: dict) -> bool:
def check_order_canceled_empty(self, order: CcxtOrder) -> bool:
"""
Verify if an order has been cancelled without being partially filled
:param order: Order dict as returned from fetch_order()
@@ -1549,7 +1555,7 @@ class Exchange:
return order.get("status") in NON_OPEN_EXCHANGE_STATES and order.get("filled") == 0.0
@retrier
def cancel_order(self, order_id: str, pair: str, params: Optional[dict] = None) -> dict:
def cancel_order(self, order_id: str, pair: str, params: dict | None = None) -> dict[str, Any]:
if self._config["dry_run"]:
try:
order = self.fetch_dry_run_order(order_id)
@@ -1577,19 +1583,17 @@ class Exchange:
except ccxt.BaseError as e:
raise OperationalException(e) from e
def cancel_stoploss_order(
self, order_id: str, pair: str, params: Optional[dict] = None
) -> dict:
def cancel_stoploss_order(self, order_id: str, pair: str, params: dict | None = None) -> dict:
return self.cancel_order(order_id, pair, params)
def is_cancel_order_result_suitable(self, corder) -> bool:
def is_cancel_order_result_suitable(self, corder) -> TypeGuard[CcxtOrder]:
if not isinstance(corder, dict):
return False
required = ("fee", "status", "amount")
return all(corder.get(k, None) is not None for k in required)
def cancel_order_with_result(self, order_id: str, pair: str, amount: float) -> dict:
def cancel_order_with_result(self, order_id: str, pair: str, amount: float) -> CcxtOrder:
"""
Cancel order returning a result.
Creates a fake result if cancel order returns a non-usable result
@@ -1620,7 +1624,9 @@ class Exchange:
return order
def cancel_stoploss_order_with_result(self, order_id: str, pair: str, amount: float) -> dict:
def cancel_stoploss_order_with_result(
self, order_id: str, pair: str, amount: float
) -> CcxtOrder:
"""
Cancel stoploss order returning a result.
Creates a fake result if cancel order returns a non-usable result
@@ -1651,6 +1657,7 @@ class Exchange:
balances.pop("total", None)
balances.pop("used", None)
self._log_exchange_response("fetch_balances", balances)
return balances
except ccxt.DDoSProtection as e:
raise DDosProtection(e) from e
@@ -1662,7 +1669,7 @@ class Exchange:
raise OperationalException(e) from e
@retrier
def fetch_positions(self, pair: Optional[str] = None) -> list[CcxtPosition]:
def fetch_positions(self, pair: str | None = None) -> list[CcxtPosition]:
"""
Fetch positions from the exchange.
If no pair is given, all positions are returned.
@@ -1686,7 +1693,7 @@ class Exchange:
except ccxt.BaseError as e:
raise OperationalException(e) from e
def _fetch_orders_emulate(self, pair: str, since_ms: int) -> list[dict]:
def _fetch_orders_emulate(self, pair: str, since_ms: int) -> list[CcxtOrder]:
orders = []
if self.exchange_has("fetchClosedOrders"):
orders = self._api.fetch_closed_orders(pair, since=since_ms)
@@ -1696,7 +1703,9 @@ class Exchange:
return orders
@retrier(retries=0)
def fetch_orders(self, pair: str, since: datetime, params: Optional[dict] = None) -> list[dict]:
def fetch_orders(
self, pair: str, since: datetime, params: dict | None = None
) -> list[CcxtOrder]:
"""
Fetch all orders for a pair "since"
:param pair: Pair for the query
@@ -1712,7 +1721,9 @@ class Exchange:
if not params:
params = {}
try:
orders: list[dict] = self._api.fetch_orders(pair, since=since_ms, params=params)
orders: list[CcxtOrder] = self._api.fetch_orders(
pair, since=since_ms, params=params
)
except ccxt.NotSupported:
# Some exchanges don't support fetchOrders
# attempt to fetch open and closed orders separately
@@ -1757,7 +1768,7 @@ class Exchange:
raise OperationalException(e) from e
@retrier
def fetch_bids_asks(self, symbols: Optional[list[str]] = None, cached: bool = False) -> dict:
def fetch_bids_asks(self, symbols: list[str] | None = None, *, cached: bool = False) -> dict:
"""
:param symbols: List of symbols to fetch
:param cached: Allow cached result
@@ -1790,8 +1801,9 @@ class Exchange:
raise OperationalException(e) from e
@retrier
def get_tickers(self, symbols: Optional[list[str]] = None, cached: bool = False) -> Tickers:
def get_tickers(self, symbols: list[str] | None = None, *, cached: bool = False) -> Tickers:
"""
:param symbols: List of symbols to fetch
:param cached: Allow cached result
:return: fetch_tickers result
"""
@@ -1850,7 +1862,7 @@ class Exchange:
@staticmethod
def get_next_limit_in_list(
limit: int, limit_range: Optional[list[int]], range_required: bool = True
limit: int, limit_range: list[int] | None, range_required: bool = True
):
"""
Get next greater value in the list.
@@ -1914,8 +1926,8 @@ class Exchange:
refresh: bool,
side: EntryExit,
is_short: bool,
order_book: Optional[OrderBook] = None,
ticker: Optional[Ticker] = None,
order_book: OrderBook | None = None,
ticker: Ticker | None = None,
) -> float:
"""
Calculates bid/ask target
@@ -1964,7 +1976,7 @@ class Exchange:
def _get_rate_from_ticker(
self, side: EntryExit, ticker: Ticker, conf_strategy: dict[str, Any], price_side: BidAsk
) -> Optional[float]:
) -> float | None:
"""
Get rate from ticker.
"""
@@ -2043,7 +2055,7 @@ class Exchange:
@retrier
def get_trades_for_order(
self, order_id: str, pair: str, since: datetime, params: Optional[dict] = None
self, order_id: str, pair: str, since: datetime, params: dict | None = None
) -> list:
"""
Fetch Orders using the "fetch_my_trades" endpoint and filter them by order-id.
@@ -2090,7 +2102,7 @@ class Exchange:
except ccxt.BaseError as e:
raise OperationalException(e) from e
def get_order_id_conditional(self, order: dict[str, Any]) -> str:
def get_order_id_conditional(self, order: CcxtOrder) -> str:
return order["id"]
@retrier
@@ -2139,7 +2151,7 @@ class Exchange:
raise OperationalException(e) from e
@staticmethod
def order_has_fee(order: dict) -> bool:
def order_has_fee(order: CcxtOrder) -> bool:
"""
Verifies if the passed in order dict has the needed keys to extract fees,
and that these keys (currency, cost) are not empty.
@@ -2158,7 +2170,7 @@ class Exchange:
def calculate_fee_rate(
self, fee: dict, symbol: str, cost: float, amount: float
) -> Optional[float]:
) -> float | None:
"""
Calculate fee rate if it's not given by the exchange.
:param fee: ccxt Fee dict - must contain cost / currency / rate
@@ -2197,8 +2209,8 @@ class Exchange:
return round((fee_cost * fee_to_quote_rate) / cost, 8)
def extract_cost_curr_rate(
self, fee: dict, symbol: str, cost: float, amount: float
) -> tuple[float, str, Optional[float]]:
self, fee: dict[str, Any], symbol: str, cost: float, amount: float
) -> tuple[float, str, float | None]:
"""
Extract tuple of cost, currency, rate.
Requires order_has_fee to run first!
@@ -2223,7 +2235,7 @@ class Exchange:
since_ms: int,
candle_type: CandleType,
is_new_pair: bool = False,
until_ms: Optional[int] = None,
until_ms: int | None = None,
) -> DataFrame:
"""
Get candle history using asyncio and returns the list of candles.
@@ -2257,7 +2269,7 @@ class Exchange:
candle_type: CandleType,
is_new_pair: bool = False,
raise_: bool = False,
until_ms: Optional[int] = None,
until_ms: int | None = None,
) -> OHLCVResponse:
"""
Download historic ohlcv
@@ -2302,7 +2314,7 @@ class Exchange:
pair: str,
timeframe: str,
candle_type: CandleType,
since_ms: Optional[int],
since_ms: int | None,
cache: bool,
) -> Coroutine[Any, Any, OHLCVResponse]:
not_all_data = cache and self.required_candle_call_count > 1
@@ -2371,7 +2383,7 @@ class Exchange:
)
def _build_ohlcv_dl_jobs(
self, pair_list: ListPairsWithTimeframes, since_ms: Optional[int], cache: bool
self, pair_list: ListPairsWithTimeframes, since_ms: int | None, cache: bool
) -> tuple[list[Coroutine], list[PairWithTimeframe]]:
"""
Build Coroutines to execute as part of refresh_latest_ohlcv
@@ -2448,9 +2460,9 @@ class Exchange:
self,
pair_list: ListPairsWithTimeframes,
*,
since_ms: Optional[int] = None,
since_ms: int | None = None,
cache: bool = True,
drop_incomplete: Optional[bool] = None,
drop_incomplete: bool | None = None,
) -> dict[PairWithTimeframe, DataFrame]:
"""
Refresh in-memory OHLCV asynchronously and set `_klines` with the result
@@ -2544,7 +2556,7 @@ class Exchange:
pair: str,
timeframe: str,
candle_type: CandleType,
since_ms: Optional[int] = None,
since_ms: int | None = None,
) -> OHLCVResponse:
"""
Asynchronously get candle history data using fetch_ohlcv
@@ -2618,7 +2630,7 @@ class Exchange:
pair: str,
timeframe: str,
limit: int,
since_ms: Optional[int] = None,
since_ms: int | None = None,
) -> list[list]:
"""
Fetch funding rate history - used to selectively override this by subclasses.
@@ -2677,7 +2689,7 @@ class Exchange:
async def _build_trades_dl_jobs(
self, pairwt: PairWithTimeframe, data_handler, cache: bool
) -> tuple[PairWithTimeframe, Optional[DataFrame]]:
) -> tuple[PairWithTimeframe, DataFrame | None]:
"""
Build coroutines to refresh trades for (they're then called through async.gather)
"""
@@ -2821,7 +2833,7 @@ class Exchange:
@retrier_async
async def _async_fetch_trades(
self, pair: str, since: Optional[int] = None, params: Optional[dict] = None
self, pair: str, since: int | None = None, params: dict | None = None
) -> tuple[list[list], Any]:
"""
Asynchronously gets trade history using fetch_trades.
@@ -2880,8 +2892,16 @@ class Exchange:
else:
return trades[-1].get("timestamp")
async def _async_get_trade_history_id_startup(
self, pair: str, since: int | None
) -> tuple[list[list], str]:
"""
Override for the initial trade_history_id call.
"""
return await self._async_fetch_trades(pair, since=since)
async def _async_get_trade_history_id(
self, pair: str, until: int, since: Optional[int] = None, from_id: Optional[str] = None
self, pair: str, until: int, since: int | None = None, from_id: str | None = None
) -> tuple[str, list[list]]:
"""
Asynchronously gets trade history using fetch_trades
@@ -2906,7 +2926,7 @@ class Exchange:
# of up to an hour.
# e.g. Binance returns the "last 1000" candles within a 1h time interval
# - so we will miss the first trades.
t, from_id = await self._async_fetch_trades(pair, since=since)
t, from_id = await self._async_get_trade_history_id_startup(pair, since=since)
trades.extend(t[x])
while True:
try:
@@ -2936,7 +2956,7 @@ class Exchange:
return (pair, trades)
async def _async_get_trade_history_time(
self, pair: str, until: int, since: Optional[int] = None
self, pair: str, until: int, since: int | None = None
) -> tuple[str, list[list]]:
"""
Asynchronously gets trade history using fetch_trades,
@@ -2977,9 +2997,9 @@ class Exchange:
async def _async_get_trade_history(
self,
pair: str,
since: Optional[int] = None,
until: Optional[int] = None,
from_id: Optional[str] = None,
since: int | None = None,
until: int | None = None,
from_id: str | None = None,
) -> tuple[str, list[list]]:
"""
Async wrapper handling downloading trades using either time or id based methods.
@@ -3008,9 +3028,9 @@ class Exchange:
def get_historic_trades(
self,
pair: str,
since: Optional[int] = None,
until: Optional[int] = None,
from_id: Optional[str] = None,
since: int | None = None,
until: int | None = None,
from_id: str | None = None,
) -> tuple[str, list]:
"""
Get trade history data using asyncio.
@@ -3039,7 +3059,7 @@ class Exchange:
return self.loop.run_until_complete(task)
@retrier
def _get_funding_fees_from_exchange(self, pair: str, since: Union[datetime, int]) -> float:
def _get_funding_fees_from_exchange(self, pair: str, since: datetime | int) -> float:
"""
Returns the sum of all funding fees that were exchanged for a pair within a timeframe
Dry-run handling happens as part of _calculate_funding_fees.
@@ -3170,8 +3190,8 @@ class Exchange:
file_dump_json(filename, data)
def load_cached_leverage_tiers(
self, stake_currency: str, cache_time: Optional[timedelta] = None
) -> Optional[dict[str, list[dict]]]:
self, stake_currency: str, cache_time: timedelta | None = None
) -> dict[str, list[dict]] | None:
"""
Load cached leverage tiers from disk
:param cache_time: The maximum age of the cache before it is considered outdated
@@ -3216,7 +3236,7 @@ class Exchange:
"maintAmt": float(info["cum"]) if "cum" in info else None,
}
def get_max_leverage(self, pair: str, stake_amount: Optional[float]) -> float:
def get_max_leverage(self, pair: str, stake_amount: float | None) -> float:
"""
Returns the maximum leverage that a pair can be traded at
:param pair: The base/quote currency pair being traded
@@ -3294,7 +3314,7 @@ class Exchange:
def _set_leverage(
self,
leverage: float,
pair: Optional[str] = None,
pair: str | None = None,
accept_fail: bool = False,
):
"""
@@ -3346,7 +3366,7 @@ class Exchange:
pair: str,
margin_mode: MarginMode,
accept_fail: bool = False,
params: Optional[dict] = None,
params: dict | None = None,
):
"""
Sets the margin mode on the exchange to cross or isolated for a specific pair
@@ -3381,7 +3401,7 @@ class Exchange:
amount: float,
is_short: bool,
open_date: datetime,
close_date: Optional[datetime] = None,
close_date: datetime | None = None,
) -> float:
"""
Fetches and calculates the sum of all funding fees that occurred for a pair
@@ -3434,7 +3454,7 @@ class Exchange:
@staticmethod
def combine_funding_and_mark(
funding_rates: DataFrame, mark_rates: DataFrame, futures_funding_rate: Optional[int] = None
funding_rates: DataFrame, mark_rates: DataFrame, futures_funding_rate: int | None = None
) -> DataFrame:
"""
Combine funding-rates and mark-rates dataframes
@@ -3475,7 +3495,7 @@ class Exchange:
is_short: bool,
open_date: datetime,
close_date: datetime,
time_in_ratio: Optional[float] = None,
time_in_ratio: float | None = None,
) -> float:
"""
calculates the sum of all funding fees that occurred for a pair during a futures trade
@@ -3533,8 +3553,8 @@ class Exchange:
stake_amount: float,
leverage: float,
wallet_balance: float,
open_trades: Optional[list] = None,
) -> Optional[float]:
open_trades: list | None = None,
) -> float | None:
"""
Sets the margin mode on the exchange to cross or isolated for a specific pair
"""
@@ -3582,7 +3602,7 @@ class Exchange:
leverage: float,
wallet_balance: float, # Or margin balance
open_trades: list,
) -> Optional[float]:
) -> float | None:
"""
Important: Must be fetching data from cached values as this is used by backtesting!
PERPETUAL:
@@ -3633,7 +3653,7 @@ class Exchange:
self,
pair: str,
notional_value: float,
) -> tuple[float, Optional[float]]:
) -> tuple[float, float | None]:
"""
Important: Must be fetching data from cached values as this is used by backtesting!
:param pair: Market symbol

View File

@@ -1,4 +1,4 @@
from typing import Optional, TypedDict
from typing import Any, Literal, TypedDict
from freqtrade.enums import CandleType
@@ -11,7 +11,7 @@ class FtHas(TypedDict, total=False):
# Stoploss on exchange
stoploss_on_exchange: bool
stop_price_param: str
stop_price_prop: str
stop_price_prop: Literal["stopPrice", "stopLossPrice"]
stop_price_type_field: str
stop_price_type_value_mapping: dict
stoploss_order_types: dict[str, str]
@@ -35,16 +35,17 @@ class FtHas(TypedDict, total=False):
trades_has_history: bool
trades_pagination_overlap: bool
# Orderbook
l2_limit_range: Optional[list[int]]
l2_limit_range: list[int] | None
l2_limit_range_required: bool
# Futures
ccxt_futures_name: str # usually swap
mark_ohlcv_price: str
mark_ohlcv_timeframe: str
funding_fee_timeframe: str
funding_fee_candle_limit: int
floor_leverage: bool
needs_trading_fees: bool
order_props_in_contracts: list[str]
order_props_in_contracts: list[Literal["amount", "cost", "filled", "remaining"]]
# Websocket control
ws_enabled: bool
@@ -52,14 +53,14 @@ class FtHas(TypedDict, total=False):
class Ticker(TypedDict):
symbol: str
ask: Optional[float]
askVolume: Optional[float]
bid: Optional[float]
bidVolume: Optional[float]
last: Optional[float]
quoteVolume: Optional[float]
baseVolume: Optional[float]
percentage: Optional[float]
ask: float | None
askVolume: float | None
bid: float | None
bidVolume: float | None
last: float | None
quoteVolume: float | None
baseVolume: float | None
percentage: float | None
# Several more - only listing required.
@@ -70,9 +71,9 @@ class OrderBook(TypedDict):
symbol: str
bids: list[tuple[float, float]]
asks: list[tuple[float, float]]
timestamp: Optional[int]
datetime: Optional[str]
nonce: Optional[int]
timestamp: int | None
datetime: str | None
nonce: int | None
class CcxtBalance(TypedDict):
@@ -89,10 +90,12 @@ class CcxtPosition(TypedDict):
side: str
contracts: float
leverage: float
collateral: Optional[float]
initialMargin: Optional[float]
liquidationPrice: Optional[float]
collateral: float | None
initialMargin: float | None
liquidationPrice: float | None
CcxtOrder = dict[str, Any]
# pair, timeframe, candleType, OHLCV, drop last?,
OHLCVResponse = tuple[str, str, CandleType, list, bool]

View File

@@ -5,7 +5,7 @@ Exchange support utils
import inspect
from datetime import datetime, timedelta, timezone
from math import ceil, floor
from typing import Any, Optional
from typing import Any
import ccxt
from ccxt import (
@@ -33,20 +33,18 @@ from freqtrade.util import FtPrecise
CcxtModuleType = Any
def is_exchange_known_ccxt(
exchange_name: str, ccxt_module: Optional[CcxtModuleType] = None
) -> bool:
def is_exchange_known_ccxt(exchange_name: str, ccxt_module: CcxtModuleType | None = None) -> bool:
return exchange_name in ccxt_exchanges(ccxt_module)
def ccxt_exchanges(ccxt_module: Optional[CcxtModuleType] = None) -> list[str]:
def ccxt_exchanges(ccxt_module: CcxtModuleType | None = None) -> list[str]:
"""
Return the list of all exchanges known to ccxt
"""
return ccxt_module.exchanges if ccxt_module is not None else ccxt.exchanges
def available_exchanges(ccxt_module: Optional[CcxtModuleType] = None) -> list[str]:
def available_exchanges(ccxt_module: CcxtModuleType | None = None) -> list[str]:
"""
Return exchanges available to the bot, i.e. non-bad exchanges in the ccxt list
"""
@@ -54,7 +52,7 @@ def available_exchanges(ccxt_module: Optional[CcxtModuleType] = None) -> list[str]:
return [x for x in exchanges if validate_exchange(x)[0]]
def validate_exchange(exchange: str) -> tuple[bool, str, Optional[ccxt.Exchange]]:
def validate_exchange(exchange: str) -> tuple[bool, str, ccxt.Exchange | None]:
"""
returns: can_use, reason, exchange_object
with Reason including both missing and missing_opt
@@ -137,9 +135,7 @@ def list_available_exchanges(all_exchanges: bool) -> list[ValidExchangesType]:
return exchanges_valid
def date_minus_candles(
timeframe: str, candle_count: int, date: Optional[datetime] = None
) -> datetime:
def date_minus_candles(timeframe: str, candle_count: int, date: datetime | None = None) -> datetime:
"""
subtract X candles from a date.
:param timeframe: timeframe in string format (e.g. "5m")
@@ -166,7 +162,7 @@ def market_is_active(market: dict) -> bool:
return market.get("active", True) is not False
def amount_to_contracts(amount: float, contract_size: Optional[float]) -> float:
def amount_to_contracts(amount: float, contract_size: float | None) -> float:
"""
Convert amount to contracts.
:param amount: amount to convert
@@ -179,7 +175,7 @@ def amount_to_contracts(amount: float, contract_size: Optional[float]) -> float:
return amount
def contracts_to_amount(num_contracts: float, contract_size: Optional[float]) -> float:
def contracts_to_amount(num_contracts: float, contract_size: float | None) -> float:
"""
Takes num-contracts and converts it to contract size
:param num_contracts: number of contracts
@@ -194,7 +190,7 @@ def contracts_to_amount(num_contracts: float, contract_size: Optional[float]) -> float:
def amount_to_precision(
amount: float, amount_precision: Optional[float], precisionMode: Optional[int]
amount: float, amount_precision: float | None, precisionMode: int | None
) -> float:
"""
Returns the amount to buy or sell to a precision the Exchange accepts
@@ -224,9 +220,9 @@ def amount_to_precision(
def amount_to_contract_precision(
amount,
amount_precision: Optional[float],
precisionMode: Optional[int],
contract_size: Optional[float],
amount_precision: float | None,
precisionMode: int | None,
contract_size: float | None,
) -> float:
"""
Returns the amount to buy or sell to a precision the Exchange accepts
@@ -285,8 +281,8 @@ def __price_to_precision_significant_digits(
def price_to_precision(
price: float,
price_precision: Optional[float],
precisionMode: Optional[int],
price_precision: float | None,
precisionMode: int | None,
*,
rounding_mode: int = ROUND,
) -> float:

View File

@@ -1,5 +1,4 @@
from datetime import datetime, timezone
from typing import Optional
import ccxt
from ccxt import ROUND_DOWN, ROUND_UP
@@ -51,7 +50,7 @@ def timeframe_to_resample_freq(timeframe: str) -> str:
return resample_interval
def timeframe_to_prev_date(timeframe: str, date: Optional[datetime] = None) -> datetime:
def timeframe_to_prev_date(timeframe: str, date: datetime | None = None) -> datetime:
"""
Use Timeframe and determine the candle start date for this date.
Does not round when given a candle start date.
@@ -66,7 +65,7 @@ def timeframe_to_prev_date(timeframe: str, date: Optional[datetime] = None) -> datetime:
return dt_from_ts(new_timestamp)
def timeframe_to_next_date(timeframe: str, date: Optional[datetime] = None) -> datetime:
def timeframe_to_next_date(timeframe: str, date: datetime | None = None) -> datetime:
"""
Use Timeframe and determine next candle.
:param timeframe: timeframe in string format (e.g. "5m")

View File

@@ -2,12 +2,15 @@
import logging
from datetime import datetime
from typing import Any, Optional
import ccxt
from freqtrade.constants import BuySell
from freqtrade.enums import MarginMode, PriceType, TradingMode
from freqtrade.exceptions import DDosProtection, OperationalException, TemporaryError
from freqtrade.exchange import Exchange
from freqtrade.exchange.exchange_types import FtHas
from freqtrade.exchange.common import retrier
from freqtrade.exchange.exchange_types import CcxtOrder, FtHas
from freqtrade.misc import safe_value_fallback2
@@ -24,6 +27,8 @@ class Gate(Exchange):
may still not work as expected.
"""
unified_account = False
_ft_has: FtHas = {
"ohlcv_candle_limit": 1000,
"order_time_in_force": ["GTC", "IOC"],
@@ -38,6 +43,7 @@ class Gate(Exchange):
_ft_has_futures: FtHas = {
"needs_trading_fees": True,
"marketOrderRequiresPrice": False,
"funding_fee_candle_limit": 90,
"stop_price_type_field": "price_type",
"stop_price_type_value_mapping": {
PriceType.LAST: 0,
@@ -53,6 +59,35 @@ class Gate(Exchange):
(TradingMode.FUTURES, MarginMode.ISOLATED)
]
@retrier
def additional_exchange_init(self) -> None:
"""
Additional exchange initialization logic.
.api will be available at this point.
Must be overridden in child methods if required.
"""
try:
if not self._config["dry_run"]:
# TODO: This should work with 4.4.34 and later.
self._api.load_unified_status()
is_unified = self._api.options.get("unifiedAccount")
# Returns a tuple of bools, first for margin, second for Account
if is_unified:
self.unified_account = True
logger.info("Gate: Unified account.")
else:
self.unified_account = False
logger.info("Gate: Classic account.")
except ccxt.DDoSProtection as e:
raise DDosProtection(e) from e
except (ccxt.OperationFailed, ccxt.ExchangeError) as e:
raise TemporaryError(
f"Error in additional_exchange_init due to {e.__class__.__name__}. Message: {e}"
) from e
except ccxt.BaseError as e:
raise OperationalException(e) from e
def _get_params(
self,
side: BuySell,
@@ -74,7 +109,7 @@ class Gate(Exchange):
return params
def get_trades_for_order(
self, order_id: str, pair: str, since: datetime, params: Optional[dict] = None
self, order_id: str, pair: str, since: datetime, params: dict | None = None
) -> list:
trades = super().get_trades_for_order(order_id, pair, since, params)
@@ -99,10 +134,12 @@ class Gate(Exchange):
}
return trades
def get_order_id_conditional(self, order: dict[str, Any]) -> str:
def get_order_id_conditional(self, order: CcxtOrder) -> str:
return safe_value_fallback2(order, order, "id_stop", "id")
def fetch_stoploss_order(self, order_id: str, pair: str, params: Optional[dict] = None) -> dict:
def fetch_stoploss_order(
self, order_id: str, pair: str, params: dict | None = None
) -> CcxtOrder:
order = self.fetch_order(order_id=order_id, pair=pair, params={"stop": True})
if order.get("status", "open") == "closed":
# Places a real order - which we need to fetch explicitly.
@@ -119,7 +156,5 @@ class Gate(Exchange):
return order1
return order
def cancel_stoploss_order(
self, order_id: str, pair: str, params: Optional[dict] = None
) -> dict:
def cancel_stoploss_order(self, order_id: str, pair: str, params: dict | None = None) -> dict:
return self.cancel_order(order_id=order_id, pair=pair, params={"stop": True})

View File

@@ -1,10 +1,14 @@
"""Hyperliquid exchange subclass"""
import logging
from datetime import datetime
from freqtrade.enums import TradingMode
from freqtrade.constants import BuySell
from freqtrade.enums import MarginMode, TradingMode
from freqtrade.exceptions import ExchangeError, OperationalException
from freqtrade.exchange import Exchange
from freqtrade.exchange.exchange_types import FtHas
from freqtrade.exchange.exchange_types import CcxtOrder, FtHas
from freqtrade.util.datetime_helpers import dt_from_ts
logger = logging.getLogger(__name__)
@@ -16,20 +20,162 @@ class Hyperliquid(Exchange):
"""
_ft_has: FtHas = {
# Only the most recent 5000 candles are available according to the
# exchange's API documentation.
"ohlcv_has_history": False,
"ohlcv_candle_limit": 5000,
"trades_has_history": False, # Trades endpoint doesn't seem available.
"l2_limit_range": [20],
"trades_has_history": False,
"tickers_have_bid_ask": False,
"stoploss_on_exchange": False,
"exchange_has_overrides": {"fetchTrades": False},
"marketOrderRequiresPrice": True,
}
_ft_has_futures: FtHas = {
"stoploss_on_exchange": True,
"stoploss_order_types": {"limit": "limit"},
"stop_price_prop": "stopPrice",
"funding_fee_timeframe": "1h",
"funding_fee_candle_limit": 500,
}
_supported_trading_mode_margin_pairs: list[tuple[TradingMode, MarginMode]] = [
(TradingMode.FUTURES, MarginMode.ISOLATED)
]
@property
def _ccxt_config(self) -> dict:
# Parameters to add directly to ccxt sync/async initialization.
# ccxt defaults to swap mode.
# ccxt Hyperliquid defaults to swap
config = {}
if self.trading_mode == TradingMode.SPOT:
config.update({"options": {"defaultType": "spot"}})
config.update(super()._ccxt_config)
return config
def get_max_leverage(self, pair: str, stake_amount: float | None) -> float:
# There are no leverage tiers
if self.trading_mode == TradingMode.FUTURES:
return self.markets[pair]["limits"]["leverage"]["max"]
else:
return 1.0
def _lev_prep(self, pair: str, leverage: float, side: BuySell, accept_fail: bool = False):
if self.trading_mode != TradingMode.SPOT:
# Hyperliquid expects leverage to be an int
leverage = int(leverage)
# Hyperliquid needs the parameter leverage.
# Don't use _set_leverage(), as this sets margin back to cross
self.set_margin_mode(pair, self.margin_mode, params={"leverage": leverage})
def dry_run_liquidation_price(
self,
pair: str,
open_rate: float, # Entry price of position
is_short: bool,
amount: float,
stake_amount: float,
leverage: float,
wallet_balance: float, # Or margin balance
open_trades: list,
) -> float | None:
"""
Optimized
Docs: https://hyperliquid.gitbook.io/hyperliquid-docs/trading/liquidations
Below can be done in fewer lines of code, but like this it matches the documentation.
Tested with 196 unique ccxt fetch_positions() position outputs
- Only first output per position where pnl=0.0
- Compare against returned liquidation price
Positions: 197 Average deviation: 0.00028980% Max deviation: 0.01309453%
Positions info:
{'leverage': {1.0: 23, 2.0: 155, 3.0: 8, 4.0: 7, 5.0: 4},
'side': {'long': 133, 'short': 64},
'symbol': {'BTC/USDC:USDC': 81,
'DOGE/USDC:USDC': 20,
'ETH/USDC:USDC': 53,
'SOL/USDC:USDC': 43}}
"""
# Defining/renaming variables to match the documentation
isolated_margin = wallet_balance
position_size = amount
price = open_rate
position_value = price * position_size
max_leverage = self.markets[pair]["limits"]["leverage"]["max"]
# Docs: The maintenance margin is half of the initial margin at max leverage,
# which varies from 3-50x. In other words, the maintenance margin is between 1%
# (for 50x max leverage assets) and 16.7% (for 3x max leverage assets)
# depending on the asset
# The key thing here is 'Half of the initial margin at max leverage'.
# A bit ambiguous, but this interpretation leads to accurate results:
# 1. Start from the position value
# 2. Assume max leverage, calculate the initial margin by dividing the position value
# by the max leverage
# 3. Divide this by 2
maintenance_margin_required = position_value / max_leverage / 2
# Docs: margin_available (isolated) = isolated_margin - maintenance_margin_required
margin_available = isolated_margin - maintenance_margin_required
# Docs: The maintenance margin is half of the initial margin at max leverage
# The docs don't explicitly specify maintenance leverage, but this works.
# Double because of the statement 'half of the initial margin at max leverage'
maintenance_leverage = max_leverage * 2
# Docs: l = 1 / MAINTENANCE_LEVERAGE (Using 'll' to comply with PEP8: E741)
ll = 1 / maintenance_leverage
# Docs: side = 1 for long and -1 for short
side = -1 if is_short else 1
# Docs: liq_price = price - side * margin_available / position_size / (1 - l * side)
liq_price = price - side * margin_available / position_size / (1 - ll * side)
if self.trading_mode == TradingMode.FUTURES:
return liq_price
else:
raise OperationalException(
"Freqtrade only supports isolated futures for leverage trading"
)
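To make the arithmetic above concrete, here is a worked example with invented inputs (not exchange data): a long position of 0.2 contracts opened at 30 000, with 1 000 of isolated margin, on an asset whose maximum leverage is 50x.
# Worked example of the liquidation formula documented above, using made-up numbers.
price, position_size, isolated_margin, max_leverage, is_short = 30_000.0, 0.2, 1_000.0, 50, False

position_value = price * position_size                             # 6000.0
maintenance_margin_required = position_value / max_leverage / 2    # 60.0
margin_available = isolated_margin - maintenance_margin_required   # 940.0
ll = 1 / (max_leverage * 2)                                        # 0.01
side = -1 if is_short else 1
liq_price = price - side * margin_available / position_size / (1 - ll * side)
print(round(liq_price, 2))  # 25252.53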
def get_funding_fees(
self, pair: str, amount: float, is_short: bool, open_date: datetime
) -> float:
"""
Fetch funding fees, either from the exchange (live) or calculates them
based on funding rate/mark price history
:param pair: The quote/base pair of the trade
:param is_short: trade direction
:param amount: Trade amount
:param open_date: Open date of the trade
:return: funding fee since open_date
:raises: ExchangeError if something goes wrong.
"""
# Hyperliquid does not have fetchFundingHistory
if self.trading_mode == TradingMode.FUTURES:
try:
return self._fetch_and_calculate_funding_fees(pair, amount, is_short, open_date)
except ExchangeError:
logger.warning(f"Could not update funding fees for {pair}.")
return 0.0
def fetch_order(self, order_id: str, pair: str, params: dict | None = None) -> CcxtOrder:
order = super().fetch_order(order_id, pair, params)
if (
order["average"] is None
and order["status"] in ("canceled", "closed")
and order["filled"] > 0
):
# Hyperliquid does not fill the average price in the order response
# Fetch trades to calculate the average price to have the actual price
# the order was executed at
trades = self.get_trades_for_order(order_id, pair, since=dt_from_ts(order["timestamp"]))
if trades:
total_amount = sum(t["amount"] for t in trades)
order["average"] = (
sum(t["price"] * t["amount"] for t in trades) / total_amount
if total_amount
else None
)
return order
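The average-price backfill above is simply a volume-weighted mean over the order's own trades. A minimal illustration with fabricated partial fills:
# Volume-weighted average price over two fabricated fills.
trades = [
    {"price": 100.0, "amount": 0.4},
    {"price": 102.0, "amount": 0.6},
]
total_amount = sum(t["amount"] for t in trades)
average = sum(t["price"] * t["amount"] for t in trades) / total_amount if total_amount else None
print(average)  # 101.2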

View File

@@ -2,7 +2,7 @@
import logging
from datetime import datetime
from typing import Any, Optional
from typing import Any
import ccxt
from pandas import DataFrame
@@ -50,12 +50,28 @@ class Kraken(Exchange):
return parent_check and market.get("darkpool", False) is False
def get_tickers(self, symbols: Optional[list[str]] = None, cached: bool = False) -> Tickers:
def get_tickers(self, symbols: list[str] | None = None, *, cached: bool = False) -> Tickers:
# Only fetch tickers for current stake currency
# Otherwise the request for kraken becomes too large.
symbols = list(self.get_markets(quote_currencies=[self._config["stake_currency"]]))
return super().get_tickers(symbols=symbols, cached=cached)
def consolidate_balances(self, balances: CcxtBalances) -> CcxtBalances:
"""
Consolidate balances for the same currency.
Kraken returns ".F" balances if rewards is enabled.
"""
consolidated: CcxtBalances = {}
for currency, balance in balances.items():
base_currency = currency[:-2] if currency.endswith(".F") else currency
if base_currency in consolidated:
consolidated[base_currency]["free"] += balance["free"]
consolidated[base_currency]["used"] += balance["used"]
consolidated[base_currency]["total"] += balance["total"]
else:
consolidated[base_currency] = balance
return consolidated
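A quick sketch of what this consolidation does to a fabricated balance payload containing a staking-rewards ".F" entry (standalone illustration of the expected result, not a call into freqtrade):
# Fabricated balances: "DOT.F" (rewards) is folded into "DOT".
balances = {
    "DOT": {"free": 1.5, "used": 0.0, "total": 1.5},
    "DOT.F": {"free": 2.0, "used": 0.0, "total": 2.0},
    "USDT": {"free": 500.0, "used": 100.0, "total": 600.0},
}
consolidated: dict[str, dict[str, float]] = {}
for currency, balance in balances.items():
    base = currency[:-2] if currency.endswith(".F") else currency
    if base in consolidated:
        for key in ("free", "used", "total"):
            consolidated[base][key] += balance[key]
    else:
        consolidated[base] = dict(balance)  # copy to avoid mutating the input
print(consolidated["DOT"])  # {'free': 3.5, 'used': 0.0, 'total': 3.5}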
@retrier
def get_balances(self) -> CcxtBalances:
if self._config["dry_run"]:
@@ -68,6 +84,10 @@ class Kraken(Exchange):
balances.pop("free", None)
balances.pop("total", None)
balances.pop("used", None)
self._log_exchange_response("fetch_balances", balances)
# Consolidate balances
balances = self.consolidate_balances(balances)
orders = self._api.fetch_open_orders()
order_list = [
@@ -86,6 +106,7 @@ class Kraken(Exchange):
balances[bal]["used"] = sum(order[1] for order in order_list if order[0] == bal)
balances[bal]["free"] = balances[bal]["total"] - balances[bal]["used"]
self._log_exchange_response("fetch_balances2", balances)
return balances
except ccxt.DDoSProtection as e:
raise DDosProtection(e) from e
@@ -99,7 +120,7 @@ class Kraken(Exchange):
def _set_leverage(
self,
leverage: float,
pair: Optional[str] = None,
pair: str | None = None,
accept_fail: bool = False,
):
"""
@@ -137,7 +158,7 @@ class Kraken(Exchange):
is_short: bool,
open_date: datetime,
close_date: datetime,
time_in_ratio: Optional[float] = None,
time_in_ratio: float | None = None,
) -> float:
"""
# ! This method will always error when run by Freqtrade because time_in_ratio is never

View File

@@ -4,7 +4,7 @@ import logging
from freqtrade.constants import BuySell
from freqtrade.exchange import Exchange
from freqtrade.exchange.exchange_types import FtHas
from freqtrade.exchange.exchange_types import CcxtOrder, FtHas
logger = logging.getLogger(__name__)
@@ -47,7 +47,7 @@ class Kucoin(Exchange):
leverage: float,
reduceOnly: bool = False,
time_in_force: str = "GTC",
) -> dict:
) -> CcxtOrder:
res = super().create_order(
pair=pair,
ordertype=ordertype,

View File

@@ -1,6 +1,5 @@
import logging
from datetime import timedelta
from typing import Any, Optional
import ccxt
@@ -14,7 +13,7 @@ from freqtrade.exceptions import (
)
from freqtrade.exchange import Exchange, date_minus_candles
from freqtrade.exchange.common import API_RETRY_COUNT, retrier
from freqtrade.exchange.exchange_types import FtHas
from freqtrade.exchange.exchange_types import CcxtOrder, FtHas
from freqtrade.misc import safe_value_fallback2
from freqtrade.util import dt_now, dt_ts
@@ -60,7 +59,7 @@ class Okx(Exchange):
_ccxt_params: dict = {"options": {"brokerId": "ffb5405ad327SUDE"}}
def ohlcv_candle_limit(
self, timeframe: str, candle_type: CandleType, since_ms: Optional[int] = None
self, timeframe: str, candle_type: CandleType, since_ms: int | None = None
) -> int:
"""
Exchange ohlcv candle limit
@@ -191,7 +190,7 @@ class Okx(Exchange):
params["posSide"] = self._get_posSide(side, True)
return params
def _convert_stop_order(self, pair: str, order_id: str, order: dict) -> dict:
def _convert_stop_order(self, pair: str, order_id: str, order: CcxtOrder) -> CcxtOrder:
if (
order.get("status", "open") == "closed"
and (real_order_id := order.get("info", {}).get("ordId")) is not None
@@ -209,7 +208,9 @@ class Okx(Exchange):
return order
@retrier(retries=API_RETRY_COUNT)
def fetch_stoploss_order(self, order_id: str, pair: str, params: Optional[dict] = None) -> dict:
def fetch_stoploss_order(
self, order_id: str, pair: str, params: dict | None = None
) -> CcxtOrder:
if self._config["dry_run"]:
return self.fetch_dry_run_order(order_id)
@@ -231,7 +232,7 @@ class Okx(Exchange):
return self._fetch_stop_order_fallback(order_id, pair)
def _fetch_stop_order_fallback(self, order_id: str, pair: str) -> dict:
def _fetch_stop_order_fallback(self, order_id: str, pair: str) -> CcxtOrder:
params2 = {"stop": True, "ordType": "conditional"}
for method in (
self._api.fetch_open_orders,
@@ -256,14 +257,12 @@ class Okx(Exchange):
raise OperationalException(e) from e
raise RetryableOrderError(f"StoplossOrder not found (pair: {pair} id: {order_id}).")
def get_order_id_conditional(self, order: dict[str, Any]) -> str:
def get_order_id_conditional(self, order: CcxtOrder) -> str:
if order.get("type", "") == "stop":
return safe_value_fallback2(order, order, "id_stop", "id")
return order["id"]
def cancel_stoploss_order(
self, order_id: str, pair: str, params: Optional[dict] = None
) -> dict:
def cancel_stoploss_order(self, order_id: str, pair: str, params: dict | None = None) -> dict:
params1 = {"stop": True}
# 'ordType': 'conditional'
#
@@ -273,7 +272,7 @@ class Okx(Exchange):
params=params1,
)
def _fetch_orders_emulate(self, pair: str, since_ms: int) -> list[dict]:
def _fetch_orders_emulate(self, pair: str, since_ms: int) -> list[CcxtOrder]:
orders = []
orders = self._api.fetch_closed_orders(pair, since=since_ms)

View File

@@ -2,7 +2,6 @@ import logging
import random
from abc import abstractmethod
from enum import Enum
from typing import Optional, Union
import gymnasium as gym
import numpy as np
@@ -140,7 +139,7 @@ class BaseEnvironment(gym.Env):
self._end_tick: int = len(self.prices) - 1
self._done: bool = False
self._current_tick: int = self._start_tick
self._last_trade_tick: Optional[int] = None
self._last_trade_tick: int | None = None
self._position = Positions.Neutral
self._position_history: list = [None]
self.total_reward: float = 0
@@ -173,8 +172,8 @@ class BaseEnvironment(gym.Env):
def tensorboard_log(
self,
metric: str,
value: Optional[Union[int, float]] = None,
inc: Optional[bool] = None,
value: int | float | None = None,
inc: bool | None = None,
category: str = "custom",
):
"""

View File

@@ -2,9 +2,10 @@ import copy
import importlib
import logging
from abc import abstractmethod
from collections.abc import Callable
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Callable, Optional, Union
from typing import Any
import gymnasium as gym
import numpy as np
@@ -49,9 +50,9 @@ class BaseReinforcementLearningModel(IFreqaiModel):
)
th.set_num_threads(self.max_threads)
self.reward_params = self.freqai_info["rl_config"]["model_reward_parameters"]
self.train_env: Union[VecMonitor, SubprocVecEnv, gym.Env] = gym.Env()
self.eval_env: Union[VecMonitor, SubprocVecEnv, gym.Env] = gym.Env()
self.eval_callback: Optional[MaskableEvalCallback] = None
self.train_env: VecMonitor | SubprocVecEnv | gym.Env = gym.Env()
self.eval_env: VecMonitor | SubprocVecEnv | gym.Env = gym.Env()
self.eval_callback: MaskableEvalCallback | None = None
self.model_type = self.freqai_info["rl_config"]["model_type"]
self.rl_config = self.freqai_info["rl_config"]
self.df_raw: DataFrame = DataFrame()

View File

@@ -5,7 +5,7 @@ import random
import shutil
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Optional
from typing import Any
import numpy as np
import numpy.typing as npt
@@ -111,7 +111,7 @@ class FreqaiDataKitchen:
def set_paths(
self,
pair: str,
trained_timestamp: Optional[int] = None,
trained_timestamp: int | None = None,
) -> None:
"""
Set the paths to the data for the present coin/botloop

View File

@@ -5,7 +5,7 @@ from abc import ABC, abstractmethod
from collections import deque
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Literal, Optional
from typing import Any, Literal
import datasieve.transforms as ds
import numpy as np
@@ -106,7 +106,7 @@ class IFreqaiModel(ABC):
self._threads: list[threading.Thread] = []
self._stop_event = threading.Event()
self.metadata: dict[str, Any] = self.dd.load_global_metadata_from_disk()
self.data_provider: Optional[DataProvider] = None
self.data_provider: DataProvider | None = None
self.max_system_threads = max(int(psutil.cpu_count() * 2 - 2), 1)
self.can_short = True # overridden in start() with strategy.can_short
self.model: Any = None
@@ -294,7 +294,9 @@ class IFreqaiModel(ABC):
# tr_backtest is the backtesting time range e.g. the week directly
# following tr_train. Both of these windows slide through the
# entire backtest
for tr_train, tr_backtest in zip(dk.training_timeranges, dk.backtesting_timeranges):
for tr_train, tr_backtest in zip(
dk.training_timeranges, dk.backtesting_timeranges, strict=False
):
(_, _) = self.dd.get_pair_dict_info(pair)
train_it += 1
total_trains = len(dk.backtesting_timeranges)

View File

@@ -1,6 +1,6 @@
import logging
from pathlib import Path
from typing import Any, Optional
from typing import Any
import torch as th
from stable_baselines3.common.callbacks import ProgressBarCallback
@@ -78,7 +78,7 @@ class ReinforcementLearner(BaseReinforcementLearningModel):
model = self.dd.model_dictionary[dk.pair]
model.set_env(self.train_env)
callbacks: list[Any] = [self.eval_callback, self.tensorboard_callback]
progressbar_callback: Optional[ProgressBarCallback] = None
progressbar_callback: ProgressBarCallback | None = None
if self.rl_config.get("progress_bar", False):
progressbar_callback = ProgressBarCallback()
callbacks.insert(0, progressbar_callback)

View File

@@ -1,5 +1,5 @@
from enum import Enum
from typing import Any, Union
from typing import Any
from stable_baselines3.common.callbacks import BaseCallback
from stable_baselines3.common.logger import HParam
@@ -27,7 +27,7 @@ class TensorboardCallback(BaseCallback):
# "batch_size": self.model.batch_size,
# "n_steps": self.model.n_steps,
}
metric_dict: dict[str, Union[float, int]] = {
metric_dict: dict[str, float | int] = {
"eval/mean_reward": 0,
"rollout/ep_rew_mean": 0,
"rollout/ep_len_mean": 0,

View File

@@ -45,7 +45,7 @@ class TensorBoardCallback(BaseTensorBoardCallback):
return False
evals = ["validation", "train"]
for metric, eval_ in zip(evals_log.items(), evals):
for metric, eval_ in zip(evals_log.items(), evals, strict=False):
for metric_name, log in metric[1].items():
score = log[-1][0] if isinstance(log[-1], tuple) else log[-1]
self.writer.add_scalar(f"{eval_}-{metric_name}", score, epoch)
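Several hunks in this release add strict=False to zip() calls; on Python 3.10+ this keyword controls whether a length mismatch raises. A one-off illustration:
# strict=False (used in these hunks) silently stops at the shorter iterable; strict=True raises.
print(list(zip([1, 2, 3], ["a", "b"], strict=False)))  # [(1, 'a'), (2, 'b')]
try:
    list(zip([1, 2, 3], ["a", "b"], strict=True))
except ValueError as exc:
    print(exc)  # zip() argument 2 is shorter than argument 1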

View File

@@ -1,6 +1,6 @@
import logging
from pathlib import Path
from typing import Any, Optional
from typing import Any
import pandas as pd
import torch
@@ -50,8 +50,8 @@ class PyTorchModelTrainer(PyTorchTrainerInterface):
self.criterion = criterion
self.model_meta_data = model_meta_data
self.device = device
self.n_epochs: Optional[int] = kwargs.get("n_epochs", 10)
self.n_steps: Optional[int] = kwargs.get("n_steps", None)
self.n_epochs: int | None = kwargs.get("n_epochs", 10)
self.n_steps: int | None = kwargs.get("n_steps", None)
if self.n_steps is None and not self.n_epochs:
raise Exception("Either `n_steps` or `n_epochs` should be set.")

View File

@@ -107,7 +107,7 @@ def plot_feature_importance(
# Extract feature importance from model
models = {}
if "FreqaiMultiOutputRegressor" in str(model.__class__):
for estimator, label in zip(model.estimators_, dk.label_list):
for estimator, label in zip(model.estimators_, dk.label_list, strict=False):
models[label] = estimator
else:
models[dk.label_list[0]] = model

View File

@@ -9,7 +9,7 @@ from datetime import datetime, time, timedelta, timezone
from math import isclose
from threading import Lock
from time import sleep
from typing import Any, Optional
from typing import Any
from schedule import Scheduler
@@ -43,6 +43,7 @@ from freqtrade.exchange import (
timeframe_to_next_date,
timeframe_to_seconds,
)
from freqtrade.exchange.exchange_types import CcxtOrder
from freqtrade.leverage.liquidation_price import update_liquidation_prices
from freqtrade.misc import safe_value_fallback, safe_value_fallback2
from freqtrade.mixins import LoggingMixin
@@ -111,7 +112,7 @@ class FreqtradeBot(LoggingMixin):
self.trading_mode: TradingMode = self.config.get("trading_mode", TradingMode.SPOT)
self.margin_mode: MarginMode = self.config.get("margin_mode", MarginMode.NONE)
self.last_process: Optional[datetime] = None
self.last_process: datetime | None = None
# RPC runs in separate threads, can start handling external commands just after
# initialization, even before Freqtradebot has a chance to start its throttling,
@@ -325,7 +326,7 @@ class FreqtradeBot(LoggingMixin):
}
self.rpc.send_msg(msg)
def _refresh_active_whitelist(self, trades: Optional[list[Trade]] = None) -> list[str]:
def _refresh_active_whitelist(self, trades: list[Trade] | None = None) -> list[str]:
"""
Refresh active whitelist from pairlist or edge and extend it with
pairs that have open trades.
@@ -579,7 +580,7 @@ class FreqtradeBot(LoggingMixin):
logger.warning(
f"{trade} has a total of {trade.amount} {trade.base_currency}, "
f"but the Wallet shows a total of {total} {trade.base_currency}. "
f"Adjusting trade amount to {total}."
f"Adjusting trade amount to {total}. "
"This may however lead to further issues."
)
trade.amount = total
@@ -587,7 +588,7 @@ class FreqtradeBot(LoggingMixin):
logger.warning(
f"{trade} has a total of {trade.amount} {trade.base_currency}, "
f"but the Wallet shows a total of {total} {trade.base_currency}. "
"Refusing to adjust as the difference is too large."
"Refusing to adjust as the difference is too large. "
"This may however lead to further issues."
)
if prev_trade_amount != trade.amount:
@@ -862,14 +863,14 @@ class FreqtradeBot(LoggingMixin):
self,
pair: str,
stake_amount: float,
price: Optional[float] = None,
price: float | None = None,
*,
is_short: bool = False,
ordertype: Optional[str] = None,
enter_tag: Optional[str] = None,
trade: Optional[Trade] = None,
ordertype: str | None = None,
enter_tag: str | None = None,
trade: Trade | None = None,
mode: EntryExecuteMode = "initial",
leverage_: Optional[float] = None,
leverage_: float | None = None,
) -> bool:
"""
Executes an entry for the given pair
@@ -1078,13 +1079,13 @@ class FreqtradeBot(LoggingMixin):
def get_valid_enter_price_and_stake(
self,
pair: str,
price: Optional[float],
price: float | None,
stake_amount: float,
trade_side: LongShort,
entry_tag: Optional[str],
trade: Optional[Trade],
entry_tag: str | None,
trade: Trade | None,
mode: EntryExecuteMode,
leverage_: Optional[float],
leverage_: float | None,
) -> tuple[float, float, float]:
"""
Validate and eventually adjust (within limits) limit, amount and leverage
@@ -1180,7 +1181,7 @@ class FreqtradeBot(LoggingMixin):
self,
trade: Trade,
order: Order,
order_type: Optional[str],
order_type: str | None,
fill: bool = False,
sub_trade: bool = False,
) -> None:
@@ -1195,6 +1196,13 @@ class FreqtradeBot(LoggingMixin):
current_rate = self.exchange.get_rate(
trade.pair, side="entry", is_short=trade.is_short, refresh=False
)
stake_amount = trade.stake_amount
if not fill:
# If we have open orders, we need to add the stake amount of the open orders
# as it's not yet included in the trade.stake_amount
stake_amount += sum(
o.stake_amount for o in trade.open_orders if o.ft_order_side == trade.entry_side
)
msg: RPCEntryMsg = {
"trade_id": trade.id,
@@ -1208,12 +1216,12 @@ class FreqtradeBot(LoggingMixin):
"limit": open_rate, # Deprecated (?)
"open_rate": open_rate,
"order_type": order_type or "unknown",
"stake_amount": trade.stake_amount,
"stake_amount": stake_amount,
"stake_currency": self.config["stake_currency"],
"base_currency": self.exchange.get_pair_base_currency(trade.pair),
"quote_currency": self.exchange.get_pair_quote_currency(trade.pair),
"fiat_currency": self.config.get("fiat_display_currency", None),
"amount": order.safe_amount_after_fee if fill else (order.amount or trade.amount),
"amount": order.safe_amount_after_fee if fill else (order.safe_amount or trade.amount),
"open_date": trade.open_date_utc or datetime.now(timezone.utc),
"current_rate": current_rate,
"sub_trade": sub_trade,
@@ -1344,7 +1352,7 @@ class FreqtradeBot(LoggingMixin):
return False
def _check_and_execute_exit(
self, trade: Trade, exit_rate: float, enter: bool, exit_: bool, exit_tag: Optional[str]
self, trade: Trade, exit_rate: float, enter: bool, exit_: bool, exit_tag: str | None
) -> bool:
"""
Check and execute trade exit
@@ -1466,7 +1474,7 @@ class FreqtradeBot(LoggingMixin):
return False
def handle_trailing_stoploss_on_exchange(self, trade: Trade, order: dict) -> None:
def handle_trailing_stoploss_on_exchange(self, trade: Trade, order: CcxtOrder) -> None:
"""
Check to see if stoploss on exchange should be updated
in case of trailing stoploss on exchange
@@ -1504,7 +1512,7 @@ class FreqtradeBot(LoggingMixin):
f"Could not create trailing stoploss order for pair {trade.pair}."
)
def manage_trade_stoploss_orders(self, trade: Trade, stoploss_orders: list[dict]):
def manage_trade_stoploss_orders(self, trade: Trade, stoploss_orders: list[CcxtOrder]):
"""
Perform required actions according to existing stoploss orders of trade
:param trade: Corresponding Trade
@@ -1580,7 +1588,9 @@ class FreqtradeBot(LoggingMixin):
else:
self.replace_order(order, open_order, trade)
def handle_cancel_order(self, order: dict, order_obj: Order, trade: Trade, reason: str) -> None:
def handle_cancel_order(
self, order: CcxtOrder, order_obj: Order, trade: Trade, reason: str
) -> None:
"""
Check if current analyzed order timed out and cancel if necessary.
:param order: Order dict grabbed with exchange.fetch_order()
@@ -1602,7 +1612,7 @@ class FreqtradeBot(LoggingMixin):
self.emergency_exit(trade, order["price"], order["amount"])
def emergency_exit(
self, trade: Trade, price: float, sub_trade_amt: Optional[float] = None
self, trade: Trade, price: float, sub_trade_amt: float | None = None
) -> None:
try:
self.execute_trade_exit(
@@ -1632,7 +1642,7 @@ class FreqtradeBot(LoggingMixin):
)
trade.delete()
def replace_order(self, order: dict, order_obj: Optional[Order], trade: Trade) -> None:
def replace_order(self, order: CcxtOrder, order_obj: Order | None, trade: Trade) -> None:
"""
Check if current analyzed entry order should be replaced or simply cancelled.
To simply cancel the existing order(no replacement) adjust_entry_price() should return None
@@ -1736,10 +1746,10 @@ class FreqtradeBot(LoggingMixin):
def handle_cancel_enter(
self,
trade: Trade,
order: dict,
order: CcxtOrder,
order_obj: Order,
reason: str,
replacing: Optional[bool] = False,
replacing: bool | None = False,
) -> bool:
"""
entry cancel - cancel order
@@ -1820,7 +1830,9 @@ class FreqtradeBot(LoggingMixin):
)
return was_trade_fully_canceled
def handle_cancel_exit(self, trade: Trade, order: dict, order_obj: Order, reason: str) -> bool:
def handle_cancel_exit(
self, trade: Trade, order: CcxtOrder, order_obj: Order, reason: str
) -> bool:
"""
exit order cancel - cancel order and update trade
:return: True if exit order was cancelled, false otherwise
@@ -1931,9 +1943,9 @@ class FreqtradeBot(LoggingMixin):
limit: float,
exit_check: ExitCheckTuple,
*,
exit_tag: Optional[str] = None,
ordertype: Optional[str] = None,
sub_trade_amt: Optional[float] = None,
exit_tag: str | None = None,
ordertype: str | None = None,
sub_trade_amt: float | None = None,
) -> bool:
"""
Executes a trade exit for the given trade and limit
@@ -2042,10 +2054,10 @@ class FreqtradeBot(LoggingMixin):
def _notify_exit(
self,
trade: Trade,
order_type: Optional[str],
order_type: str | None,
fill: bool = False,
sub_trade: bool = False,
order: Optional[Order] = None,
order: Order | None = None,
) -> None:
"""
Sends rpc notification when a sell occurred.
@@ -2158,7 +2170,7 @@ class FreqtradeBot(LoggingMixin):
# Send the message
self.rpc.send_msg(msg)
def order_obj_or_raise(self, order_id: str, order_obj: Optional[Order]) -> Order:
def order_obj_or_raise(self, order_id: str, order_obj: Order | None) -> Order:
if not order_obj:
raise DependencyException(
f"Order_obj not found for {order_id}. This should not have happened."
@@ -2172,8 +2184,8 @@ class FreqtradeBot(LoggingMixin):
def update_trade_state(
self,
trade: Trade,
order_id: Optional[str],
action_order: Optional[dict[str, Any]] = None,
order_id: str | None,
action_order: CcxtOrder | None = None,
*,
stoploss_order: bool = False,
send_msg: bool = True,
@@ -2284,7 +2296,7 @@ class FreqtradeBot(LoggingMixin):
def handle_protections(self, pair: str, side: LongShort) -> None:
# Lock pair for one candle to prevent immediate re-entries
self.strategy.lock_pair(pair, datetime.now(timezone.utc), reason="Auto lock")
self.strategy.lock_pair(pair, datetime.now(timezone.utc), reason="Auto lock", side=side)
prot_trig = self.protections.stop_per_pair(pair, side=side)
if prot_trig:
msg: RPCProtectionMsg = {
@@ -2310,7 +2322,7 @@ class FreqtradeBot(LoggingMixin):
amount: float,
fee_abs: float,
order_obj: Order,
) -> Optional[float]:
) -> float | None:
"""
Applies the fee to amount (either from Order or from Trades).
Can eat into dust if more than the required asset is available.
@@ -2338,7 +2350,7 @@ class FreqtradeBot(LoggingMixin):
return fee_abs
return None
def handle_order_fee(self, trade: Trade, order_obj: Order, order: dict[str, Any]) -> None:
def handle_order_fee(self, trade: Trade, order_obj: Order, order: CcxtOrder) -> None:
# Try update amount (binance-fix)
try:
fee_abs = self.get_real_amount(trade, order, order_obj)
@@ -2347,7 +2359,7 @@ class FreqtradeBot(LoggingMixin):
except DependencyException as exception:
logger.warning("Could not update trade amount: %s", exception)
def get_real_amount(self, trade: Trade, order: dict, order_obj: Order) -> Optional[float]:
def get_real_amount(self, trade: Trade, order: CcxtOrder, order_obj: Order) -> float | None:
"""
Detect and update trade fee.
Calls trade.update_fee() upon correct detection.
@@ -2407,8 +2419,8 @@ class FreqtradeBot(LoggingMixin):
return True
def fee_detection_from_trades(
self, trade: Trade, order: dict, order_obj: Order, order_amount: float, trades: list
) -> Optional[float]:
self, trade: Trade, order: CcxtOrder, order_obj: Order, order_amount: float, trades: list
) -> float | None:
"""
fee-detection fallback to Trades.
Either uses provided trades list or the result of fetch_my_trades to get correct fee.
@@ -2419,7 +2431,7 @@ class FreqtradeBot(LoggingMixin):
)
if len(trades) == 0:
logger.info("Applying fee on amount for %s failed: myTrade-Dict empty found", trade)
logger.info("Applying fee on amount for %s failed: myTrade-dict empty found", trade)
return None
fee_currency = None
amount = 0
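Besides the type-hint work, the entry notification above now includes the stake of still-open entry-side orders when reporting an unfilled order, since those amounts are not yet rolled into trade.stake_amount. A minimal sketch of that aggregation with simplified stand-in objects (the real code uses the Trade/Order ORM models):

from dataclasses import dataclass, field

@dataclass
class StubOrder:
    stake_amount: float
    ft_order_side: str

@dataclass
class StubTrade:
    stake_amount: float  # stake already accounted for by filled orders
    entry_side: str
    open_orders: list[StubOrder] = field(default_factory=list)

trade = StubTrade(
    stake_amount=100.0,
    entry_side="buy",
    open_orders=[StubOrder(50.0, "buy"), StubOrder(25.0, "sell")],
)

fill = False
stake_amount = trade.stake_amount
if not fill:
    # Unfilled entry notification: add open orders on the entry side only
    stake_amount += sum(
        o.stake_amount for o in trade.open_orders if o.ft_order_side == trade.entry_side
    )
print(stake_amount)  # 150.0 - the open buy order counts, the sell order does not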

View File

@@ -1,4 +1,4 @@
from typing import Any, Optional
from typing import Any
from typing_extensions import TypedDict
@@ -26,7 +26,7 @@ class BacktestHistoryEntryType(BacktestMetadataType):
filename: str
strategy: str
notes: str
backtest_start_ts: Optional[int]
backtest_end_ts: Optional[int]
timeframe: Optional[str]
timeframe_detail: Optional[str]
backtest_start_ts: int | None
backtest_end_ts: int | None
timeframe: str | None
timeframe_detail: str | None

View File

@@ -1,5 +1,4 @@
# Used for list-exchanges
from typing import Optional
from typing_extensions import TypedDict
@@ -17,5 +16,5 @@ class ValidExchangesType(TypedDict):
comment: str
dex: bool
is_alias: bool
alias_for: Optional[str]
alias_for: str | None
trade_modes: list[TradeModeType]

View File

@@ -1,5 +1,4 @@
import logging
from typing import Optional
from freqtrade.enums import MarginMode
from freqtrade.exceptions import DependencyException
@@ -12,7 +11,7 @@ logger = logging.getLogger(__name__)
def update_liquidation_prices(
trade: Optional[LocalTrade] = None,
trade: LocalTrade | None = None,
*,
exchange: Exchange,
wallets: Wallets,

View File

@@ -6,11 +6,11 @@ Read the documentation to know what cli arguments you need.
import logging
import sys
from typing import Any, Optional
from typing import Any
# check min. python version
if sys.version_info < (3, 10): # pragma: no cover
if sys.version_info < (3, 10): # pragma: no cover # noqa: UP036
sys.exit("Freqtrade requires Python version >= 3.10")
from freqtrade import __version__
@@ -24,7 +24,7 @@ from freqtrade.system import asyncio_setup, gc_set_threshold
logger = logging.getLogger("freqtrade")
def main(sysargv: Optional[list[str]] = None) -> None:
def main(sysargv: list[str] | None = None) -> None:
"""
This function will initiate the bot and start the trading loop.
:return: None

View File

@@ -7,7 +7,7 @@ import logging
from collections.abc import Iterator, Mapping
from io import StringIO
from pathlib import Path
from typing import Any, Optional, TextIO, Union
from typing import Any, TextIO
from urllib.parse import urlparse
import pandas as pd
@@ -129,10 +129,10 @@ def round_dict(d, n):
return {k: (round(v, n) if isinstance(v, float) else v) for k, v in d.items()}
DictMap = Union[dict[str, Any], Mapping[str, Any]]
DictMap = dict[str, Any] | Mapping[str, Any]
def safe_value_fallback(obj: DictMap, key1: str, key2: Optional[str] = None, default_value=None):
def safe_value_fallback(obj: DictMap, key1: str, key2: str | None = None, default_value=None):
"""
Search a value in obj, return this if it's not None.
Then search key2 in obj - return that if it's not none - then use default_value.
@@ -161,7 +161,7 @@ def safe_value_fallback2(dict1: DictMap, dict2: DictMap, key1: str, key2: str, d
return default_value
def plural(num: float, singular: str, plural: Optional[str] = None) -> str:
def plural(num: float, singular: str, plural: str | None = None) -> str:
return singular if (num == 1 or num == -1) else plural or singular + "s"
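Only the annotations of `safe_value_fallback` changed above. For context, a typical call resolves the first key whose value is set and otherwise falls back (the order dict here is illustrative; assumes freqtrade is installed):

from freqtrade.misc import safe_value_fallback

order = {"average": None, "price": 123.4}

# Returns order["average"] if it is set, else order["price"], else the default
fill_price = safe_value_fallback(order, "average", "price", default_value=0.0)
print(fill_price)  # 123.4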

View File

@@ -1,4 +1,4 @@
from typing import Callable
from collections.abc import Callable
from cachetools import TTLCache, cached

View File

@@ -1,7 +1,7 @@
import logging
import time
from pathlib import Path
from typing import Any, Union
from typing import Any
import pandas as pd
from rich.text import Text
@@ -21,7 +21,7 @@ class LookaheadAnalysisSubFunctions:
def text_table_lookahead_analysis_instances(
config: dict[str, Any],
lookahead_instances: list[LookaheadAnalysis],
caption: Union[str, None] = None,
caption: str | None = None,
):
headers = [
"filename",
@@ -243,7 +243,7 @@ class LookaheadAnalysisSubFunctions:
# report the results
if lookaheadAnalysis_instances:
caption: Union[str, None] = None
caption: str | None = None
if any(
[
any(

View File

@@ -1,7 +1,6 @@
import hashlib
from copy import deepcopy
from pathlib import Path
from typing import Union
import rapidjson
@@ -38,7 +37,7 @@ def get_strategy_run_id(strategy) -> str:
return digest.hexdigest().lower()
def get_backtest_metadata_filename(filename: Union[Path, str]) -> Path:
def get_backtest_metadata_filename(filename: Path | str) -> Path:
"""Return metadata filename for specified backtest results file."""
filename = Path(filename)
return filename.parent / Path(f"{filename.stem}.meta{filename.suffix}")
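The helper above changed only in its annotation; since the whole function is visible, a quick usage check (file name made up):

from pathlib import Path

def get_backtest_metadata_filename(filename: Path | str) -> Path:
    """Return metadata filename for specified backtest results file."""
    filename = Path(filename)
    return filename.parent / Path(f"{filename.stem}.meta{filename.suffix}")

print(get_backtest_metadata_filename("backtest_results/backtest-result-2024-11-30.json"))
# backtest_results/backtest-result-2024-11-30.meta.json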

View File

@@ -8,7 +8,7 @@ import logging
from collections import defaultdict
from copy import deepcopy
from datetime import datetime, timedelta, timezone
from typing import Any, Optional
from typing import Any
from numpy import nan
from pandas import DataFrame
@@ -110,7 +110,7 @@ class Backtesting:
backtesting.start()
"""
def __init__(self, config: Config, exchange: Optional[Exchange] = None) -> None:
def __init__(self, config: Config, exchange: Exchange | None = None) -> None:
LoggingMixin.show_output = False
self.config = config
self.results: BacktestResultType = get_BacktestResultType_default()
@@ -685,7 +685,7 @@ class Backtesting:
)
def _try_close_open_order(
self, order: Optional[Order], trade: LocalTrade, current_date: datetime, row: tuple
self, order: Order | None, trade: LocalTrade, current_date: datetime, row: tuple
) -> bool:
"""
Check if an order is open and if it should've filled.
@@ -742,8 +742,8 @@ class Backtesting:
row: tuple,
exit_: ExitCheckTuple,
current_time: datetime,
amount: Optional[float] = None,
) -> Optional[LocalTrade]:
amount: float | None = None,
) -> LocalTrade | None:
if exit_.exit_flag:
trade.close_date = current_time
exit_reason = exit_.exit_reason
@@ -822,8 +822,8 @@ class Backtesting:
sell_row: tuple,
close_rate: float,
amount: float,
exit_reason: Optional[str],
) -> Optional[LocalTrade]:
exit_reason: str | None,
) -> LocalTrade | None:
self.order_id_counter += 1
exit_candle_time = sell_row[DATE_IDX].to_pydatetime()
order_type = self.strategy.order_types["exit"]
@@ -859,7 +859,7 @@ class Backtesting:
def _check_trade_exit(
self, trade: LocalTrade, row: tuple, current_time: datetime
) -> Optional[LocalTrade]:
) -> LocalTrade | None:
self._run_funding_fees(trade, current_time)
# Check if we need to adjust our current positions
@@ -909,10 +909,10 @@ class Backtesting:
stake_amount: float,
direction: LongShort,
current_time: datetime,
entry_tag: Optional[str],
trade: Optional[LocalTrade],
entry_tag: str | None,
trade: LocalTrade | None,
order_type: str,
price_precision: Optional[float],
price_precision: float | None,
) -> tuple[float, float, float, float]:
if order_type == "limit":
new_rate = strategy_safe_wrapper(
@@ -1004,12 +1004,12 @@ class Backtesting:
pair: str,
row: tuple,
direction: LongShort,
stake_amount: Optional[float] = None,
trade: Optional[LocalTrade] = None,
requested_rate: Optional[float] = None,
requested_stake: Optional[float] = None,
entry_tag1: Optional[str] = None,
) -> Optional[LocalTrade]:
stake_amount: float | None = None,
trade: LocalTrade | None = None,
requested_rate: float | None = None,
requested_stake: float | None = None,
entry_tag1: str | None = None,
) -> LocalTrade | None:
"""
:param trade: Trade to adjust - initial entry if None
:param requested_rate: Adjusted entry rate
@@ -1139,7 +1139,7 @@ class Backtesting:
amount=amount,
filled=0,
remaining=amount,
cost=amount * propose_rate + trade.fee_open,
cost=amount * propose_rate * (1 + self.fee),
ft_order_tag=entry_tag,
)
order._trade_bt = trade
@@ -1178,7 +1178,7 @@ class Backtesting:
self.rejected_trades += 1
return False
def check_for_trade_entry(self, row) -> Optional[LongShort]:
def check_for_trade_entry(self, row) -> LongShort | None:
enter_long = row[LONG_IDX] == 1
exit_long = row[ELONG_IDX] == 1
enter_short = self._can_short and row[SHORT_IDX] == 1
@@ -1216,7 +1216,7 @@ class Backtesting:
def check_order_cancel(
self, trade: LocalTrade, order: Order, current_time: datetime
) -> Optional[bool]:
) -> bool | None:
"""
Check if current analyzed order has to be canceled.
Returns True if the trade should be Deleted (initial order was canceled),
@@ -1298,7 +1298,7 @@ class Backtesting:
def validate_row(
self, data: dict, pair: str, row_index: int, current_time: datetime
) -> Optional[tuple]:
) -> tuple | None:
try:
# Row is treated as "current incomplete candle".
# entry / exit signals are shifted by 1 to compensate for this.
@@ -1332,14 +1332,41 @@ class Backtesting:
row: tuple,
pair: str,
current_time: datetime,
trade_dir: Optional[LongShort],
trade_dir: LongShort | None,
can_enter: bool,
) -> None:
"""
Conditionally call backtest_loop_inner a 2nd time if shorting is enabled,
a position closed and a new signal in the other direction is available.
"""
if not self._can_short or trade_dir is None:
# No need to reverse position if shorting is disabled or there's no new signal
self.backtest_loop_inner(row, pair, current_time, trade_dir, can_enter)
else:
for _ in (0, 1):
a = self.backtest_loop_inner(row, pair, current_time, trade_dir, can_enter)
if not a or a == trade_dir:
# the trade didn't close or position change is in the same direction
break
def backtest_loop_inner(
self,
row: tuple,
pair: str,
current_time: datetime,
trade_dir: LongShort | None,
can_enter: bool,
) -> LongShort | None:
"""
NOTE: This method is used by Hyperopt at each iteration. Please keep it optimized.
Backtesting processing for one candle/pair.
"""
exiting_dir: LongShort | None = None
if not self._position_stacking and len(LocalTrade.bt_trades_open_pp[pair]) > 0:
# position_stacking not supported for now.
exiting_dir = "short" if LocalTrade.bt_trades_open_pp[pair][0].is_short else "long"
for t in list(LocalTrade.bt_trades_open_pp[pair]):
# 1. Manage currently open orders of active trades
if self.manage_open_orders(t, current_time, row):
@@ -1380,6 +1407,10 @@ class Backtesting:
if order:
self._process_exit_order(order, trade, current_time, row, pair)
if exiting_dir and len(LocalTrade.bt_trades_open_pp[pair]) == 0:
return exiting_dir
return None
def time_pair_generator(
self, start_date: datetime, end_date: datetime, increment: timedelta, pairs: list[str]
):
@@ -1450,7 +1481,7 @@ class Backtesting:
self.dataprovider._set_dataframe_max_index(self.required_startup + row_index)
self.dataprovider._set_dataframe_max_date(current_time)
current_detail_time: datetime = row[DATE_IDX].to_pydatetime()
trade_dir: Optional[LongShort] = self.check_for_trade_entry(row)
trade_dir: LongShort | None = self.check_for_trade_entry(row)
if (
(trade_dir is not None or len(LocalTrade.bt_trades_open_pp[pair]) > 0)
@@ -1521,12 +1552,6 @@ class Backtesting:
backtest_start_time = datetime.now(timezone.utc)
self._set_strategy(strat)
# Use max_open_trades in backtesting, except --disable-max-market-positions is set
if not self.config.get("use_max_market_positions", True):
logger.info("Ignoring max_open_trades (--disable-max-market-positions was used) ...")
self.strategy.max_open_trades = float("inf")
self.config.update({"max_open_trades": self.strategy.max_open_trades})
# need to reprocess data every time to populate signals
preprocessed = self.strategy.advise_all_indicators(data)
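One genuine fix in this file: the simulated entry order's cost previously added the fee ratio (`trade.fee_open`, e.g. 0.001) to the notional value as if it were an absolute amount; the new formula applies the fee proportionally. A quick arithmetic check with illustrative numbers:

amount = 2.0          # position size in base currency
propose_rate = 100.0  # proposed entry rate
fee = 0.001           # 0.1 % fee ratio

old_cost = amount * propose_rate + fee        # 200.001 - fee ratio added as an absolute value
new_cost = amount * propose_rate * (1 + fee)  # 200.2   - fee applied proportionally to the notional

print(old_cost, new_cost)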

View File

@@ -1,7 +1,7 @@
import logging
from copy import deepcopy
from datetime import datetime, timezone
from typing import Any, Optional
from typing import Any
from pandas import DataFrame
@@ -28,7 +28,7 @@ class BaseAnalysis:
def __init__(self, config: dict[str, Any], strategy_obj: dict):
self.failed_bias_check = True
self.full_varHolder = VarHolder()
self.exchange: Optional[Any] = None
self.exchange: Any | None = None
self._fee = None
# pull variables the scope of the lookahead_analysis-instance

View File

@@ -0,0 +1,5 @@
from freqtrade.optimize.hyperopt.hyperopt import Hyperopt
from freqtrade.optimize.hyperopt_loss.hyperopt_loss_interface import IHyperOptLoss
__all__ = ["Hyperopt", "IHyperOptLoss"]
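The new package `__init__` above re-exports the public entry points, so the split of the old hyperopt module into a package should stay transparent to callers (import path taken from the file contents shown; treat this as a sketch, not a verified guarantee):

# Resolves via the package __init__ shown above:
from freqtrade.optimize.hyperopt import Hyperopt, IHyperOptLoss

print(Hyperopt, IHyperOptLoss)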

View File

@@ -0,0 +1,352 @@
# pragma pylint: disable=too-many-instance-attributes, pointless-string-statement
"""
This module contains the hyperopt logic
"""
import logging
import random
import sys
from datetime import datetime
from math import ceil
from multiprocessing import Manager
from pathlib import Path
from typing import Any
import rapidjson
from joblib import Parallel, cpu_count, delayed, wrap_non_picklable_objects
from joblib.externals import cloudpickle
from rich.console import Console
from freqtrade.constants import FTHYPT_FILEVERSION, LAST_BT_RESULT_FN, Config
from freqtrade.enums import HyperoptState
from freqtrade.exceptions import OperationalException
from freqtrade.misc import file_dump_json, plural
from freqtrade.optimize.hyperopt.hyperopt_logger import logging_mp_handle, logging_mp_setup
from freqtrade.optimize.hyperopt.hyperopt_optimizer import HyperOptimizer
from freqtrade.optimize.hyperopt.hyperopt_output import HyperoptOutput
from freqtrade.optimize.hyperopt_tools import (
HyperoptStateContainer,
HyperoptTools,
hyperopt_serializer,
)
from freqtrade.util import get_progress_tracker
logger = logging.getLogger(__name__)
INITIAL_POINTS = 30
# Keep no more than SKOPT_MODEL_QUEUE_SIZE models
# in the skopt model queue, to optimize memory consumption
SKOPT_MODEL_QUEUE_SIZE = 10
log_queue: Any
class Hyperopt:
"""
Hyperopt class, this class contains all the logic to run a hyperopt simulation
To start a hyperopt run:
hyperopt = Hyperopt(config)
hyperopt.start()
"""
def __init__(self, config: Config) -> None:
self._hyper_out: HyperoptOutput = HyperoptOutput(streaming=True)
self.config = config
self.analyze_per_epoch = self.config.get("analyze_per_epoch", False)
HyperoptStateContainer.set_state(HyperoptState.STARTUP)
if self.config.get("hyperopt"):
raise OperationalException(
"Using separate Hyperopt files has been removed in 2021.9. Please convert "
"your existing Hyperopt file to the new Hyperoptable strategy interface"
)
time_now = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
strategy = str(self.config["strategy"])
self.results_file: Path = (
self.config["user_data_dir"]
/ "hyperopt_results"
/ f"strategy_{strategy}_{time_now}.fthypt"
)
self.data_pickle_file = (
self.config["user_data_dir"] / "hyperopt_results" / "hyperopt_tickerdata.pkl"
)
self.total_epochs = config.get("epochs", 0)
self.current_best_loss = 100
self.clean_hyperopt()
self.num_epochs_saved = 0
self.current_best_epoch: dict[str, Any] | None = None
if HyperoptTools.has_space(self.config, "sell"):
# Make sure use_exit_signal is enabled
self.config["use_exit_signal"] = True
self.print_all = self.config.get("print_all", False)
self.hyperopt_table_header = 0
self.print_colorized = self.config.get("print_colorized", False)
self.print_json = self.config.get("print_json", False)
self.hyperopter = HyperOptimizer(self.config)
@staticmethod
def get_lock_filename(config: Config) -> str:
return str(config["user_data_dir"] / "hyperopt.lock")
def clean_hyperopt(self) -> None:
"""
Remove hyperopt pickle files to restart hyperopt.
"""
for f in [self.data_pickle_file, self.results_file]:
p = Path(f)
if p.is_file():
logger.info(f"Removing `{p}`.")
p.unlink()
def hyperopt_pickle_magic(self, bases) -> None:
"""
Hyperopt magic to allow strategy inheritance across files.
For this to properly work, we need to register the module of the imported class
to pickle as value.
"""
for modules in bases:
if modules.__name__ != "IStrategy":
cloudpickle.register_pickle_by_value(sys.modules[modules.__module__])
self.hyperopt_pickle_magic(modules.__bases__)
def _save_result(self, epoch: dict) -> None:
"""
Save hyperopt results to file
Store one line per epoch.
While not a valid json object - this allows appending easily.
:param epoch: result dictionary for this epoch.
"""
epoch[FTHYPT_FILEVERSION] = 2
with self.results_file.open("a") as f:
rapidjson.dump(
epoch,
f,
default=hyperopt_serializer,
number_mode=rapidjson.NM_NATIVE | rapidjson.NM_NAN,
)
f.write("\n")
self.num_epochs_saved += 1
logger.debug(
f"{self.num_epochs_saved} {plural(self.num_epochs_saved, 'epoch')} "
f"saved to '{self.results_file}'."
)
# Store hyperopt filename
latest_filename = Path.joinpath(self.results_file.parent, LAST_BT_RESULT_FN)
file_dump_json(latest_filename, {"latest_hyperopt": str(self.results_file.name)}, log=False)
def print_results(self, results: dict[str, Any]) -> None:
"""
Log results if it is better than any previous evaluation
TODO: this should be moved to HyperoptTools too
"""
is_best = results["is_best"]
if self.print_all or is_best:
self._hyper_out.add_data(
self.config,
[results],
self.total_epochs,
self.print_all,
)
def run_optimizer_parallel(self, parallel: Parallel, asked: list[list]) -> list[dict[str, Any]]:
"""Start optimizer in a parallel way"""
def optimizer_wrapper(*args, **kwargs):
# global log queue. This must happen in the file that initializes Parallel
logging_mp_setup(
log_queue, logging.INFO if self.config["verbosity"] < 1 else logging.DEBUG
)
return self.hyperopter.generate_optimizer(*args, **kwargs)
return parallel(delayed(wrap_non_picklable_objects(optimizer_wrapper))(v) for v in asked)
def _set_random_state(self, random_state: int | None) -> int:
return random_state or random.randint(1, 2**16 - 1) # noqa: S311
def get_asked_points(self, n_points: int) -> tuple[list[list[Any]], list[bool]]:
"""
Enforce points returned from `self.opt.ask` have not been already evaluated
Steps:
1. Try to get points using `self.opt.ask` first
2. Discard the points that have already been evaluated
3. Retry using `self.opt.ask` up to 3 times
4. If still some points are missing in respect to `n_points`, random sample some points
5. Repeat until at least `n_points` points in the `asked_non_tried` list
6. Return a list with length truncated at `n_points`
"""
def unique_list(a_list):
new_list = []
for item in a_list:
if item not in new_list:
new_list.append(item)
return new_list
i = 0
asked_non_tried: list[list[Any]] = []
is_random_non_tried: list[bool] = []
while i < 5 and len(asked_non_tried) < n_points:
if i < 3:
self.opt.cache_ = {}
asked = unique_list(self.opt.ask(n_points=n_points * 5 if i > 0 else n_points))
is_random = [False for _ in range(len(asked))]
else:
asked = unique_list(self.opt.space.rvs(n_samples=n_points * 5))
is_random = [True for _ in range(len(asked))]
is_random_non_tried += [
rand
for x, rand in zip(asked, is_random, strict=False)
if x not in self.opt.Xi and x not in asked_non_tried
]
asked_non_tried += [
x for x in asked if x not in self.opt.Xi and x not in asked_non_tried
]
i += 1
if asked_non_tried:
return (
asked_non_tried[: min(len(asked_non_tried), n_points)],
is_random_non_tried[: min(len(asked_non_tried), n_points)],
)
else:
return self.opt.ask(n_points=n_points), [False for _ in range(n_points)]
def evaluate_result(self, val: dict[str, Any], current: int, is_random: bool):
"""
Evaluate results returned from generate_optimizer
"""
val["current_epoch"] = current
val["is_initial_point"] = current <= INITIAL_POINTS
logger.debug("Optimizer epoch evaluated: %s", val)
is_best = HyperoptTools.is_best_loss(val, self.current_best_loss)
# This value is assigned here and not in the optimization method
# to keep proper order in the list of results. That's because
# evaluations can take different time. Here they are aligned in the
# order they will be shown to the user.
val["is_best"] = is_best
val["is_random"] = is_random
self.print_results(val)
if is_best:
self.current_best_loss = val["loss"]
self.current_best_epoch = val
self._save_result(val)
def _setup_logging_mp_workaround(self) -> None:
"""
Workaround for logging in child processes.
log_queue must be a global in the file that initializes Parallel.
"""
global log_queue
m = Manager()
log_queue = m.Queue()
def start(self) -> None:
self.random_state = self._set_random_state(self.config.get("hyperopt_random_state"))
logger.info(f"Using optimizer random state: {self.random_state}")
self.hyperopt_table_header = -1
self.hyperopter.prepare_hyperopt()
cpus = cpu_count()
logger.info(f"Found {cpus} CPU cores. Let's make them scream!")
config_jobs = self.config.get("hyperopt_jobs", -1)
logger.info(f"Number of parallel jobs set as: {config_jobs}")
self.opt = self.hyperopter.get_optimizer(
config_jobs, self.random_state, INITIAL_POINTS, SKOPT_MODEL_QUEUE_SIZE
)
self._setup_logging_mp_workaround()
try:
with Parallel(n_jobs=config_jobs) as parallel:
jobs = parallel._effective_n_jobs()
logger.info(f"Effective number of parallel workers used: {jobs}")
console = Console(
color_system="auto" if self.print_colorized else None,
)
# Define progressbar
with get_progress_tracker(
console=console,
cust_callables=[self._hyper_out],
) as pbar:
task = pbar.add_task("Epochs", total=self.total_epochs)
start = 0
if self.analyze_per_epoch:
# First analysis not in parallel mode when using --analyze-per-epoch.
# This allows dataprovider to load its informative cache.
asked, is_random = self.get_asked_points(n_points=1)
f_val0 = self.hyperopter.generate_optimizer(asked[0])
self.opt.tell(asked, [f_val0["loss"]])
self.evaluate_result(f_val0, 1, is_random[0])
pbar.update(task, advance=1)
start += 1
evals = ceil((self.total_epochs - start) / jobs)
for i in range(evals):
# Correct the number of epochs to be processed for the last
# iteration (should not exceed self.total_epochs in total)
n_rest = (i + 1) * jobs - (self.total_epochs - start)
current_jobs = jobs - n_rest if n_rest > 0 else jobs
asked, is_random = self.get_asked_points(n_points=current_jobs)
f_val = self.run_optimizer_parallel(parallel, asked)
self.opt.tell(asked, [v["loss"] for v in f_val])
for j, val in enumerate(f_val):
# Use human-friendly indexes here (starting from 1)
current = i * jobs + j + 1 + start
self.evaluate_result(val, current, is_random[j])
pbar.update(task, advance=1)
logging_mp_handle(log_queue)
except KeyboardInterrupt:
print("User interrupted..")
logger.info(
f"{self.num_epochs_saved} {plural(self.num_epochs_saved, 'epoch')} "
f"saved to '{self.results_file}'."
)
if self.current_best_epoch:
HyperoptTools.try_export_params(
self.config,
self.hyperopter.get_strategy_name(),
self.current_best_epoch,
)
HyperoptTools.show_epoch_details(
self.current_best_epoch, self.total_epochs, self.print_json
)
elif self.num_epochs_saved > 0:
print(
f"No good result found for given optimization function in {self.num_epochs_saved} "
f"{plural(self.num_epochs_saved, 'epoch')}."
)
else:
# This is printed when Ctrl+C is pressed quickly, before first epochs have
# a chance to be evaluated.
print("No epochs evaluated yet, no best result.")

View File

@@ -5,8 +5,8 @@ This module implements a convenience auto-hyperopt class, which can be used toge
"""
import logging
from collections.abc import Callable
from contextlib import suppress
from typing import Callable
from freqtrade.exceptions import OperationalException
@@ -14,7 +14,7 @@ from freqtrade.exceptions import OperationalException
with suppress(ImportError):
from skopt.space import Dimension
from freqtrade.optimize.hyperopt_interface import EstimatorType, IHyperOpt
from freqtrade.optimize.hyperopt.hyperopt_interface import EstimatorType, IHyperOpt
logger = logging.getLogger(__name__)

View File

@@ -6,7 +6,7 @@ This module defines the interface to apply for hyperopt
import logging
import math
from abc import ABC
from typing import Union
from typing import TypeAlias
from sklearn.base import RegressorMixin
from skopt.space import Categorical, Dimension, Integer
@@ -20,7 +20,7 @@ from freqtrade.strategy import IStrategy
logger = logging.getLogger(__name__)
EstimatorType = Union[RegressorMixin, str]
EstimatorType: TypeAlias = RegressorMixin | str
class IHyperOpt(ABC):
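The `EstimatorType` alias above captures the two shapes `generate_estimator()` may return: a scikit-learn regressor instance or one of skopt's estimator shorthand strings. A small sketch (the ExtraTreesRegressor choice is arbitrary and the import path follows the moved module shown in this diff; both are illustrative):

from sklearn.ensemble import ExtraTreesRegressor

from freqtrade.optimize.hyperopt.hyperopt_interface import EstimatorType

# Either shape satisfies the alias:
estimator_a: EstimatorType = "ET"                   # skopt shorthand ("GP", "RF", "ET", "GBRT")
estimator_b: EstimatorType = ExtraTreesRegressor()  # any RegressorMixin instance
print(estimator_a, estimator_b)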

View File

@@ -0,0 +1,40 @@
import logging
from logging.handlers import QueueHandler
from multiprocessing import Queue, current_process
from queue import Empty
logger = logging.getLogger(__name__)
def logging_mp_setup(log_queue: Queue, verbosity: int):
"""
Setup logging in a child process.
Must be called in the child process before logging.
log_queue MUST be passed to the child process via inheritance
Which essentially means that the log_queue must be a global, created in the same
file as Parallel is initialized.
"""
current_proc = current_process().name
if current_proc != "MainProcess":
h = QueueHandler(log_queue)
root = logging.getLogger()
root.setLevel(verbosity)
root.addHandler(h)
def logging_mp_handle(q: Queue):
"""
Handle logging from a child process.
Must be called in the parent process to handle log messages from the child process.
"""
try:
while True:
record = q.get(block=False)
if record is None:
break
logger.handle(record)
except Empty:
pass
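The new helper module above uses the standard queue-based pattern for getting log records out of worker processes and back to the parent's handlers. A stripped-down, self-contained sketch of the same idea outside freqtrade (ProcessPoolExecutor stands in for joblib's Parallel; all names are illustrative):

import logging
from concurrent.futures import ProcessPoolExecutor
from logging.handlers import QueueHandler
from multiprocessing import Manager
from queue import Empty

logger = logging.getLogger("demo")

def worker(log_queue, item):
    # Child process: route every record into the shared queue instead of stderr
    root = logging.getLogger()
    root.setLevel(logging.INFO)
    root.handlers = [QueueHandler(log_queue)]
    root.info("processed item %s", item)
    return item * item

def drain(log_queue):
    # Parent process: re-emit queued child records through the local handlers
    try:
        while True:
            logger.handle(log_queue.get(block=False))
    except Empty:
        pass

if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO, format="%(processName)s: %(message)s")
    manager = Manager()  # keep a reference so the manager process stays alive
    log_queue = manager.Queue()
    with ProcessPoolExecutor(max_workers=2) as pool:
        results = list(pool.map(worker, [log_queue] * 3, range(3)))
    drain(log_queue)
    print(results)  # [0, 1, 4]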

View File

@@ -1,45 +1,33 @@
# pragma pylint: disable=too-many-instance-attributes, pointless-string-statement
"""
This module contains the hyperopt logic
This module contains the hyperopt optimizer class, which needs to be pickled
and will be sent to the hyperopt worker processes.
"""
import logging
import random
import sys
import warnings
from datetime import datetime, timezone
from math import ceil
from pathlib import Path
from typing import Any, Optional
from typing import Any
import rapidjson
from joblib import Parallel, cpu_count, delayed, dump, load, wrap_non_picklable_objects
from joblib import dump, load
from joblib.externals import cloudpickle
from pandas import DataFrame
from rich.console import Console
from freqtrade.constants import DATETIME_PRINT_FORMAT, FTHYPT_FILEVERSION, LAST_BT_RESULT_FN, Config
from freqtrade.constants import DATETIME_PRINT_FORMAT, Config
from freqtrade.data.converter import trim_dataframes
from freqtrade.data.history import get_timerange
from freqtrade.data.metrics import calculate_market_change
from freqtrade.enums import HyperoptState
from freqtrade.exceptions import OperationalException
from freqtrade.misc import deep_merge_dicts, file_dump_json, plural
from freqtrade.misc import deep_merge_dicts
from freqtrade.optimize.backtesting import Backtesting
# Import IHyperOpt and IHyperOptLoss to allow unpickling classes from these modules
from freqtrade.optimize.hyperopt_auto import HyperOptAuto
from freqtrade.optimize.hyperopt_loss_interface import IHyperOptLoss
from freqtrade.optimize.hyperopt_output import HyperoptOutput
from freqtrade.optimize.hyperopt_tools import (
HyperoptStateContainer,
HyperoptTools,
hyperopt_serializer,
)
# Import IHyperOptLoss to allow unpickling classes from these modules
from freqtrade.optimize.hyperopt.hyperopt_auto import HyperOptAuto
from freqtrade.optimize.hyperopt_loss.hyperopt_loss_interface import IHyperOptLoss
from freqtrade.optimize.hyperopt_tools import HyperoptStateContainer, HyperoptTools
from freqtrade.optimize.optimize_reports import generate_strategy_stats
from freqtrade.resolvers.hyperopt_resolver import HyperOptLossResolver
from freqtrade.util import get_progress_tracker
# Suppress scikit-learn FutureWarnings from skopt
@@ -51,22 +39,13 @@ with warnings.catch_warnings():
logger = logging.getLogger(__name__)
INITIAL_POINTS = 30
# Keep no more than SKOPT_MODEL_QUEUE_SIZE models
# in the skopt model queue, to optimize memory consumption
SKOPT_MODEL_QUEUE_SIZE = 10
MAX_LOSS = 100000 # just a big enough number to be bad result in loss optimization
class Hyperopt:
class HyperOptimizer:
"""
Hyperopt class, this class contains all the logic to run a hyperopt simulation
To start a hyperopt run:
hyperopt = Hyperopt(config)
hyperopt.start()
HyperOptimizer class
This class is sent to the hyperopt worker processes.
"""
def __init__(self, config: Config) -> None:
@@ -79,8 +58,6 @@ class Hyperopt:
self.max_open_trades_space: list[Dimension] = []
self.dimensions: list[Dimension] = []
self._hyper_out: HyperoptOutput = HyperoptOutput(streaming=True)
self.config = config
self.min_date: datetime
self.max_date: datetime
@@ -89,7 +66,6 @@ class Hyperopt:
self.pairlist = self.backtesting.pairlists.whitelist
self.custom_hyperopt: HyperOptAuto
self.analyze_per_epoch = self.config.get("analyze_per_epoch", False)
HyperoptStateContainer.set_state(HyperoptState.STARTUP)
if not self.config.get("hyperopt"):
self.custom_hyperopt = HyperOptAuto(self.config)
@@ -107,54 +83,35 @@ class Hyperopt:
self.config
)
self.calculate_loss = self.custom_hyperoptloss.hyperopt_loss_function
time_now = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
strategy = str(self.config["strategy"])
self.results_file: Path = (
self.config["user_data_dir"]
/ "hyperopt_results"
/ f"strategy_{strategy}_{time_now}.fthypt"
)
self.data_pickle_file = (
self.config["user_data_dir"] / "hyperopt_results" / "hyperopt_tickerdata.pkl"
)
self.total_epochs = config.get("epochs", 0)
self.current_best_loss = 100
self.clean_hyperopt()
self.market_change = 0.0
self.num_epochs_saved = 0
self.current_best_epoch: Optional[dict[str, Any]] = None
# Use max_open_trades for hyperopt as well, except --disable-max-market-positions is set
if not self.config.get("use_max_market_positions", True):
logger.debug("Ignoring max_open_trades (--disable-max-market-positions was used) ...")
self.backtesting.strategy.max_open_trades = float("inf")
config.update({"max_open_trades": self.backtesting.strategy.max_open_trades})
if HyperoptTools.has_space(self.config, "sell"):
# Make sure use_exit_signal is enabled
self.config["use_exit_signal"] = True
self.print_all = self.config.get("print_all", False)
self.hyperopt_table_header = 0
self.print_colorized = self.config.get("print_colorized", False)
self.print_json = self.config.get("print_json", False)
def prepare_hyperopt(self) -> None:
# Initialize spaces ...
self.init_spaces()
@staticmethod
def get_lock_filename(config: Config) -> str:
return str(config["user_data_dir"] / "hyperopt.lock")
self.prepare_hyperopt_data()
def clean_hyperopt(self) -> None:
"""
Remove hyperopt pickle files to restart hyperopt.
"""
for f in [self.data_pickle_file, self.results_file]:
p = Path(f)
if p.is_file():
logger.info(f"Removing `{p}`.")
p.unlink()
# We don't need exchange instance anymore while running hyperopt
self.backtesting.exchange.close()
self.backtesting.exchange._api = None
self.backtesting.exchange._api_async = None
self.backtesting.exchange.loop = None # type: ignore
self.backtesting.exchange._loop_lock = None # type: ignore
self.backtesting.exchange._cache_lock = None # type: ignore
# self.backtesting.exchange = None # type: ignore
self.backtesting.pairlists = None # type: ignore
def get_strategy_name(self) -> str:
return self.backtesting.strategy.get_strategy_name()
def hyperopt_pickle_magic(self, bases) -> None:
"""
@@ -177,33 +134,7 @@ class Hyperopt:
# Return a dict where the keys are the names of the dimensions
# and the values are taken from the list of parameters.
return {d.name: v for d, v in zip(dimensions, raw_params)}
def _save_result(self, epoch: dict) -> None:
"""
Save hyperopt results to file
Store one line per epoch.
While not a valid json object - this allows appending easily.
:param epoch: result dictionary for this epoch.
"""
epoch[FTHYPT_FILEVERSION] = 2
with self.results_file.open("a") as f:
rapidjson.dump(
epoch,
f,
default=hyperopt_serializer,
number_mode=rapidjson.NM_NATIVE | rapidjson.NM_NAN,
)
f.write("\n")
self.num_epochs_saved += 1
logger.debug(
f"{self.num_epochs_saved} {plural(self.num_epochs_saved, 'epoch')} "
f"saved to '{self.results_file}'."
)
# Store hyperopt filename
latest_filename = Path.joinpath(self.results_file.parent, LAST_BT_RESULT_FN)
file_dump_json(latest_filename, {"latest_hyperopt": str(self.results_file.name)}, log=False)
return {d.name: v for d, v in zip(dimensions, raw_params, strict=False)}
def _get_params_details(self, params: dict) -> dict:
"""
@@ -257,21 +188,6 @@ class Hyperopt:
result["max_open_trades"] = {"max_open_trades": strategy.max_open_trades}
return result
def print_results(self, results: dict[str, Any]) -> None:
"""
Log results if it is better than any previous evaluation
TODO: this should be moved to HyperoptTools too
"""
is_best = results["is_best"]
if self.print_all or is_best:
self._hyper_out.add_data(
self.config,
[results],
self.total_epochs,
self.print_all,
)
def init_spaces(self):
"""
Assign the dimensions in the hyperoptimization space.
@@ -458,7 +374,14 @@ class Hyperopt:
"total_profit": total_profit,
}
def get_optimizer(self, dimensions: list[Dimension], cpu_count) -> Optimizer:
def get_optimizer(
self,
cpu_count: int,
random_state: int,
initial_points: int,
model_queue_size: int,
) -> Optimizer:
dimensions = self.dimensions
estimator = self.custom_hyperopt.generate_estimator(dimensions=dimensions)
acq_optimizer = "sampling"
@@ -473,21 +396,12 @@ class Hyperopt:
dimensions,
base_estimator=estimator,
acq_optimizer=acq_optimizer,
n_initial_points=INITIAL_POINTS,
n_initial_points=initial_points,
acq_optimizer_kwargs={"n_jobs": cpu_count},
random_state=self.random_state,
model_queue_size=SKOPT_MODEL_QUEUE_SIZE,
random_state=random_state,
model_queue_size=model_queue_size,
)
def run_optimizer_parallel(self, parallel: Parallel, asked: list[list]) -> list[dict[str, Any]]:
"""Start optimizer in a parallel way"""
return parallel(
delayed(wrap_non_picklable_objects(self.generate_optimizer))(v) for v in asked
)
def _set_random_state(self, random_state: Optional[int]) -> int:
return random_state or random.randint(1, 2**16 - 1) # noqa: S311
def advise_and_trim(self, data: dict[str, DataFrame]) -> dict[str, DataFrame]:
preprocessed = self.backtesting.strategy.advise_all_indicators(data)
@@ -523,173 +437,3 @@ class Hyperopt:
dump(preprocessed, self.data_pickle_file)
else:
dump(data, self.data_pickle_file)
def get_asked_points(self, n_points: int) -> tuple[list[list[Any]], list[bool]]:
"""
Enforce points returned from `self.opt.ask` have not been already evaluated
Steps:
1. Try to get points using `self.opt.ask` first
2. Discard the points that have already been evaluated
3. Retry using `self.opt.ask` up to 3 times
4. If still some points are missing in respect to `n_points`, random sample some points
5. Repeat until at least `n_points` points in the `asked_non_tried` list
6. Return a list with length truncated at `n_points`
"""
def unique_list(a_list):
new_list = []
for item in a_list:
if item not in new_list:
new_list.append(item)
return new_list
i = 0
asked_non_tried: list[list[Any]] = []
is_random_non_tried: list[bool] = []
while i < 5 and len(asked_non_tried) < n_points:
if i < 3:
self.opt.cache_ = {}
asked = unique_list(self.opt.ask(n_points=n_points * 5 if i > 0 else n_points))
is_random = [False for _ in range(len(asked))]
else:
asked = unique_list(self.opt.space.rvs(n_samples=n_points * 5))
is_random = [True for _ in range(len(asked))]
is_random_non_tried += [
rand
for x, rand in zip(asked, is_random)
if x not in self.opt.Xi and x not in asked_non_tried
]
asked_non_tried += [
x for x in asked if x not in self.opt.Xi and x not in asked_non_tried
]
i += 1
if asked_non_tried:
return (
asked_non_tried[: min(len(asked_non_tried), n_points)],
is_random_non_tried[: min(len(asked_non_tried), n_points)],
)
else:
return self.opt.ask(n_points=n_points), [False for _ in range(n_points)]
def evaluate_result(self, val: dict[str, Any], current: int, is_random: bool):
"""
Evaluate results returned from generate_optimizer
"""
val["current_epoch"] = current
val["is_initial_point"] = current <= INITIAL_POINTS
logger.debug("Optimizer epoch evaluated: %s", val)
is_best = HyperoptTools.is_best_loss(val, self.current_best_loss)
# This value is assigned here and not in the optimization method
# to keep proper order in the list of results. That's because
# evaluations can take different time. Here they are aligned in the
# order they will be shown to the user.
val["is_best"] = is_best
val["is_random"] = is_random
self.print_results(val)
if is_best:
self.current_best_loss = val["loss"]
self.current_best_epoch = val
self._save_result(val)
def start(self) -> None:
self.random_state = self._set_random_state(self.config.get("hyperopt_random_state"))
logger.info(f"Using optimizer random state: {self.random_state}")
self.hyperopt_table_header = -1
# Initialize spaces ...
self.init_spaces()
self.prepare_hyperopt_data()
# We don't need exchange instance anymore while running hyperopt
self.backtesting.exchange.close()
self.backtesting.exchange._api = None
self.backtesting.exchange._api_async = None
self.backtesting.exchange.loop = None # type: ignore
self.backtesting.exchange._loop_lock = None # type: ignore
self.backtesting.exchange._cache_lock = None # type: ignore
# self.backtesting.exchange = None # type: ignore
self.backtesting.pairlists = None # type: ignore
cpus = cpu_count()
logger.info(f"Found {cpus} CPU cores. Let's make them scream!")
config_jobs = self.config.get("hyperopt_jobs", -1)
logger.info(f"Number of parallel jobs set as: {config_jobs}")
self.opt = self.get_optimizer(self.dimensions, config_jobs)
try:
with Parallel(n_jobs=config_jobs) as parallel:
jobs = parallel._effective_n_jobs()
logger.info(f"Effective number of parallel workers used: {jobs}")
console = Console(
color_system="auto" if self.print_colorized else None,
)
# Define progressbar
with get_progress_tracker(
console=console,
cust_callables=[self._hyper_out],
) as pbar:
task = pbar.add_task("Epochs", total=self.total_epochs)
start = 0
if self.analyze_per_epoch:
# First analysis not in parallel mode when using --analyze-per-epoch.
# This allows dataprovider to load its informative cache.
asked, is_random = self.get_asked_points(n_points=1)
f_val0 = self.generate_optimizer(asked[0])
self.opt.tell(asked, [f_val0["loss"]])
self.evaluate_result(f_val0, 1, is_random[0])
pbar.update(task, advance=1)
start += 1
evals = ceil((self.total_epochs - start) / jobs)
for i in range(evals):
# Correct the number of epochs to be processed for the last
# iteration (should not exceed self.total_epochs in total)
n_rest = (i + 1) * jobs - (self.total_epochs - start)
current_jobs = jobs - n_rest if n_rest > 0 else jobs
asked, is_random = self.get_asked_points(n_points=current_jobs)
f_val = self.run_optimizer_parallel(parallel, asked)
self.opt.tell(asked, [v["loss"] for v in f_val])
for j, val in enumerate(f_val):
# Use human-friendly indexes here (starting from 1)
current = i * jobs + j + 1 + start
self.evaluate_result(val, current, is_random[j])
pbar.update(task, advance=1)
except KeyboardInterrupt:
print("User interrupted..")
logger.info(
f"{self.num_epochs_saved} {plural(self.num_epochs_saved, 'epoch')} "
f"saved to '{self.results_file}'."
)
if self.current_best_epoch:
HyperoptTools.try_export_params(
self.config, self.backtesting.strategy.get_strategy_name(), self.current_best_epoch
)
HyperoptTools.show_epoch_details(
self.current_best_epoch, self.total_epochs, self.print_json
)
elif self.num_epochs_saved > 0:
print(
f"No good result found for given optimization function in {self.num_epochs_saved} "
f"{plural(self.num_epochs_saved, 'epoch')}."
)
else:
# This is printed when Ctrl+C is pressed quickly, before first epochs have
# a chance to be evaluated.
print("No epochs evaluated yet, no best result.")

View File

@@ -1,6 +1,6 @@
import sys
from os import get_terminal_size
from typing import Any, Optional
from typing import Any
from rich.align import Align
from rich.console import Console
@@ -37,7 +37,7 @@ class HyperoptOutput:
self.table.add_column("Objective", justify="right")
self.table.add_column("Max Drawdown (Acct)", justify="right")
def print(self, console: Optional[Console] = None, *, print_colorized=True):
def print(self, console: Console | None = None, *, print_colorized=True):
if not console:
console = Console(
color_system="auto" if print_colorized else None,
@@ -57,7 +57,7 @@ class HyperoptOutput:
stake_currency = config["stake_currency"]
self._results.extend(results)
max_rows: Optional[int] = None
max_rows: int | None = None
if self._streaming:
try:

View File

@@ -3,7 +3,7 @@ from collections.abc import Iterator
from copy import deepcopy
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Optional
from typing import Any
import numpy as np
import rapidjson
@@ -44,7 +44,7 @@ class HyperoptStateContainer:
class HyperoptTools:
@staticmethod
def get_strategy_filename(config: Config, strategy_name: str) -> Optional[Path]:
def get_strategy_filename(config: Config, strategy_name: str) -> Path | None:
"""
Get Strategy-location (filename) from strategy_name
"""
@@ -188,7 +188,7 @@ class HyperoptTools:
total_epochs: int,
print_json: bool,
no_header: bool = False,
header_str: Optional[str] = None,
header_str: str | None = None,
) -> None:
"""
Display details of the hyperopt result
@@ -257,7 +257,7 @@ class HyperoptTools:
@staticmethod
def _params_pretty_print(
params, space: str, header: str, non_optimized: Optional[dict] = None
params, space: str, header: str, non_optimized: dict | None = None
) -> None:
if space in params or (non_optimized and space in non_optimized):
space_params = HyperoptTools._space_params(params, space, 5)
@@ -299,7 +299,7 @@ class HyperoptTools:
print(result)
@staticmethod
def _space_params(params, space: str, r: Optional[int] = None) -> dict:
def _space_params(params, space: str, r: int | None = None) -> dict:
d = params.get(space)
if d:
# Round floats to `r` digits after the decimal point if requested

View File

@@ -1,5 +1,5 @@
import logging
from typing import Any, Literal, Union
from typing import Any, Literal
from freqtrade.constants import UNLIMITED_STAKE_AMOUNT, Config
from freqtrade.ft_types import BacktestResultType
@@ -18,7 +18,7 @@ def _get_line_floatfmt(stake_currency: str) -> list[str]:
def _get_line_header(
first_column: Union[str, list[str]], stake_currency: str, direction: str = "Trades"
first_column: str | list[str], stake_currency: str, direction: str = "Trades"
) -> list[str]:
"""
Generate header lines (goes in line with _generate_result_line())
@@ -172,7 +172,7 @@ def text_table_strategy(strategy_results, stake_currency: str, title: str):
dd_pad_per = max([len(dd) for dd in drawdown])
drawdown = [
f'{t["max_drawdown_abs"]:>{dd_pad_abs}} {stake_currency} {dd:>{dd_pad_per}}%'
for t, dd in zip(strategy_results, drawdown)
for t, dd in zip(strategy_results, drawdown, strict=False)
]
output = [
@@ -186,7 +186,7 @@ def text_table_strategy(strategy_results, stake_currency: str, title: str):
generate_wins_draws_losses(t["wins"], t["draws"], t["losses"]),
drawdown,
]
for t, drawdown in zip(strategy_results, drawdown)
for t, drawdown in zip(strategy_results, drawdown, strict=False)
]
print_rich_table(output, headers, summary=title)

View File

@@ -1,6 +1,5 @@
import logging
from pathlib import Path
from typing import Optional
from pandas import DataFrame
@@ -35,7 +34,7 @@ def store_backtest_stats(
stats: BacktestResultType,
dtappendix: str,
*,
market_change_data: Optional[DataFrame] = None,
market_change_data: DataFrame | None = None,
) -> Path:
"""
Stores backtest results

View File

@@ -1,7 +1,7 @@
import logging
from copy import deepcopy
from datetime import datetime, timedelta, timezone
from typing import Any, Literal, Union
from typing import Any, Literal
import numpy as np
from pandas import DataFrame, Series, concat, to_datetime
@@ -69,7 +69,7 @@ def generate_rejected_signals(
def _generate_result_line(
result: DataFrame, starting_balance: int, first_column: Union[str, list[str]]
result: DataFrame, starting_balance: int, first_column: str | list[str]
) -> dict:
"""
Generate one result dict, with "first_column" as key.
@@ -143,7 +143,7 @@ def generate_pair_metrics(
def generate_tag_metrics(
tag_type: Union[Literal["enter_tag", "exit_reason"], list[Literal["enter_tag", "exit_reason"]]],
tag_type: Literal["enter_tag", "exit_reason"] | list[Literal["enter_tag", "exit_reason"]],
starting_balance: int,
results: DataFrame,
skip_nan: bool = False,
@@ -208,7 +208,7 @@ def _get_resample_from_period(period: str) -> str:
def generate_periodic_breakdown_stats(
trade_list: Union[list, DataFrame], period: str
trade_list: list | DataFrame, period: str
) -> list[dict[str, Any]]:
results = trade_list if not isinstance(trade_list, list) else DataFrame.from_records(trade_list)
if len(results) == 0:
@@ -559,7 +559,7 @@ def generate_strategy_stats(
def generate_backtest_stats(
btdata: dict[str, DataFrame],
all_results: dict[str, dict[str, Union[DataFrame, dict]]],
all_results: dict[str, dict[str, DataFrame | dict]],
min_date: datetime,
max_date: datetime,
) -> BacktestResultType:

View File

@@ -2,7 +2,7 @@ import json
import logging
from collections.abc import Sequence
from datetime import datetime
from typing import Any, ClassVar, Optional
from typing import Any, ClassVar
from sqlalchemy import DateTime, ForeignKey, Integer, String, Text, UniqueConstraint, select
from sqlalchemy.orm import Mapped, mapped_column, relationship
@@ -42,7 +42,7 @@ class _CustomData(ModelBase):
cd_type: Mapped[str] = mapped_column(String(25), nullable=False)
cd_value: Mapped[str] = mapped_column(Text, nullable=False)
created_at: Mapped[datetime] = mapped_column(DateTime, nullable=False, default=dt_now)
updated_at: Mapped[Optional[datetime]] = mapped_column(DateTime, nullable=True)
updated_at: Mapped[datetime | None] = mapped_column(DateTime, nullable=True)
# Empty container value - not persisted, but filled with cd_value on query
value: Any = None
@@ -62,7 +62,7 @@ class _CustomData(ModelBase):
@classmethod
def query_cd(
cls, key: Optional[str] = None, trade_id: Optional[int] = None
cls, key: str | None = None, trade_id: int | None = None
) -> Sequence["_CustomData"]:
"""
Get all CustomData, if trade_id is not specified
@@ -117,7 +117,7 @@ class CustomDataWrapper:
_CustomData.session.commit()
@staticmethod
def get_custom_data(*, trade_id: int, key: Optional[str] = None) -> list[_CustomData]:
def get_custom_data(*, trade_id: int, key: str | None = None) -> list[_CustomData]:
if CustomDataWrapper.use_db:
filters = [
_CustomData.ft_trade_id == trade_id,

View File

@@ -1,6 +1,6 @@
from datetime import datetime, timezone
from enum import Enum
from typing import ClassVar, Optional, Union
from typing import ClassVar
from sqlalchemy import String
from sqlalchemy.orm import Mapped, mapped_column
@@ -8,7 +8,7 @@ from sqlalchemy.orm import Mapped, mapped_column
from freqtrade.persistence.base import ModelBase, SessionType
ValueTypes = Union[str, datetime, float, int]
ValueTypes = str | datetime | float | int
class ValueTypesEnum(str, Enum):
@@ -37,10 +37,10 @@ class _KeyValueStoreModel(ModelBase):
value_type: Mapped[ValueTypesEnum] = mapped_column(String(20), nullable=False)
string_value: Mapped[Optional[str]] = mapped_column(String(255), nullable=True)
datetime_value: Mapped[Optional[datetime]]
float_value: Mapped[Optional[float]]
int_value: Mapped[Optional[int]]
string_value: Mapped[str | None] = mapped_column(String(255), nullable=True)
datetime_value: Mapped[datetime | None]
float_value: Mapped[float | None]
int_value: Mapped[int | None]
class KeyValueStore:
@@ -97,7 +97,7 @@ class KeyValueStore:
_KeyValueStoreModel.session.commit()
@staticmethod
def get_value(key: KeyStoreKeys) -> Optional[ValueTypes]:
def get_value(key: KeyStoreKeys) -> ValueTypes | None:
"""
Get the value for the given key.
:param key: Key to get the value for
@@ -121,7 +121,7 @@ class KeyValueStore:
raise ValueError(f"Unknown value type {kv.value_type}") # pragma: no cover
@staticmethod
def get_string_value(key: KeyStoreKeys) -> Optional[str]:
def get_string_value(key: KeyStoreKeys) -> str | None:
"""
Get the value for the given key.
:param key: Key to get the value for
@@ -139,7 +139,7 @@ class KeyValueStore:
return kv.string_value
@staticmethod
def get_datetime_value(key: KeyStoreKeys) -> Optional[datetime]:
def get_datetime_value(key: KeyStoreKeys) -> datetime | None:
"""
Get the value for the given key.
:param key: Key to get the value for
@@ -157,7 +157,7 @@ class KeyValueStore:
return kv.datetime_value.replace(tzinfo=timezone.utc)
@staticmethod
def get_float_value(key: KeyStoreKeys) -> Optional[float]:
def get_float_value(key: KeyStoreKeys) -> float | None:
"""
Get the value for the given key.
:param key: Key to get the value for
@@ -175,7 +175,7 @@ class KeyValueStore:
return kv.float_value
@staticmethod
def get_int_value(key: KeyStoreKeys) -> Optional[int]:
def get_int_value(key: KeyStoreKeys) -> int | None:
"""
Get the value for the given key.
:param key: Key to get the value for

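The KeyValueStore model above keeps one nullable column per supported value type, and get_value dispatches on value_type, raising for anything unknown. A self-contained sketch of that dispatch pattern, using a dataclass as a stand-in for the SQLAlchemy row (not freqtrade code):

from dataclasses import dataclass
from datetime import datetime, timezone
from enum import Enum

ValueTypes = str | datetime | float | int


class ValueTypesEnum(str, Enum):
    STRING = "str"
    DATETIME = "datetime"
    FLOAT = "float"
    INT = "int"


@dataclass
class FakeRow:
    value_type: ValueTypesEnum
    string_value: str | None = None
    datetime_value: datetime | None = None
    float_value: float | None = None
    int_value: int | None = None


def get_value(row: FakeRow) -> ValueTypes | None:
    if row.value_type == ValueTypesEnum.STRING:
        return row.string_value
    if row.value_type == ValueTypesEnum.DATETIME:
        # Stored naive, returned timezone-aware, mirroring the hunk above.
        return row.datetime_value.replace(tzinfo=timezone.utc) if row.datetime_value else None
    if row.value_type == ValueTypesEnum.FLOAT:
        return row.float_value
    if row.value_type == ValueTypesEnum.INT:
        return row.int_value
    raise ValueError(f"Unknown value type {row.value_type}")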
View File

@@ -1,5 +1,4 @@
import logging
from typing import Optional
from sqlalchemy import inspect, select, text, update
@@ -32,8 +31,8 @@ def get_backup_name(tabs: list[str], backup_prefix: str):
def get_last_sequence_ids(engine, trade_back_name: str, order_back_name: str):
order_id: Optional[int] = None
trade_id: Optional[int] = None
order_id: int | None = None
trade_id: int | None = None
if engine.name == "postgresql":
with engine.begin() as connection:

View File

@@ -5,7 +5,7 @@ This module contains the class to persist trades into SQLite
import logging
import threading
from contextvars import ContextVar
from typing import Any, Final, Optional
from typing import Any, Final
from sqlalchemy import create_engine, inspect
from sqlalchemy.exc import NoSuchModuleError
@@ -25,10 +25,10 @@ logger = logging.getLogger(__name__)
REQUEST_ID_CTX_KEY: Final[str] = "request_id"
_request_id_ctx_var: ContextVar[Optional[str]] = ContextVar(REQUEST_ID_CTX_KEY, default=None)
_request_id_ctx_var: ContextVar[str | None] = ContextVar(REQUEST_ID_CTX_KEY, default=None)
def get_request_or_thread_id() -> Optional[str]:
def get_request_or_thread_id() -> str | None:
"""
Helper method to get either async context (for fastapi requests), or thread id
"""

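The models.py hunk only modernises the annotations around the request-id context variable; the surrounding pattern (use the FastAPI request id when inside a request, otherwise fall back to the thread id) looks roughly like the sketch below, whose function body is illustrative rather than copied from the source:

import threading
from contextvars import ContextVar
from typing import Final

REQUEST_ID_CTX_KEY: Final[str] = "request_id"
_request_id_ctx_var: ContextVar[str | None] = ContextVar(REQUEST_ID_CTX_KEY, default=None)


def get_request_or_thread_id() -> str | None:
    """Return the async request id when inside a request, else the thread id."""
    request_id = _request_id_ctx_var.get()
    if request_id is None:
        # Not inside a FastAPI request context - fall back to the thread identity.
        request_id = str(threading.current_thread().ident)
    return request_id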
View File

@@ -1,5 +1,5 @@
from datetime import datetime, timezone
from typing import Any, ClassVar, Optional
from typing import Any, ClassVar
from sqlalchemy import ScalarResult, String, or_, select
from sqlalchemy.orm import Mapped, mapped_column
@@ -21,7 +21,7 @@ class PairLock(ModelBase):
pair: Mapped[str] = mapped_column(String(25), nullable=False, index=True)
# lock direction - long, short or * (for both)
side: Mapped[str] = mapped_column(String(25), nullable=False, default="*")
reason: Mapped[Optional[str]] = mapped_column(String(255), nullable=True)
reason: Mapped[str | None] = mapped_column(String(255), nullable=True)
# Time the pair was locked (start time)
lock_time: Mapped[datetime] = mapped_column(nullable=False)
# Time until the pair is locked (end time)
@@ -39,7 +39,7 @@ class PairLock(ModelBase):
@staticmethod
def query_pair_locks(
pair: Optional[str], now: datetime, side: str = "*"
pair: str | None, now: datetime, side: str = "*"
) -> ScalarResult["PairLock"]:
"""
Get all currently active locks for this pair

View File

@@ -1,7 +1,6 @@
import logging
from collections.abc import Sequence
from datetime import datetime, timezone
from typing import Optional
from sqlalchemy import select
@@ -36,9 +35,9 @@ class PairLocks:
def lock_pair(
pair: str,
until: datetime,
reason: Optional[str] = None,
reason: str | None = None,
*,
now: Optional[datetime] = None,
now: datetime | None = None,
side: str = "*",
) -> PairLock:
"""
@@ -68,7 +67,7 @@ class PairLocks:
@staticmethod
def get_pair_locks(
pair: Optional[str], now: Optional[datetime] = None, side: str = "*"
pair: str | None, now: datetime | None = None, side: str = "*"
) -> Sequence[PairLock]:
"""
Get all currently active locks for this pair
@@ -96,8 +95,8 @@ class PairLocks:
@staticmethod
def get_pair_longest_lock(
pair: str, now: Optional[datetime] = None, side: str = "*"
) -> Optional[PairLock]:
pair: str, now: datetime | None = None, side: str = "*"
) -> PairLock | None:
"""
Get the lock that expires the latest for the pair given.
"""
@@ -106,7 +105,7 @@ class PairLocks:
return locks[0] if locks else None
@staticmethod
def unlock_pair(pair: str, now: Optional[datetime] = None, side: str = "*") -> None:
def unlock_pair(pair: str, now: datetime | None = None, side: str = "*") -> None:
"""
Release all locks for this pair.
:param pair: Pair to unlock
@@ -124,7 +123,7 @@ class PairLocks:
PairLock.session.commit()
@staticmethod
def unlock_reason(reason: str, now: Optional[datetime] = None) -> None:
def unlock_reason(reason: str, now: datetime | None = None) -> None:
"""
Release all locks for this reason.
:param reason: Which reason to unlock
@@ -155,7 +154,7 @@ class PairLocks:
lock.active = False
@staticmethod
def is_global_lock(now: Optional[datetime] = None, side: str = "*") -> bool:
def is_global_lock(now: datetime | None = None, side: str = "*") -> bool:
"""
:param now: Datetime object (generated via datetime.now(timezone.utc)).
defaults to datetime.now(timezone.utc)
@@ -166,7 +165,7 @@ class PairLocks:
return len(PairLocks.get_pair_locks("*", now, side)) > 0
@staticmethod
def is_pair_locked(pair: str, now: Optional[datetime] = None, side: str = "*") -> bool:
def is_pair_locked(pair: str, now: datetime | None = None, side: str = "*") -> bool:
"""
:param pair: Pair to check for
:param now: Datetime object (generated via datetime.now(timezone.utc)).

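The PairLocks signatures above keep their defaults; only the Optional spellings change. An illustrative call sequence, assuming freqtrade's persistence layer has already been initialised and using a made-up pair and reason (the import path is assumed):

from datetime import datetime, timedelta, timezone

from freqtrade.persistence import PairLocks  # assumed import path

now = datetime.now(timezone.utc)
PairLocks.lock_pair("BTC/USDT", until=now + timedelta(minutes=30), reason="cooldown")

if PairLocks.is_pair_locked("BTC/USDT", now=now):
    longest_lock = PairLocks.get_pair_longest_lock("BTC/USDT", now=now)  # PairLock | None

PairLocks.unlock_reason("cooldown", now=now)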
Some files were not shown because too many files have changed in this diff.