Merge pull request #10271 from freqtrade/new_release

New release 2024.5
This commit is contained in:
Matthias
2024-05-30 18:01:26 +02:00
committed by GitHub
431 changed files with 46277 additions and 37244 deletions

View File

@@ -18,15 +18,22 @@
"editor.insertSpaces": true,
"files.trimTrailingWhitespace": true,
"[markdown]": {
"files.trimTrailingWhitespace": false,
"files.trimTrailingWhitespace": false
},
"python.pythonPath": "/usr/local/bin/python",
"[python]": {
"editor.codeActionsOnSave": {
"source.organizeImports": "explicit"
},
"editor.formatOnSave": true,
"editor.defaultFormatter": "charliermarsh.ruff"
}
},
// Add the IDs of extensions you want installed when the container is created.
"extensions": [
"ms-python.python",
"ms-python.vscode-pylance",
"ms-python.isort",
"charliermarsh.ruff",
"davidanson.vscode-markdownlint",
"ms-azuretools.vscode-docker",
"vscode-icons-team.vscode-icons",

View File

@@ -19,7 +19,7 @@ jobs:
- uses: actions/setup-python@v5
with:
python-version: "3.11"
python-version: "3.12"
- name: Install ccxt
run: pip install ccxt

View File

@@ -111,7 +111,11 @@ jobs:
- name: Run Ruff
run: |
ruff check --output-format=github .
ruff check --output-format=github
- name: Run Ruff format check
run: |
ruff format --check
- name: Mypy
run: |
@@ -230,7 +234,11 @@ jobs:
- name: Run Ruff
run: |
ruff check --output-format=github .
ruff check --output-format=github
- name: Run Ruff format check
run: |
ruff format --check
- name: Mypy
run: |
@@ -300,7 +308,11 @@ jobs:
- name: Run Ruff
run: |
ruff check --output-format=github .
ruff check --output-format=github
- name: Run Ruff format check
run: |
ruff format --check
- name: Mypy
run: |
@@ -322,7 +334,7 @@ jobs:
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: "3.10"
python-version: "3.12"
- name: pre-commit dependencies
run: |
@@ -336,7 +348,7 @@ jobs:
- uses: actions/setup-python@v5
with:
python-version: "3.10"
python-version: "3.12"
- uses: pre-commit/action@v3.0.1
docs-check:
@@ -351,7 +363,7 @@ jobs:
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: "3.11"
python-version: "3.12"
- name: Documentation build
run: |
@@ -377,7 +389,7 @@ jobs:
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: "3.11"
python-version: "3.12"
- name: Cache_dependencies
uses: actions/cache@v4
@@ -459,7 +471,7 @@ jobs:
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: "3.11"
python-version: "3.12"
- name: Build distribution
run: |
@@ -530,7 +542,7 @@ jobs:
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: "3.11"
python-version: "3.12"
- name: Extract branch name
id: extract-branch
@@ -553,12 +565,12 @@ jobs:
sudo systemctl restart docker
docker version -f '{{.Server.Experimental}}'
- name: Set up QEMU
uses: docker/setup-qemu-action@v1
- name: Set up Docker Buildx
id: buildx
uses: crazy-max/ghaction-docker-buildx@v3.3.1
with:
buildx-version: latest
qemu-version: latest
uses: docker/setup-buildx-action@v1
- name: Available platforms
run: echo ${{ steps.buildx.outputs.platforms }}

View File

@@ -2,6 +2,8 @@ name: Devcontainer Pre-Build
on:
workflow_dispatch:
schedule:
- cron: "0 3 * * 0"
# push:
# branches:
# - "master"

View File

@@ -17,7 +17,7 @@ jobs:
- uses: actions/setup-python@v5
with:
python-version: "3.11"
python-version: "3.12"
- name: Install pre-commit
@@ -26,9 +26,6 @@ jobs:
- name: Run auto-update
run: pre-commit autoupdate
- name: Run pre-commit
run: pre-commit run --all-files
- uses: peter-evans/create-pull-request@v6
with:
token: ${{ secrets.REPO_SCOPED_TOKEN }}

View File

@@ -16,10 +16,10 @@ repos:
additional_dependencies:
- types-cachetools==5.3.0.7
- types-filelock==3.2.7
- types-requests==2.31.0.20240406
- types-requests==2.32.0.20240523
- types-tabulate==0.9.0.20240106
- types-python-dateutil==2.9.0.20240316
- SQLAlchemy==2.0.29
- SQLAlchemy==2.0.30
# stages: [push]
- repo: https://github.com/pycqa/isort
@@ -31,7 +31,7 @@ repos:
- repo: https://github.com/charliermarsh/ruff-pre-commit
# Ruff version.
rev: 'v0.4.2'
rev: 'v0.4.5'
hooks:
- id: ruff
@@ -56,7 +56,7 @@ repos:
)$
- repo: https://github.com/codespell-project/codespell
rev: v2.2.6
rev: v2.3.0
hooks:
- id: codespell
additional_dependencies:

.vscode/extensions.json vendored Normal file (+11 lines)
View File

@@ -0,0 +1,11 @@
{
"recommendations": [
"ms-python.python",
"ms-python.vscode-pylance",
"charliermarsh.ruff",
"davidanson.vscode-markdownlint",
"ms-azuretools.vscode-docker",
"vscode-icons-team.vscode-icons",
"github.vscode-github-actions",
]
}

View File

@@ -72,12 +72,12 @@ you can manually run pre-commit with `pre-commit run -a`.
mypy freqtrade
```
### 4. Ensure all imports are correct
### 4. Ensure formatting is correct
#### Run isort
#### Run ruff
``` bash
isort .
ruff format .
```
## (Core)-Committer Guide

View File

@@ -29,6 +29,7 @@ Please read the [exchange specific notes](docs/exchanges.md) to learn about even
- [X] [Binance](https://www.binance.com/)
- [X] [Bitmart](https://bitmart.com/)
- [X] [BingX](https://bingx.com/invite/0EM9RX)
- [X] [Gate.io](https://www.gate.io/ref/6266643)
- [X] [HTX](https://www.htx.com/) (Former Huobi)
- [X] [Kraken](https://kraken.com/)

5 binary files changed (not shown).

View File

@@ -6,21 +6,18 @@ from pathlib import Path
import ccxt
key = os.environ.get('FREQTRADE__EXCHANGE__KEY')
secret = os.environ.get('FREQTRADE__EXCHANGE__SECRET')
key = os.environ.get("FREQTRADE__EXCHANGE__KEY")
secret = os.environ.get("FREQTRADE__EXCHANGE__SECRET")
proxy = os.environ.get('CI_WEB_PROXY')
proxy = os.environ.get("CI_WEB_PROXY")
exchange = ccxt.binance({
'apiKey': key,
'secret': secret,
'httpsProxy': proxy,
'options': {'defaultType': 'swap'}
})
exchange = ccxt.binance(
{"apiKey": key, "secret": secret, "httpsProxy": proxy, "options": {"defaultType": "swap"}}
)
_ = exchange.load_markets()
lev_tiers = exchange.fetch_leverage_tiers()
# Assumes this is running in the root of the repository.
file = Path('freqtrade/exchange/binance_leverage_tiers.json')
json.dump(dict(sorted(lev_tiers.items())), file.open('w'), indent=2)
file = Path("freqtrade/exchange/binance_leverage_tiers.json")
json.dump(dict(sorted(lev_tiers.items())), file.open("w"), indent=2)

View File

@@ -1,18 +1,15 @@
#!/usr/bin/env python3
from freqtrade_client import __version__ as client_version
from freqtrade import __version__ as ft_version
from freqtrade_client import __version__ as client_version
def main():
if ft_version != client_version:
print(f"Versions do not match: \n"
f"ft: {ft_version} \n"
f"client: {client_version}")
print(f"Versions do not match: \nft: {ft_version} \nclient: {client_version}")
exit(1)
print(f"Versions match: ft: {ft_version}, client: {client_version}")
exit(0)
if __name__ == '__main__':
if __name__ == "__main__":
main()

View File

@@ -6,28 +6,30 @@ from pathlib import Path
import yaml
pre_commit_file = Path('.pre-commit-config.yaml')
require_dev = Path('requirements-dev.txt')
require = Path('requirements.txt')
pre_commit_file = Path(".pre-commit-config.yaml")
require_dev = Path("requirements-dev.txt")
require = Path("requirements.txt")
with require_dev.open('r') as rfile:
with require_dev.open("r") as rfile:
requirements = rfile.readlines()
with require.open('r') as rfile:
with require.open("r") as rfile:
requirements.extend(rfile.readlines())
# Extract types only
type_reqs = [r.strip('\n') for r in requirements if r.startswith(
'types-') or r.startswith('SQLAlchemy')]
type_reqs = [
r.strip("\n") for r in requirements if r.startswith("types-") or r.startswith("SQLAlchemy")
]
with pre_commit_file.open('r') as file:
with pre_commit_file.open("r") as file:
f = yaml.load(file, Loader=yaml.SafeLoader)
mypy_repo = [repo for repo in f['repos'] if repo['repo']
== 'https://github.com/pre-commit/mirrors-mypy']
mypy_repo = [
repo for repo in f["repos"] if repo["repo"] == "https://github.com/pre-commit/mirrors-mypy"
]
hooks = mypy_repo[0]['hooks'][0]['additional_dependencies']
hooks = mypy_repo[0]["hooks"][0]["additional_dependencies"]
errors = []
for hook in hooks:

View File

@@ -35,7 +35,7 @@ COPY build_helpers/* /tmp/
COPY --chown=ftuser:ftuser requirements.txt /freqtrade/
USER ftuser
RUN pip install --user --no-cache-dir numpy \
&& pip install --user --no-index --find-links /tmp/ pyarrow TA-Lib==0.4.28 \
&& pip install --user --no-index --find-links /tmp/ pyarrow TA-Lib \
&& pip install --user --no-cache-dir -r requirements.txt
# Copy dependencies to runtime-image

11 binary image files added, 50–242 KiB each (FreqUI documentation screenshots; not shown).

View File

@@ -568,7 +568,14 @@ The possible values are: `GTC` (default), `FOK` or `IOC`.
This is ongoing work. For now, it is supported only for binance, gate and kucoin.
Please don't change the default value unless you know what you are doing and have researched the impact of using different values for your particular exchange.
### What values can be used for fiat_display_currency?
### Fiat conversion
Freqtrade uses the Coingecko API to convert the coin value to its corresponding fiat value for the Telegram reports.
The FIAT currency can be set in the configuration file as `fiat_display_currency`.
Removing `fiat_display_currency` completely from the configuration will skip initializing coingecko, and will not show any FIAT currency conversion. This has no bearing on the correct functioning of the bot.
#### What values can be used for fiat_display_currency?
The `fiat_display_currency` configuration parameter sets the base currency to use for the
conversion from coin to fiat in the bot Telegram reports.
@@ -587,7 +594,25 @@ The valid values are:
"BTC", "ETH", "XRP", "LTC", "BCH", "BNB"
```
Removing `fiat_display_currency` completely from the configuration will skip initializing coingecko, and will not show any FIAT currency conversion. This has no importance for the correct functioning of the bot.
#### Coingecko Rate limit problems
On some IP ranges, Coingecko rate-limits heavily.
In such cases, you may want to add your coingecko API key to the configuration.
``` json
{
"fiat_display_currency": "USD",
"coingecko": {
"api_key": "your-api",
"is_demo": true
}
}
```
Freqtrade supports both Demo and Pro coingecko API keys.
The Coingecko API key is NOT required for the bot to function correctly.
It is only used for the conversion of coin to fiat in the Telegram reports, which usually also work without an API key.
## Using Dry-run mode

View File

@@ -24,10 +24,10 @@ usage: freqtrade download-data [-h] [-v] [--logfile FILE] [-V] [-c PATH]
[--days INT] [--new-pairs-days INT]
[--include-inactive-pairs]
[--timerange TIMERANGE] [--dl-trades]
[--exchange EXCHANGE]
[--convert] [--exchange EXCHANGE]
[-t TIMEFRAMES [TIMEFRAMES ...]] [--erase]
[--data-format-ohlcv {json,jsongz,hdf5,feather,parquet}]
[--data-format-trades {json,jsongz,hdf5,feather}]
[--data-format-trades {json,jsongz,hdf5,feather,parquet}]
[--trading-mode {spot,margin,futures}]
[--prepend]
@@ -48,6 +48,11 @@ options:
--dl-trades Download trades instead of OHLCV data. The bot will
resample trades to the desired timeframe as specified
as --timeframes/-t.
--convert Convert downloaded trades to OHLCV data. Only
applicable in combination with `--dl-trades`. Will be
automatic for exchanges which don't have historic
OHLCV (e.g. Kraken). If not provided, use `trades-to-
ohlcv` to convert trades data to OHLCV data.
--exchange EXCHANGE Exchange name. Only valid if no config is provided.
-t TIMEFRAMES [TIMEFRAMES ...], --timeframes TIMEFRAMES [TIMEFRAMES ...]
Specify which tickers to download. Space-separated
@@ -57,7 +62,7 @@ options:
--data-format-ohlcv {json,jsongz,hdf5,feather,parquet}
Storage format for downloaded candle (OHLCV) data.
(default: `feather`).
--data-format-trades {json,jsongz,hdf5,feather}
--data-format-trades {json,jsongz,hdf5,feather,parquet}
Storage format for downloaded trades data. (default:
`feather`).
--trading-mode {spot,margin,futures}, --tradingmode {spot,margin,futures}
@@ -471,15 +476,20 @@ ETH/USDT 5m, 15m, 30m, 1h, 2h, 4h
## Trades (tick) data
By default, `download-data` sub-command downloads Candles (OHLCV) data. Some exchanges also provide historic trade-data via their API.
By default, `download-data` sub-command downloads Candles (OHLCV) data. Most exchanges also provide historic trade-data via their API.
This data can be useful if you need many different timeframes, since it is only downloaded once, and then resampled locally to the desired timeframes.
Since this data is large by default, the files use the feather fileformat by default. They are stored in your data-directory with the naming convention of `<pair>-trades.feather` (`ETH_BTC-trades.feather`). Incremental mode is also supported, as for historic OHLCV data, so downloading the data once per week with `--days 8` will create an incremental data-repository.
Since this data is large by default, the files use the feather file format by default. They are stored in your data-directory with the naming convention of `<pair>-trades.feather` (`ETH_BTC-trades.feather`). Incremental mode is also supported, as for historic OHLCV data, so downloading the data once per week with `--days 8` will create an incremental data-repository.
To use this mode, simply add `--dl-trades` to your call. This will swap the download method to download trades, and resamples the data locally.
To use this mode, simply add `--dl-trades` to your call. This will swap the download method to download trades.
If `--convert` is also provided, the resample step will happen automatically and overwrite any existing OHLCV data for the given pair/timeframe combinations.
!!! Warning "do not use"
You should not use this unless you're a kraken user. Most other exchanges provide OHLCV data with sufficient history.
!!! Warning "Do not use"
You should not use this unless you're a kraken user (Kraken does not provide historic OHLCV data).
Most other exchanges provide OHLCV data with sufficient history, so downloading multiple timeframes through that method will still proof to be a lot faster than downloading trades data.
!!! Note "Kraken user"
Kraken users should read [this](exchanges.md#historic-kraken-data) before starting to download data.
Example call:
@@ -490,12 +500,6 @@ freqtrade download-data --exchange kraken --pairs XRP/EUR ETH/EUR --days 20 --dl
!!! Note
While this method uses async calls, it will be slow, since it requires the result of the previous call to generate the next request to the exchange.
!!! Warning
The historic trades are not available during Freqtrade dry-run and live trade modes because all exchanges tested provide this data with a delay of few 100 candles, so it's not suitable for real-time trading.
!!! Note "Kraken user"
Kraken users should read [this](exchanges.md#historic-kraken-data) before starting to download data.
## Next step
Great, you now have backtest data downloaded, so you can now start [backtesting](backtesting.md) your strategy.
Great, you now have some data downloaded, so you can start [backtesting](backtesting.md) your strategy.

View File

@@ -127,6 +127,13 @@ These settings will be checked on startup, and freqtrade will show an error if t
Freqtrade will not attempt to change these settings.
## Bingx
BingX supports [time_in_force](configuration.md#understand-order_time_in_force) with the settings "GTC" (good till cancelled), "IOC" (immediate-or-cancel) and "PO" (post only).
!!! Tip "Stoploss on Exchange"
Bingx supports `stoploss_on_exchange` and can use both stop-limit and stop-market orders. It provides great advantages, so we recommend enabling stoploss on exchange to benefit from it.
## Kraken
Kraken supports [time_in_force](configuration.md#understand-order_time_in_force) with the settings "GTC" (good till cancelled), "IOC" (immediate-or-cancel) and "PO" (post only).

docs/freq-ui.md Normal file (+85 lines)
View File

@@ -0,0 +1,85 @@
# FreqUI
Freqtrade provides a builtin webserver, which can serve [FreqUI](https://github.com/freqtrade/frequi), the freqtrade frontend.
By default, the UI is automatically installed as part of the installation (script, docker).
freqUI can also be manually installed by using the `freqtrade install-ui` command.
This same command can also be used to update freqUI to new releases.
Once the bot is started in trade / dry-run mode (with `freqtrade trade`) - the UI will be available under the configured API port (by default `http://127.0.0.1:8080`).
??? Note "Looking to contribute to freqUI?"
Developers should not use this method, but instead clone the corresponding repository and use the method described in the [freqUI repository](https://github.com/freqtrade/frequi) to get the source-code of freqUI. A working installation of node will be required to build the frontend.
!!! tip "freqUI is not required to run freqtrade"
freqUI is an optional component of freqtrade, and is not required to run the bot.
It is a frontend that can be used to monitor the bot and to interact with it - but freqtrade itself will work perfectly fine without it.
## Configuration
FreqUI does not have its own configuration file - but assumes a working setup for the [rest-api](rest-api.md) is available.
Please refer to the corresponding documentation page to get set up with freqUI.
## UI
FreqUI is a modern, responsive web application that can be used to monitor and interact with your bot.
FreqUI provides a light, as well as a dark theme.
Themes can be easily switched via a prominent button at the top of the page.
The theme of the screenshots on this page will adapt to the selected documentation theme, so to see the dark (or light) version, please switch the theme of the documentation.
### Login
The below screenshot shows the login screen of freqUI.
![FreqUI - login](assets/frequi-login-CORS.png#only-dark)
![FreqUI - login](assets/frequi-login-CORS-light.png#only-light)
!!! Hint "CORS"
The CORS error shown in this screenshot is due to the fact that the UI is running on a different port than the API, and [CORS](#cors) has not been set up correctly yet.
### Trade view
The trade view allows you to visualize the trades that the bot is making and to interact with the bot.
On this page, you can also interact with the bot by starting and stopping it and - if configured - forcing trade entries and exits.
![FreqUI - trade view](assets/freqUI-trade-pane-dark.png#only-dark)
![FreqUI - trade view](assets/freqUI-trade-pane-light.png#only-light)
### Plot Configurator
FreqUI plots can be configured either via a `plot_config` configuration object in the strategy (which can be loaded via the "from strategy" button) or via the UI.
Multiple plot configurations can be created and switched at will - allowing for flexible, different views into your charts.
The plot configuration can be accessed via the "Plot Configurator" (Cog icon) button in the top right corner of the trade view.
![FreqUI - plot configuration](assets/freqUI-plot-configurator-dark.png#only-dark)
![FreqUI - plot configuration](assets/freqUI-plot-configurator-light.png#only-light)
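For reference, a minimal strategy-side `plot_config` could look like the sketch below (the indicator columns `ema10` and `rsi` are hypothetical and must exist in your dataframe):
```python
# Sketch of a plot_config class attribute on a strategy.
# "main_plot" entries are drawn on the candle chart,
# "subplots" entries each get their own pane below it.
plot_config = {
    "main_plot": {
        "ema10": {"color": "red"},  # hypothetical indicator column
    },
    "subplots": {
        "RSI": {
            "rsi": {"color": "blue"},  # hypothetical indicator column
        },
    },
}
```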
### Settings
Several UI related settings can be changed by accessing the settings page.
Things you can change (among others):
* Timezone of the UI
* Visualization of open trades as part of the favicon (browser tab)
* Candle colors (up/down -> red/green)
* Enable / disable in-app notification types
![FreqUI - Settings view](assets/frequi-settings-dark.png#only-dark)
![FreqUI - Settings view](assets/frequi-settings-light.png#only-light)
## Backtesting
When freqtrade is started in [webserver mode](utils.md#webserver-mode) (freqtrade started with `freqtrade webserver`), the backtesting view becomes available.
This view allows you to backtest strategies and visualize the results.
You can also load and visualize previous backtest results, as well as compare the results with each other.
![FreqUI - Backtesting](assets/freqUI-backtesting-dark.png#only-dark)
![FreqUI - Backtesting](assets/freqUI-backtesting-light.png#only-light)
--8<-- "includes/cors.md"

View File

@@ -224,7 +224,7 @@ where $W_i$ is the weight of data point $i$ in a total set of $n$ data points. B
## Building the data pipeline
By default, FreqAI builds a dynamic pipeline based on user congfiguration settings. The default settings are robust and designed to work with a variety of methods. These two steps are a `MinMaxScaler(-1,1)` and a `VarianceThreshold` which removes any column that has 0 variance. Users can activate other steps with more configuration parameters. For example if users add `use_SVM_to_remove_outliers: true` to the `freqai` config, then FreqAI will automatically add the [`SVMOutlierExtractor`](#identifying-outliers-using-a-support-vector-machine-svm) to the pipeline. Likewise, users can add `principal_component_analysis: true` to the `freqai` config to activate PCA. The [DissimilarityIndex](#identifying-outliers-with-the-dissimilarity-index-di) is activated with `DI_threshold: 1`. Finally, noise can also be added to the data with `noise_standard_deviation: 0.1`. Finally, users can add [DBSCAN](#identifying-outliers-with-dbscan) outlier removal with `use_DBSCAN_to_remove_outliers: true`.
By default, FreqAI builds a dynamic pipeline based on user configuration settings. The default settings are robust and designed to work with a variety of methods. The default pipeline consists of two steps: a `MinMaxScaler(-1,1)` and a `VarianceThreshold`, which removes any column that has 0 variance. Users can activate other steps with more configuration parameters. For example, if users add `use_SVM_to_remove_outliers: true` to the `freqai` config, then FreqAI will automatically add the [`SVMOutlierExtractor`](#identifying-outliers-using-a-support-vector-machine-svm) to the pipeline. Likewise, users can add `principal_component_analysis: true` to the `freqai` config to activate PCA. The [DissimilarityIndex](#identifying-outliers-with-the-dissimilarity-index-di) is activated with `DI_threshold: 1`. Noise can also be added to the data with `noise_standard_deviation: 0.1`. Finally, users can add [DBSCAN](#identifying-outliers-with-dbscan) outlier removal with `use_DBSCAN_to_remove_outliers: true`.
!!! note "More information available"
Please review the [parameter table](freqai-parameter-table.md) for more information on these parameters.
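As a rough illustration of those two default steps (a scikit-learn sketch only - FreqAI assembles its pipeline internally):
```python
import numpy as np
from sklearn.feature_selection import VarianceThreshold
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler

# The two default steps described above: scale each feature to (-1, 1),
# then drop any column with zero variance.
pipe = Pipeline([
    ("scaler", MinMaxScaler(feature_range=(-1, 1))),
    ("variance", VarianceThreshold(threshold=0)),
])

X = np.random.rand(100, 10)  # hypothetical feature matrix
X_transformed = pipe.fit_transform(X)
```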
@@ -391,3 +391,18 @@ Given a number of data points $N$, and a distance $\varepsilon$, DBSCAN clusters
![dbscan](assets/freqai_dbscan.jpg)
FreqAI uses `sklearn.cluster.DBSCAN` (details are available on scikit-learn's webpage [here](https://scikit-learn.org/stable/modules/generated/sklearn.cluster.DBSCAN.html) (external website)) with `min_samples` ($N$) taken as 1/4 of the no. of time points (candles) in the feature set. `eps` ($\varepsilon$) is computed automatically as the elbow point in the *k-distance graph* computed from the nearest neighbors in the pairwise distances of all data points in the feature set.
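As a rough sketch of that setup (here `eps` is hand-picked, whereas FreqAI derives it from the k-distance elbow automatically):
```python
import numpy as np
from sklearn.cluster import DBSCAN

X = np.random.rand(400, 8)  # hypothetical feature set: 400 candles, 8 features
clusterer = DBSCAN(
    min_samples=len(X) // 4,  # 1/4 of the number of time points, as described above
    eps=0.5,  # hand-picked here; FreqAI computes this from the k-distance elbow
)
labels = clusterer.fit_predict(X)
outliers = X[labels == -1]  # DBSCAN labels noise points (outliers) as -1
```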
### Data dimensionality reduction with Principal Component Analysis
You can reduce the dimensionality of your features by activating `principal_component_analysis` in the config:
```json
"freqai": {
"feature_parameters" : {
"principal_component_analysis": true
}
}
```
This will perform PCA on the features and reduce their dimensionality so that the explained variance of the data set is >= 0.999. Reducing data dimensionality makes training the model faster and hence allows for more up-to-date models.
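The same behaviour can be sketched with scikit-learn, where a fractional `n_components` keeps just enough components to reach the requested explained variance (an illustration only, not FreqAI's internal code):
```python
import numpy as np
from sklearn.decomposition import PCA

X = np.random.rand(500, 40)  # hypothetical feature matrix
pca = PCA(n_components=0.999)  # keep components until >= 99.9% of variance is explained
X_reduced = pca.fit_transform(X)
print(X.shape, "->", X_reduced.shape)  # e.g. (500, 40) -> (500, k) with k <= 40
```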

View File

@@ -36,7 +36,7 @@ Mandatory parameters are marked as **Required** and have to be set in one of the
| `weight_factor` | Weight training data points according to their recency (see details [here](freqai-feature-engineering.md#weighting-features-for-temporal-importance)). <br> **Datatype:** Positive float (typically < 1).
| `indicator_max_period_candles` | **No longer used (#7325)**. Replaced by `startup_candle_count` which is set in the [strategy](freqai-configuration.md#building-a-freqai-strategy). `startup_candle_count` is timeframe independent and defines the maximum *period* used in `feature_engineering_*()` for indicator creation. FreqAI uses this parameter together with the maximum timeframe in `include_time_frames` to calculate how many data points to download such that the first data point does not include a NaN. <br> **Datatype:** Positive integer.
| `indicator_periods_candles` | Time periods to calculate indicators for. The indicators are added to the base indicator dataset. <br> **Datatype:** List of positive integers.
| `principal_component_analysis` | Automatically reduce the dimensionality of the data set using Principal Component Analysis. See details about how it works [here](#reducing-data-dimensionality-with-principal-component-analysis) <br> **Datatype:** Boolean. <br> Default: `False`.
| `principal_component_analysis` | Automatically reduce the dimensionality of the data set using Principal Component Analysis. See details about how it works [here](freqai-feature-engineering.md#data-dimensionality-reduction-with-principal-component-analysis) <br> **Datatype:** Boolean. <br> Default: `False`.
| `plot_feature_importances` | Create a feature importance plot for each model for the top/bottom `plot_feature_importances` number of features. Plot is stored in `user_data/models/<identifier>/sub-train-<COIN>_<timestamp>.html`. <br> **Datatype:** Integer. <br> Default: `0`.
| `DI_threshold` | Activates the use of the Dissimilarity Index for outlier detection when set to > 0. See details about how it works [here](freqai-feature-engineering.md#identifying-outliers-with-the-dissimilarity-index-di). <br> **Datatype:** Positive float (typically < 1).
| `use_SVM_to_remove_outliers` | Train a support vector machine to detect and remove outliers from the training dataset, as well as from incoming data points. See details about how it works [here](freqai-feature-engineering.md#identifying-outliers-using-a-support-vector-machine-svm). <br> **Datatype:** Boolean.

docs/includes/cors.md Normal file (+43 lines)
View File

@@ -0,0 +1,43 @@
## CORS
This whole section is only necessary in cross-origin cases (where you have multiple bot APIs running on `localhost:8081`, `localhost:8082`, ...) and want to combine them into one FreqUI instance.
??? info "Technical explanation"
All web-based front-ends are subject to [CORS](https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS) - Cross-Origin Resource Sharing.
Since most of the requests to the Freqtrade API must be authenticated, a proper CORS policy is key to avoid security problems.
Also, the standard disallows `*` CORS policies for requests with credentials, so this setting must be set appropriately.
Users can allow access from different origin URLs to the bot API via the `CORS_origins` configuration setting.
It consists of a list of URLs that are allowed to consume resources from the bot's API.
Assuming your application is deployed as `https://frequi.freqtrade.io/home/` - this would mean that the following configuration becomes necessary:
```jsonc
{
//...
"jwt_secret_key": "somethingrandom",
"CORS_origins": ["https://frequi.freqtrade.io"],
//...
}
```
In the following (pretty common) case, FreqUI is accessible on `http://localhost:8080/trade` (this is what you see in your navbar when navigating to freqUI).
![freqUI url](assets/frequi_url.png)
The correct configuration for this case is `http://localhost:8080` - the main part of the URL including the port.
```jsonc
{
//...
"jwt_secret_key": "somethingrandom",
"CORS_origins": ["http://localhost:8080"],
//...
}
```
!!! Tip "trailing Slash"
The trailing slash is not allowed in the `CORS_origins` configuration (e.g. `"http://localhots:8080/"`).
Such a configuration will not take effect, and the cors errors will remain.
!!! Note
We strongly recommend also setting `jwt_secret_key` to something random and known only to yourself to avoid unauthorized access to your bot.

View File

@@ -41,6 +41,7 @@ Please read the [exchange specific notes](exchanges.md) to learn about eventual,
- [X] [Binance](https://www.binance.com/)
- [X] [Bitmart](https://bitmart.com/)
- [X] [BingX](https://bingx.com/invite/0EM9RX)
- [X] [Gate.io](https://www.gate.io/ref/6266643)
- [X] [HTX](https://www.htx.com/) (Former Huobi)
- [X] [Kraken](https://kraken.com/)

View File

@@ -286,7 +286,7 @@ cd freqtrade
#### Freqtrade install: Conda Environment
```bash
conda create --name freqtrade python=3.11
conda create --name freqtrade python=3.12
```
!!! Note "Creating Conda Environment"

View File

@@ -1,6 +1,6 @@
markdown==3.6
mkdocs==1.6.0
mkdocs-material==9.5.19
mkdocs-material==9.5.24
mdx_truly_sane_lists==1.3
pymdown-extensions==10.8.1
jinja2==3.1.3
jinja2==3.1.4

View File

@@ -1,16 +1,8 @@
# REST API & FreqUI
# REST API
## FreqUI
Freqtrade provides a builtin webserver, which can serve [FreqUI](https://github.com/freqtrade/frequi), the freqtrade UI.
By default, the UI is not included in the installation (except for docker images), and must be installed explicitly with `freqtrade install-ui`.
This same command can also be used to update freqUI, should there be a new release.
Once the bot is started in trade / dry-run mode (with `freqtrade trade`) - the UI will be available under the configured port below (usually `http://127.0.0.1:8080`).
!!! Note "developers"
Developers should not use this method, but instead use the method described in the [freqUI repository](https://github.com/freqtrade/frequi) to get the source-code of freqUI.
FreqUI now has its own dedicated [documentation section](frequi.md) - please refer to that section for all information regarding FreqUI.
## Configuration
@@ -169,7 +161,7 @@ freqtrade-client --config rest_config.json <command> [optional parameters]
| `delete_lock <lock_id>` | Deletes (disables) the lock by id.
| `locks add <pair>, <until>, [side], [reason]` | Locks a pair until "until". (Until will be rounded up to the nearest timeframe).
| `profit` | Display a summary of your profit/loss from close trades and some stats about your performance.
| `forceexit <trade_id>` | Instantly exits the given trade (Ignoring `minimum_roi`).
| `forceexit <trade_id> [order_type] [amount]` | Instantly exits the given trade (ignoring `minimum_roi`), using the given order type ("market" or "limit", uses your config setting if not specified), and the chosen amount (full sell if not specified).
| `forceexit all` | Instantly exits all open trades (Ignoring `minimum_roi`).
| `forceenter <pair> [rate]` | Instantly enters the given pair. Rate is optional. (`force_entry_enable` must be set to True)
| `forceenter <pair> <side> [rate]` | Instantly longs or shorts the given pair. Rate is optional. (`force_entry_enable` must be set to True)
@@ -488,42 +480,4 @@ Since the access token has a short timeout (15 min) - the `token/refresh` reques
{"access_token":"eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpYXQiOjE1ODkxMTk5NzQsIm5iZiI6MTU4OTExOTk3NCwianRpIjoiMDBjNTlhMWUtMjBmYS00ZTk0LTliZjAtNWQwNTg2MTdiZDIyIiwiZXhwIjoxNTg5MTIwODc0LCJpZGVudGl0eSI6eyJ1IjoiRnJlcXRyYWRlciJ9LCJmcmVzaCI6ZmFsc2UsInR5cGUiOiJhY2Nlc3MifQ.1seHlII3WprjjclY6DpRhen0rqdF4j6jbvxIhUFaSbs"}
```
### CORS
This whole section is only necessary in cross-origin cases (where you multiple bot API's running on `localhost:8081`, `localhost:8082`, ...), and want to combine them into one FreqUI instance.
??? info "Technical explanation"
All web-based front-ends are subject to [CORS](https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS) - Cross-Origin Resource Sharing.
Since most of the requests to the Freqtrade API must be authenticated, a proper CORS policy is key to avoid security problems.
Also, the standard disallows `*` CORS policies for requests with credentials, so this setting must be set appropriately.
Users can allow access from different origin URL's to the bot API via the `CORS_origins` configuration setting.
It consists of a list of allowed URL's that are allowed to consume resources from the bot's API.
Assuming your application is deployed as `https://frequi.freqtrade.io/home/` - this would mean that the following configuration becomes necessary:
```jsonc
{
//...
"jwt_secret_key": "somethingrandom",
"CORS_origins": ["https://frequi.freqtrade.io"],
//...
}
```
In the following (pretty common) case, FreqUI is accessible on `http://localhost:8080/trade` (this is what you see in your navbar when navigating to freqUI).
![freqUI url](assets/frequi_url.png)
The correct configuration for this case is `http://localhost:8080` - the main part of the URL including the port.
```jsonc
{
//...
"jwt_secret_key": "somethingrandom",
"CORS_origins": ["http://localhost:8080"],
//...
}
```
!!! Note
We strongly recommend to also set `jwt_secret_key` to something random and known only to yourself to avoid unauthorized access to your bot.
--8<-- "includes/cors.md"

View File

@@ -30,6 +30,7 @@ The Order-type will be ignored if only one mode is available.
|----------|-------------|
| Binance | limit |
| Binance Futures | market, limit |
| Bingx | market, limit |
| HTX (former Huobi) | limit |
| kraken | market, limit |
| Gate | limit |

View File

@@ -24,7 +24,7 @@ git clone https://github.com/freqtrade/freqtrade.git
Install ta-lib according to the [ta-lib documentation](https://github.com/TA-Lib/ta-lib-python#windows).
As compiling from source on windows has heavy dependencies (requires a partial visual studio installation), Freqtrade provides these dependencies (in the binary wheel format) for the latest 3 Python versions (3.9, 3.10 and 3.11) and for 64bit Windows.
As compiling from source on Windows has heavy dependencies (requires a partial Visual Studio installation), Freqtrade provides these dependencies (in the binary wheel format) for the latest Python versions (3.9, 3.10, 3.11 and 3.12) and for 64bit Windows.
These wheels are also used by CI running on Windows, and are therefore tested together with freqtrade.
Other versions must be downloaded from the above link.

View File

@@ -1,21 +1,33 @@
""" Freqtrade bot """
__version__ = '2024.4'
"""Freqtrade bot"""
if 'dev' in __version__:
__version__ = "2024.5"
if "dev" in __version__:
from pathlib import Path
try:
import subprocess
freqtrade_basedir = Path(__file__).parent
__version__ = __version__ + '-' + subprocess.check_output(
['git', 'log', '--format="%h"', '-n 1'],
stderr=subprocess.DEVNULL, cwd=freqtrade_basedir).decode("utf-8").rstrip().strip('"')
__version__ = (
__version__
+ "-"
+ subprocess.check_output(
["git", "log", '--format="%h"', "-n 1"],
stderr=subprocess.DEVNULL,
cwd=freqtrade_basedir,
)
.decode("utf-8")
.rstrip()
.strip('"')
)
except Exception: # pragma: no cover
# git not available, ignore
try:
# Try Fallback to freqtrade_commit file (created by CI while building docker image)
versionfile = Path('./freqtrade_commit')
versionfile = Path("./freqtrade_commit")
if versionfile.is_file():
__version__ = f"docker-{__version__}-{versionfile.read_text()[:8]}"
except Exception:

View File

@@ -9,5 +9,5 @@ To launch Freqtrade as a module
from freqtrade import main
if __name__ == '__main__':
if __name__ == "__main__":
main.main()

View File

@@ -6,22 +6,39 @@ Contains all start-commands, subcommands and CLI Interface creation.
Note: Be careful with file-scoped imports in these subfiles,
as they are parsed on startup; nothing containing optional modules should be loaded.
"""
from freqtrade.commands.analyze_commands import start_analysis_entries_exits
from freqtrade.commands.arguments import Arguments
from freqtrade.commands.build_config_commands import start_new_config, start_show_config
from freqtrade.commands.data_commands import (start_convert_data, start_convert_trades,
start_download_data, start_list_data)
from freqtrade.commands.data_commands import (
start_convert_data,
start_convert_trades,
start_download_data,
start_list_data,
)
from freqtrade.commands.db_commands import start_convert_db
from freqtrade.commands.deploy_commands import (start_create_userdir, start_install_ui,
start_new_strategy)
from freqtrade.commands.deploy_commands import (
start_create_userdir,
start_install_ui,
start_new_strategy,
)
from freqtrade.commands.hyperopt_commands import start_hyperopt_list, start_hyperopt_show
from freqtrade.commands.list_commands import (start_list_exchanges, start_list_freqAI_models,
start_list_markets, start_list_strategies,
start_list_timeframes, start_show_trades)
from freqtrade.commands.optimize_commands import (start_backtesting, start_backtesting_show,
start_edge, start_hyperopt,
start_lookahead_analysis,
start_recursive_analysis)
from freqtrade.commands.list_commands import (
start_list_exchanges,
start_list_freqAI_models,
start_list_markets,
start_list_strategies,
start_list_timeframes,
start_show_trades,
)
from freqtrade.commands.optimize_commands import (
start_backtesting,
start_backtesting_show,
start_edge,
start_hyperopt,
start_lookahead_analysis,
start_recursive_analysis,
)
from freqtrade.commands.pairlist_commands import start_test_pairlist
from freqtrade.commands.plot_commands import start_plot_dataframe, start_plot_profit
from freqtrade.commands.strategy_utils_commands import start_strategy_update

View File

@@ -20,25 +20,25 @@ def setup_analyze_configuration(args: Dict[str, Any], method: RunMode) -> Dict[s
config = setup_utils_configuration(args, method)
no_unlimited_runmodes = {
RunMode.BACKTEST: 'backtesting',
RunMode.BACKTEST: "backtesting",
}
if method in no_unlimited_runmodes.keys():
from freqtrade.data.btanalysis import get_latest_backtest_filename
if 'exportfilename' in config:
if config['exportfilename'].is_dir():
btfile = Path(get_latest_backtest_filename(config['exportfilename']))
if "exportfilename" in config:
if config["exportfilename"].is_dir():
btfile = Path(get_latest_backtest_filename(config["exportfilename"]))
signals_file = f"{config['exportfilename']}/{btfile.stem}_signals.pkl"
else:
if config['exportfilename'].exists():
btfile = Path(config['exportfilename'])
if config["exportfilename"].exists():
btfile = Path(config["exportfilename"])
signals_file = f"{btfile.parent}/{btfile.stem}_signals.pkl"
else:
raise ConfigurationError(f"{config['exportfilename']} does not exist.")
else:
raise ConfigurationError('exportfilename not in config.')
raise ConfigurationError("exportfilename not in config.")
if (not Path(signals_file).exists()):
if not Path(signals_file).exists():
raise OperationalException(
f"Cannot find latest backtest signals file: {signals_file}."
"Run backtesting with `--export signals`."
@@ -58,6 +58,6 @@ def start_analysis_entries_exits(args: Dict[str, Any]) -> None:
# Initialize configuration
config = setup_analyze_configuration(args, RunMode.BACKTEST)
logger.info('Starting freqtrade in analysis mode')
logger.info("Starting freqtrade in analysis mode")
process_entry_exit_reasons(config)

View File

@@ -1,6 +1,7 @@
"""
This module contains the argument manager class
"""
import argparse
from functools import partial
from pathlib import Path
@@ -12,35 +13,72 @@ from freqtrade.constants import DEFAULT_CONFIG
ARGS_COMMON = ["verbosity", "logfile", "version", "config", "datadir", "user_data_dir"]
ARGS_STRATEGY = ["strategy", "strategy_path", "recursive_strategy_search", "freqaimodel",
"freqaimodel_path"]
ARGS_STRATEGY = [
"strategy",
"strategy_path",
"recursive_strategy_search",
"freqaimodel",
"freqaimodel_path",
]
ARGS_TRADE = ["db_url", "sd_notify", "dry_run", "dry_run_wallet", "fee"]
ARGS_WEBSERVER: List[str] = []
ARGS_COMMON_OPTIMIZE = ["timeframe", "timerange", "dataformat_ohlcv",
"max_open_trades", "stake_amount", "fee", "pairs"]
ARGS_COMMON_OPTIMIZE = [
"timeframe",
"timerange",
"dataformat_ohlcv",
"max_open_trades",
"stake_amount",
"fee",
"pairs",
]
ARGS_BACKTEST = ARGS_COMMON_OPTIMIZE + ["position_stacking", "use_max_market_positions",
"enable_protections", "dry_run_wallet", "timeframe_detail",
"strategy_list", "export", "exportfilename",
"backtest_breakdown", "backtest_cache",
"freqai_backtest_live_models"]
ARGS_BACKTEST = ARGS_COMMON_OPTIMIZE + [
"position_stacking",
"use_max_market_positions",
"enable_protections",
"dry_run_wallet",
"timeframe_detail",
"strategy_list",
"export",
"exportfilename",
"backtest_breakdown",
"backtest_cache",
"freqai_backtest_live_models",
]
ARGS_HYPEROPT = ARGS_COMMON_OPTIMIZE + ["hyperopt", "hyperopt_path",
"position_stacking", "use_max_market_positions",
"enable_protections", "dry_run_wallet", "timeframe_detail",
"epochs", "spaces", "print_all",
"print_colorized", "print_json", "hyperopt_jobs",
"hyperopt_random_state", "hyperopt_min_trades",
"hyperopt_loss", "disableparamexport",
"hyperopt_ignore_missing_space", "analyze_per_epoch"]
ARGS_HYPEROPT = ARGS_COMMON_OPTIMIZE + [
"hyperopt",
"hyperopt_path",
"position_stacking",
"use_max_market_positions",
"enable_protections",
"dry_run_wallet",
"timeframe_detail",
"epochs",
"spaces",
"print_all",
"print_colorized",
"print_json",
"hyperopt_jobs",
"hyperopt_random_state",
"hyperopt_min_trades",
"hyperopt_loss",
"disableparamexport",
"hyperopt_ignore_missing_space",
"analyze_per_epoch",
]
ARGS_EDGE = ARGS_COMMON_OPTIMIZE + ["stoploss_range"]
ARGS_LIST_STRATEGIES = ["strategy_path", "print_one_column", "print_colorized",
"recursive_strategy_search"]
ARGS_LIST_STRATEGIES = [
"strategy_path",
"print_one_column",
"print_colorized",
"recursive_strategy_search",
]
ARGS_LIST_FREQAIMODELS = ["freqaimodel_path", "print_one_column", "print_colorized"]
@@ -52,12 +90,27 @@ ARGS_LIST_EXCHANGES = ["print_one_column", "list_exchanges_all"]
ARGS_LIST_TIMEFRAMES = ["exchange", "print_one_column"]
ARGS_LIST_PAIRS = ["exchange", "print_list", "list_pairs_print_json", "print_one_column",
"print_csv", "base_currencies", "quote_currencies", "list_pairs_all",
"trading_mode"]
ARGS_LIST_PAIRS = [
"exchange",
"print_list",
"list_pairs_print_json",
"print_one_column",
"print_csv",
"base_currencies",
"quote_currencies",
"list_pairs_all",
"trading_mode",
]
ARGS_TEST_PAIRLIST = ["user_data_dir", "verbosity", "config", "quote_currencies",
"print_one_column", "list_pairs_print_json", "exchange"]
ARGS_TEST_PAIRLIST = [
"user_data_dir",
"verbosity",
"config",
"quote_currencies",
"print_one_column",
"list_pairs_print_json",
"exchange",
]
ARGS_CREATE_USERDIR = ["user_data_dir", "reset"]
@@ -70,22 +123,59 @@ ARGS_CONVERT_DATA_TRADES = ["pairs", "format_from_trades", "format_to", "erase",
ARGS_CONVERT_DATA = ["pairs", "format_from", "format_to", "erase", "exchange"]
ARGS_CONVERT_DATA_OHLCV = ARGS_CONVERT_DATA + ["timeframes", "trading_mode", "candle_types"]
ARGS_CONVERT_TRADES = ["pairs", "timeframes", "exchange", "dataformat_ohlcv", "dataformat_trades",
"trading_mode"]
ARGS_CONVERT_TRADES = [
"pairs",
"timeframes",
"exchange",
"dataformat_ohlcv",
"dataformat_trades",
"trading_mode",
]
ARGS_LIST_DATA = ["exchange", "dataformat_ohlcv", "pairs", "trading_mode", "show_timerange"]
ARGS_DOWNLOAD_DATA = ["pairs", "pairs_file", "days", "new_pairs_days", "include_inactive",
"timerange", "download_trades", "exchange", "timeframes",
"erase", "dataformat_ohlcv", "dataformat_trades", "trading_mode",
"prepend_data"]
ARGS_DOWNLOAD_DATA = [
"pairs",
"pairs_file",
"days",
"new_pairs_days",
"include_inactive",
"timerange",
"download_trades",
"convert_trades",
"exchange",
"timeframes",
"erase",
"dataformat_ohlcv",
"dataformat_trades",
"trading_mode",
"prepend_data",
]
ARGS_PLOT_DATAFRAME = ["pairs", "indicators1", "indicators2", "plot_limit",
"db_url", "trade_source", "export", "exportfilename",
"timerange", "timeframe", "no_trades"]
ARGS_PLOT_DATAFRAME = [
"pairs",
"indicators1",
"indicators2",
"plot_limit",
"db_url",
"trade_source",
"export",
"exportfilename",
"timerange",
"timeframe",
"no_trades",
]
ARGS_PLOT_PROFIT = ["pairs", "timerange", "export", "exportfilename", "db_url",
"trade_source", "timeframe", "plot_auto_open", ]
ARGS_PLOT_PROFIT = [
"pairs",
"timerange",
"export",
"exportfilename",
"db_url",
"trade_source",
"timeframe",
"plot_auto_open",
]
ARGS_CONVERT_DB = ["db_url", "db_url_from"]
@@ -93,36 +183,76 @@ ARGS_INSTALL_UI = ["erase_ui_only", "ui_version"]
ARGS_SHOW_TRADES = ["db_url", "trade_ids", "print_json"]
ARGS_HYPEROPT_LIST = ["hyperopt_list_best", "hyperopt_list_profitable",
"hyperopt_list_min_trades", "hyperopt_list_max_trades",
"hyperopt_list_min_avg_time", "hyperopt_list_max_avg_time",
"hyperopt_list_min_avg_profit", "hyperopt_list_max_avg_profit",
"hyperopt_list_min_total_profit", "hyperopt_list_max_total_profit",
"hyperopt_list_min_objective", "hyperopt_list_max_objective",
"print_colorized", "print_json", "hyperopt_list_no_details",
"hyperoptexportfilename", "export_csv"]
ARGS_HYPEROPT_LIST = [
"hyperopt_list_best",
"hyperopt_list_profitable",
"hyperopt_list_min_trades",
"hyperopt_list_max_trades",
"hyperopt_list_min_avg_time",
"hyperopt_list_max_avg_time",
"hyperopt_list_min_avg_profit",
"hyperopt_list_max_avg_profit",
"hyperopt_list_min_total_profit",
"hyperopt_list_max_total_profit",
"hyperopt_list_min_objective",
"hyperopt_list_max_objective",
"print_colorized",
"print_json",
"hyperopt_list_no_details",
"hyperoptexportfilename",
"export_csv",
]
ARGS_HYPEROPT_SHOW = ["hyperopt_list_best", "hyperopt_list_profitable", "hyperopt_show_index",
"print_json", "hyperoptexportfilename", "hyperopt_show_no_header",
"disableparamexport", "backtest_breakdown"]
ARGS_HYPEROPT_SHOW = [
"hyperopt_list_best",
"hyperopt_list_profitable",
"hyperopt_show_index",
"print_json",
"hyperoptexportfilename",
"hyperopt_show_no_header",
"disableparamexport",
"backtest_breakdown",
]
ARGS_ANALYZE_ENTRIES_EXITS = ["exportfilename", "analysis_groups", "enter_reason_list",
"exit_reason_list", "indicator_list", "timerange",
"analysis_rejected", "analysis_to_csv", "analysis_csv_path"]
ARGS_ANALYZE_ENTRIES_EXITS = [
"exportfilename",
"analysis_groups",
"enter_reason_list",
"exit_reason_list",
"indicator_list",
"timerange",
"analysis_rejected",
"analysis_to_csv",
"analysis_csv_path",
]
NO_CONF_REQURIED = ["convert-data", "convert-trade-data", "download-data", "list-timeframes",
"list-markets", "list-pairs", "list-strategies", "list-freqaimodels",
"list-data", "hyperopt-list", "hyperopt-show", "backtest-filter",
"plot-dataframe", "plot-profit", "show-trades", "trades-to-ohlcv",
"strategy-updater"]
NO_CONF_REQURIED = [
"convert-data",
"convert-trade-data",
"download-data",
"list-timeframes",
"list-markets",
"list-pairs",
"list-strategies",
"list-freqaimodels",
"list-data",
"hyperopt-list",
"hyperopt-show",
"backtest-filter",
"plot-dataframe",
"plot-profit",
"show-trades",
"trades-to-ohlcv",
"strategy-updater",
]
NO_CONF_ALLOWED = ["create-userdir", "list-exchanges", "new-strategy"]
ARGS_STRATEGY_UPDATER = ["strategy_list", "strategy_path", "recursive_strategy_search"]
ARGS_LOOKAHEAD_ANALYSIS = [
a for a in ARGS_BACKTEST if a not in ("position_stacking", "use_max_market_positions", 'cache')
] + ["minimum_trade_amount", "targeted_trade_amount", "lookahead_analysis_exportfilename"]
a for a in ARGS_BACKTEST if a not in ("position_stacking", "use_max_market_positions", "cache")
] + ["minimum_trade_amount", "targeted_trade_amount", "lookahead_analysis_exportfilename"]
ARGS_RECURSIVE_ANALYSIS = ["timeframe", "timerange", "dataformat_ohlcv", "pairs", "startup_candle"]
@@ -156,14 +286,14 @@ class Arguments:
# Workaround issue in argparse with action='append' and default value
# (see https://bugs.python.org/issue16399)
# Allow no-config for certain commands (like downloading / plotting)
if ('config' in parsed_arg and parsed_arg.config is None):
conf_required = ('command' in parsed_arg and parsed_arg.command in NO_CONF_REQURIED)
if "config" in parsed_arg and parsed_arg.config is None:
conf_required = "command" in parsed_arg and parsed_arg.command in NO_CONF_REQURIED
if 'user_data_dir' in parsed_arg and parsed_arg.user_data_dir is not None:
if "user_data_dir" in parsed_arg and parsed_arg.user_data_dir is not None:
user_dir = parsed_arg.user_data_dir
else:
# Default case
user_dir = 'user_data'
user_dir = "user_data"
# Try loading from "user_data/config.json"
cfgfile = Path(user_dir) / DEFAULT_CONFIG
if cfgfile.is_file():
@@ -177,7 +307,6 @@ class Arguments:
return parsed_arg
def _build_args(self, optionlist, parser):
for val in optionlist:
opt = AVAILABLE_CLI_OPTIONS[val]
parser.add_argument(*opt.cli, dest=val, **opt.kwargs)
@@ -198,43 +327,61 @@ class Arguments:
# Build main command
self.parser = argparse.ArgumentParser(
prog="freqtrade",
description='Free, open source crypto trading bot'
prog="freqtrade", description="Free, open source crypto trading bot"
)
self._build_args(optionlist=['version'], parser=self.parser)
self._build_args(optionlist=["version"], parser=self.parser)
from freqtrade.commands import (start_analysis_entries_exits, start_backtesting,
start_backtesting_show, start_convert_data,
start_convert_db, start_convert_trades,
start_create_userdir, start_download_data, start_edge,
start_hyperopt, start_hyperopt_list, start_hyperopt_show,
start_install_ui, start_list_data, start_list_exchanges,
start_list_freqAI_models, start_list_markets,
start_list_strategies, start_list_timeframes,
start_lookahead_analysis, start_new_config,
start_new_strategy, start_plot_dataframe, start_plot_profit,
start_recursive_analysis, start_show_config,
start_show_trades, start_strategy_update,
start_test_pairlist, start_trading, start_webserver)
from freqtrade.commands import (
start_analysis_entries_exits,
start_backtesting,
start_backtesting_show,
start_convert_data,
start_convert_db,
start_convert_trades,
start_create_userdir,
start_download_data,
start_edge,
start_hyperopt,
start_hyperopt_list,
start_hyperopt_show,
start_install_ui,
start_list_data,
start_list_exchanges,
start_list_freqAI_models,
start_list_markets,
start_list_strategies,
start_list_timeframes,
start_lookahead_analysis,
start_new_config,
start_new_strategy,
start_plot_dataframe,
start_plot_profit,
start_recursive_analysis,
start_show_config,
start_show_trades,
start_strategy_update,
start_test_pairlist,
start_trading,
start_webserver,
)
subparsers = self.parser.add_subparsers(dest='command',
# Use custom message when no subhandler is added
# shown from `main.py`
# required=True
)
subparsers = self.parser.add_subparsers(
dest="command",
# Use custom message when no subhandler is added
# shown from `main.py`
# required=True
)
# Add trade subcommand
trade_cmd = subparsers.add_parser(
'trade',
help='Trade module.',
parents=[_common_parser, _strategy_parser]
"trade", help="Trade module.", parents=[_common_parser, _strategy_parser]
)
trade_cmd.set_defaults(func=start_trading)
self._build_args(optionlist=ARGS_TRADE, parser=trade_cmd)
# add create-userdir subcommand
create_userdir_cmd = subparsers.add_parser(
'create-userdir',
"create-userdir",
help="Create user-data directory.",
)
create_userdir_cmd.set_defaults(func=start_create_userdir)
@@ -242,7 +389,7 @@ class Arguments:
# add new-config subcommand
build_config_cmd = subparsers.add_parser(
'new-config',
"new-config",
help="Create new config",
)
build_config_cmd.set_defaults(func=start_new_config)
@@ -250,7 +397,7 @@ class Arguments:
# add show-config subcommand
show_config_cmd = subparsers.add_parser(
'show-config',
"show-config",
help="Show resolved config",
)
show_config_cmd.set_defaults(func=start_show_config)
@@ -258,7 +405,7 @@ class Arguments:
# add new-strategy subcommand
build_strategy_cmd = subparsers.add_parser(
'new-strategy',
"new-strategy",
help="Create new strategy",
)
build_strategy_cmd.set_defaults(func=start_new_strategy)
@@ -266,8 +413,8 @@ class Arguments:
# Add download-data subcommand
download_data_cmd = subparsers.add_parser(
'download-data',
help='Download backtesting data.',
"download-data",
help="Download backtesting data.",
parents=[_common_parser],
)
download_data_cmd.set_defaults(func=start_download_data)
@@ -275,8 +422,8 @@ class Arguments:
# Add convert-data subcommand
convert_data_cmd = subparsers.add_parser(
'convert-data',
help='Convert candle (OHLCV) data from one format to another.',
"convert-data",
help="Convert candle (OHLCV) data from one format to another.",
parents=[_common_parser],
)
convert_data_cmd.set_defaults(func=partial(start_convert_data, ohlcv=True))
@@ -284,8 +431,8 @@ class Arguments:
# Add convert-trade-data subcommand
convert_trade_data_cmd = subparsers.add_parser(
'convert-trade-data',
help='Convert trade data from one format to another.',
"convert-trade-data",
help="Convert trade data from one format to another.",
parents=[_common_parser],
)
convert_trade_data_cmd.set_defaults(func=partial(start_convert_data, ohlcv=False))
@@ -293,8 +440,8 @@ class Arguments:
# Add trades-to-ohlcv subcommand
convert_trade_data_cmd = subparsers.add_parser(
'trades-to-ohlcv',
help='Convert trade data to OHLCV data.',
"trades-to-ohlcv",
help="Convert trade data to OHLCV data.",
parents=[_common_parser],
)
convert_trade_data_cmd.set_defaults(func=start_convert_trades)
@@ -302,8 +449,8 @@ class Arguments:
# Add list-data subcommand
list_data_cmd = subparsers.add_parser(
'list-data',
help='List downloaded data.',
"list-data",
help="List downloaded data.",
parents=[_common_parser],
)
list_data_cmd.set_defaults(func=start_list_data)
@@ -311,17 +458,15 @@ class Arguments:
# Add backtesting subcommand
backtesting_cmd = subparsers.add_parser(
'backtesting',
help='Backtesting module.',
parents=[_common_parser, _strategy_parser]
"backtesting", help="Backtesting module.", parents=[_common_parser, _strategy_parser]
)
backtesting_cmd.set_defaults(func=start_backtesting)
self._build_args(optionlist=ARGS_BACKTEST, parser=backtesting_cmd)
# Add backtesting-show subcommand
backtesting_show_cmd = subparsers.add_parser(
'backtesting-show',
help='Show past Backtest results',
"backtesting-show",
help="Show past Backtest results",
parents=[_common_parser],
)
backtesting_show_cmd.set_defaults(func=start_backtesting_show)
@@ -329,26 +474,22 @@ class Arguments:
# Add backtesting analysis subcommand
analysis_cmd = subparsers.add_parser(
'backtesting-analysis',
help='Backtest Analysis module.',
parents=[_common_parser]
"backtesting-analysis", help="Backtest Analysis module.", parents=[_common_parser]
)
analysis_cmd.set_defaults(func=start_analysis_entries_exits)
self._build_args(optionlist=ARGS_ANALYZE_ENTRIES_EXITS, parser=analysis_cmd)
# Add edge subcommand
edge_cmd = subparsers.add_parser(
'edge',
help='Edge module.',
parents=[_common_parser, _strategy_parser]
"edge", help="Edge module.", parents=[_common_parser, _strategy_parser]
)
edge_cmd.set_defaults(func=start_edge)
self._build_args(optionlist=ARGS_EDGE, parser=edge_cmd)
# Add hyperopt subcommand
hyperopt_cmd = subparsers.add_parser(
'hyperopt',
help='Hyperopt module.',
"hyperopt",
help="Hyperopt module.",
parents=[_common_parser, _strategy_parser],
)
hyperopt_cmd.set_defaults(func=start_hyperopt)
@@ -356,8 +497,8 @@ class Arguments:
# Add hyperopt-list subcommand
hyperopt_list_cmd = subparsers.add_parser(
'hyperopt-list',
help='List Hyperopt results',
"hyperopt-list",
help="List Hyperopt results",
parents=[_common_parser],
)
hyperopt_list_cmd.set_defaults(func=start_hyperopt_list)
@@ -365,8 +506,8 @@ class Arguments:
# Add hyperopt-show subcommand
hyperopt_show_cmd = subparsers.add_parser(
'hyperopt-show',
help='Show details of Hyperopt results',
"hyperopt-show",
help="Show details of Hyperopt results",
parents=[_common_parser],
)
hyperopt_show_cmd.set_defaults(func=start_hyperopt_show)
@@ -374,8 +515,8 @@ class Arguments:
# Add list-exchanges subcommand
list_exchanges_cmd = subparsers.add_parser(
'list-exchanges',
help='Print available exchanges.',
"list-exchanges",
help="Print available exchanges.",
parents=[_common_parser],
)
list_exchanges_cmd.set_defaults(func=start_list_exchanges)
@@ -383,8 +524,8 @@ class Arguments:
# Add list-markets subcommand
list_markets_cmd = subparsers.add_parser(
'list-markets',
help='Print markets on exchange.',
"list-markets",
help="Print markets on exchange.",
parents=[_common_parser],
)
list_markets_cmd.set_defaults(func=partial(start_list_markets, pairs_only=False))
@@ -392,8 +533,8 @@ class Arguments:
# Add list-pairs subcommand
list_pairs_cmd = subparsers.add_parser(
'list-pairs',
help='Print pairs on exchange.',
"list-pairs",
help="Print pairs on exchange.",
parents=[_common_parser],
)
list_pairs_cmd.set_defaults(func=partial(start_list_markets, pairs_only=True))
@@ -401,8 +542,8 @@ class Arguments:
# Add list-strategies subcommand
list_strategies_cmd = subparsers.add_parser(
'list-strategies',
help='Print available strategies.',
"list-strategies",
help="Print available strategies.",
parents=[_common_parser],
)
list_strategies_cmd.set_defaults(func=start_list_strategies)
@@ -410,8 +551,8 @@ class Arguments:
# Add list-freqAI Models subcommand
list_freqaimodels_cmd = subparsers.add_parser(
'list-freqaimodels',
help='Print available freqAI models.',
"list-freqaimodels",
help="Print available freqAI models.",
parents=[_common_parser],
)
list_freqaimodels_cmd.set_defaults(func=start_list_freqAI_models)
@@ -419,8 +560,8 @@ class Arguments:
# Add list-timeframes subcommand
list_timeframes_cmd = subparsers.add_parser(
'list-timeframes',
help='Print available timeframes for the exchange.',
"list-timeframes",
help="Print available timeframes for the exchange.",
parents=[_common_parser],
)
list_timeframes_cmd.set_defaults(func=start_list_timeframes)
@@ -428,8 +569,8 @@ class Arguments:
# Add show-trades subcommand
show_trades = subparsers.add_parser(
'show-trades',
help='Show trades.',
"show-trades",
help="Show trades.",
parents=[_common_parser],
)
show_trades.set_defaults(func=start_show_trades)
@@ -437,8 +578,8 @@ class Arguments:
# Add test-pairlist subcommand
test_pairlist_cmd = subparsers.add_parser(
'test-pairlist',
help='Test your pairlist configuration.',
"test-pairlist",
help="Test your pairlist configuration.",
)
test_pairlist_cmd.set_defaults(func=start_test_pairlist)
self._build_args(optionlist=ARGS_TEST_PAIRLIST, parser=test_pairlist_cmd)
@@ -453,16 +594,16 @@ class Arguments:
# Add install-ui subcommand
install_ui_cmd = subparsers.add_parser(
'install-ui',
help='Install FreqUI',
"install-ui",
help="Install FreqUI",
)
install_ui_cmd.set_defaults(func=start_install_ui)
self._build_args(optionlist=ARGS_INSTALL_UI, parser=install_ui_cmd)
# Add Plotting subcommand
plot_dataframe_cmd = subparsers.add_parser(
'plot-dataframe',
help='Plot candles with indicators.',
"plot-dataframe",
help="Plot candles with indicators.",
parents=[_common_parser, _strategy_parser],
)
plot_dataframe_cmd.set_defaults(func=start_plot_dataframe)
@@ -470,8 +611,8 @@ class Arguments:
# Plot profit
plot_profit_cmd = subparsers.add_parser(
'plot-profit',
help='Generate plot showing profits.',
"plot-profit",
help="Generate plot showing profits.",
parents=[_common_parser, _strategy_parser],
)
plot_profit_cmd.set_defaults(func=start_plot_profit)
@@ -479,40 +620,36 @@ class Arguments:
# Add webserver subcommand
webserver_cmd = subparsers.add_parser(
'webserver',
help='Webserver module.',
parents=[_common_parser]
"webserver", help="Webserver module.", parents=[_common_parser]
)
webserver_cmd.set_defaults(func=start_webserver)
self._build_args(optionlist=ARGS_WEBSERVER, parser=webserver_cmd)
# Add strategy_updater subcommand
strategy_updater_cmd = subparsers.add_parser(
'strategy-updater',
help='updates outdated strategy files to the current version',
parents=[_common_parser]
"strategy-updater",
help="updates outdated strategy files to the current version",
parents=[_common_parser],
)
strategy_updater_cmd.set_defaults(func=start_strategy_update)
self._build_args(optionlist=ARGS_STRATEGY_UPDATER, parser=strategy_updater_cmd)
# Add lookahead_analysis subcommand
lookahead_analayis_cmd = subparsers.add_parser(
'lookahead-analysis',
"lookahead-analysis",
help="Check for potential look ahead bias.",
parents=[_common_parser, _strategy_parser]
parents=[_common_parser, _strategy_parser],
)
lookahead_analayis_cmd.set_defaults(func=start_lookahead_analysis)
self._build_args(optionlist=ARGS_LOOKAHEAD_ANALYSIS,
parser=lookahead_analayis_cmd)
self._build_args(optionlist=ARGS_LOOKAHEAD_ANALYSIS, parser=lookahead_analayis_cmd)
# Add recursive_analysis subcommand
recursive_analayis_cmd = subparsers.add_parser(
'recursive-analysis',
"recursive-analysis",
help="Check for potential recursive formula issue.",
parents=[_common_parser, _strategy_parser]
parents=[_common_parser, _strategy_parser],
)
recursive_analayis_cmd.set_defaults(func=start_recursive_analysis)
self._build_args(optionlist=ARGS_RECURSIVE_ANALYSIS,
parser=recursive_analayis_cmd)
self._build_args(optionlist=ARGS_RECURSIVE_ANALYSIS, parser=recursive_analayis_cmd)

View File

@@ -45,7 +45,7 @@ def ask_user_overwrite(config_path: Path) -> bool:
},
]
answers = prompt(questions)
return answers['overwrite']
return answers["overwrite"]
def ask_user_config() -> Dict[str, Any]:
@@ -65,7 +65,7 @@ def ask_user_config() -> Dict[str, Any]:
"type": "text",
"name": "stake_currency",
"message": "Please insert your stake currency:",
"default": 'USDT',
"default": "USDT",
},
{
"type": "text",
@@ -73,36 +73,38 @@ def ask_user_config() -> Dict[str, Any]:
"message": f"Please insert your stake amount (Number or '{UNLIMITED_STAKE_AMOUNT}'):",
"default": "unlimited",
"validate": lambda val: val == UNLIMITED_STAKE_AMOUNT or validate_is_float(val),
"filter": lambda val: '"' + UNLIMITED_STAKE_AMOUNT + '"'
if val == UNLIMITED_STAKE_AMOUNT
else val
"filter": lambda val: (
'"' + UNLIMITED_STAKE_AMOUNT + '"' if val == UNLIMITED_STAKE_AMOUNT else val
),
},
{
"type": "text",
"name": "max_open_trades",
"message": "Please insert max_open_trades (Integer or -1 for unlimited open trades):",
"default": "3",
"validate": lambda val: validate_is_int(val)
"validate": lambda val: validate_is_int(val),
},
{
"type": "select",
"name": "timeframe_in_config",
"message": "Time",
"choices": ["Have the strategy define timeframe.", "Override in configuration."]
"choices": ["Have the strategy define timeframe.", "Override in configuration."],
},
{
"type": "text",
"name": "timeframe",
"message": "Please insert your desired timeframe (e.g. 5m):",
"default": "5m",
"when": lambda x: x["timeframe_in_config"] == 'Override in configuration.'
"when": lambda x: x["timeframe_in_config"] == "Override in configuration.",
},
{
"type": "text",
"name": "fiat_display_currency",
"message": "Please insert your display Currency (for reporting):",
"default": 'USD',
"message": (
"Please insert your display Currency for reporting "
"(leave empty to disable FIAT conversion):"
),
"default": "USD",
},
{
"type": "select",
@@ -111,6 +113,7 @@ def ask_user_config() -> Dict[str, Any]:
"choices": [
"binance",
"binanceus",
"bingx",
"gate",
"htx",
"kraken",
@@ -125,33 +128,33 @@ def ask_user_config() -> Dict[str, Any]:
"name": "trading_mode",
"message": "Do you want to trade Perpetual Swaps (perpetual futures)?",
"default": False,
"filter": lambda val: 'futures' if val else 'spot',
"when": lambda x: x["exchange_name"] in ['binance', 'gate', 'okx'],
"filter": lambda val: "futures" if val else "spot",
"when": lambda x: x["exchange_name"] in ["binance", "gate", "okx", "bybit"],
},
{
"type": "autocomplete",
"name": "exchange_name",
"message": "Type your exchange name (Must be supported by ccxt)",
"choices": available_exchanges(),
"when": lambda x: x["exchange_name"] == 'other'
"when": lambda x: x["exchange_name"] == "other",
},
{
"type": "password",
"name": "exchange_key",
"message": "Insert Exchange Key",
"when": lambda x: not x['dry_run']
"when": lambda x: not x["dry_run"],
},
{
"type": "password",
"name": "exchange_secret",
"message": "Insert Exchange Secret",
"when": lambda x: not x['dry_run']
"when": lambda x: not x["dry_run"],
},
{
"type": "password",
"name": "exchange_key_password",
"message": "Insert Exchange API Key password",
"when": lambda x: not x['dry_run'] and x['exchange_name'] in ('kucoin', 'okx')
"when": lambda x: not x["dry_run"] and x["exchange_name"] in ("kucoin", "okx"),
},
{
"type": "confirm",
@@ -163,13 +166,13 @@ def ask_user_config() -> Dict[str, Any]:
"type": "password",
"name": "telegram_token",
"message": "Insert Telegram token",
"when": lambda x: x['telegram']
"when": lambda x: x["telegram"],
},
{
"type": "password",
"name": "telegram_chat_id",
"message": "Insert Telegram chat id",
"when": lambda x: x['telegram']
"when": lambda x: x["telegram"],
},
{
"type": "confirm",
@@ -180,23 +183,25 @@ def ask_user_config() -> Dict[str, Any]:
{
"type": "text",
"name": "api_server_listen_addr",
"message": ("Insert Api server Listen Address (0.0.0.0 for docker, "
"otherwise best left untouched)"),
"message": (
"Insert Api server Listen Address (0.0.0.0 for docker, "
"otherwise best left untouched)"
),
"default": "127.0.0.1" if not running_in_docker() else "0.0.0.0",
"when": lambda x: x['api_server']
"when": lambda x: x["api_server"],
},
{
"type": "text",
"name": "api_server_username",
"message": "Insert api-server username",
"default": "freqtrader",
"when": lambda x: x['api_server']
"when": lambda x: x["api_server"],
},
{
"type": "password",
"name": "api_server_password",
"message": "Insert api-server password",
"when": lambda x: x['api_server']
"when": lambda x: x["api_server"],
},
]
answers = prompt(questions)
@@ -205,15 +210,11 @@ def ask_user_config() -> Dict[str, Any]:
# Interrupted questionary sessions return an empty dict.
raise OperationalException("User interrupted interactive questions.")
# Ensure default is set for non-futures exchanges
answers['trading_mode'] = answers.get('trading_mode', "spot")
answers['margin_mode'] = (
'isolated'
if answers.get('trading_mode') == 'futures'
else ''
)
answers["trading_mode"] = answers.get("trading_mode", "spot")
answers["margin_mode"] = "isolated" if answers.get("trading_mode") == "futures" else ""
# Force JWT token to be a random string
answers['api_server_jwt_key'] = secrets.token_hex()
answers['api_server_ws_token'] = secrets.token_urlsafe(25)
answers["api_server_jwt_key"] = secrets.token_hex()
answers["api_server_ws_token"] = secrets.token_urlsafe(25)
return answers
@@ -225,26 +226,26 @@ def deploy_new_config(config_path: Path, selections: Dict[str, Any]) -> None:
:param selections: Dict containing selections taken by the user.
"""
from jinja2.exceptions import TemplateNotFound
try:
exchange_template = MAP_EXCHANGE_CHILDCLASS.get(
selections['exchange_name'], selections['exchange_name'])
selections["exchange_name"], selections["exchange_name"]
)
selections['exchange'] = render_template(
templatefile=f"subtemplates/exchange_{exchange_template}.j2",
arguments=selections
selections["exchange"] = render_template(
templatefile=f"subtemplates/exchange_{exchange_template}.j2", arguments=selections
)
except TemplateNotFound:
selections['exchange'] = render_template(
templatefile="subtemplates/exchange_generic.j2",
arguments=selections
selections["exchange"] = render_template(
templatefile="subtemplates/exchange_generic.j2", arguments=selections
)
config_text = render_template(templatefile='base_config.json.j2',
arguments=selections)
config_text = render_template(templatefile="base_config.json.j2", arguments=selections)
logger.info(f"Writing config to `{config_path}`.")
logger.info(
"Please make sure to check the configuration contents and adjust settings to your needs.")
"Please make sure to check the configuration contents and adjust settings to your needs."
)
config_path.write_text(config_text)
@@ -255,7 +256,7 @@ def start_new_config(args: Dict[str, Any]) -> None:
Asking the user questions to fill out the template accordingly.
"""
config_path = Path(args['config'][0])
config_path = Path(args["config"][0])
chown_user_directory(config_path.parent)
if config_path.exists():
overwrite = ask_user_overwrite(config_path)
@@ -264,22 +265,22 @@ def start_new_config(args: Dict[str, Any]) -> None:
else:
raise OperationalException(
f"Configuration file `{config_path}` already exists. "
"Please delete it or use a different configuration file name.")
"Please delete it or use a different configuration file name."
)
selections = ask_user_config()
deploy_new_config(config_path, selections)
def start_show_config(args: Dict[str, Any]) -> None:
config = setup_utils_configuration(args, RunMode.UTIL_EXCHANGE, set_dry=False)
# TODO: Sanitize from sensitive info before printing
print("Your combined configuration is:")
config_sanitized = sanitize_config(
config['original_config'],
show_sensitive=args.get('show_sensitive', False)
config["original_config"], show_sensitive=args.get("show_sensitive", False)
)
from rich import print_json
print_json(data=config_sanitized)

File diff suppressed because it is too large

View File

@@ -5,8 +5,11 @@ from typing import Any, Dict
from freqtrade.configuration import TimeRange, setup_utils_configuration
from freqtrade.constants import DATETIME_PRINT_FORMAT, DL_DATA_TIMEFRAMES, Config
from freqtrade.data.converter import (convert_ohlcv_format, convert_trades_format,
convert_trades_to_ohlcv)
from freqtrade.data.converter import (
convert_ohlcv_format,
convert_trades_format,
convert_trades_to_ohlcv,
)
from freqtrade.data.history import download_data_main
from freqtrade.enums import CandleType, RunMode, TradingMode
from freqtrade.exceptions import ConfigurationError
@@ -20,14 +23,17 @@ logger = logging.getLogger(__name__)
def _check_data_config_download_sanity(config: Config) -> None:
if 'days' in config and 'timerange' in config:
raise ConfigurationError("--days and --timerange are mutually exclusive. "
"You can only specify one or the other.")
if "days" in config and "timerange" in config:
raise ConfigurationError(
"--days and --timerange are mutually exclusive. "
"You can only specify one or the other."
)
if 'pairs' not in config:
if "pairs" not in config:
raise ConfigurationError(
"Downloading data requires a list of pairs. "
"Please check the documentation on how to configure this.")
"Please check the documentation on how to configure this."
)
def start_download_data(args: Dict[str, Any]) -> None:
@@ -46,38 +52,41 @@ def start_download_data(args: Dict[str, Any]) -> None:
def start_convert_trades(args: Dict[str, Any]) -> None:
config = setup_utils_configuration(args, RunMode.UTIL_EXCHANGE)
timerange = TimeRange()
# Remove stake-currency to skip checks which are not relevant for datadownload
config['stake_currency'] = ''
config["stake_currency"] = ""
if 'timeframes' not in config:
config['timeframes'] = DL_DATA_TIMEFRAMES
if "timeframes" not in config:
config["timeframes"] = DL_DATA_TIMEFRAMES
# Init exchange
exchange = ExchangeResolver.load_exchange(config, validate=False)
# Manual validations of relevant settings
for timeframe in config['timeframes']:
for timeframe in config["timeframes"]:
exchange.validate_timeframes(timeframe)
available_pairs = [
p for p in exchange.get_markets(
tradable_only=True, active_only=not config.get('include_inactive')
).keys()
p
for p in exchange.get_markets(
tradable_only=True, active_only=not config.get("include_inactive")
).keys()
]
expanded_pairs = dynamic_expand_pairlist(config, available_pairs)
# Convert downloaded trade data to different timeframes
convert_trades_to_ohlcv(
pairs=expanded_pairs, timeframes=config['timeframes'],
datadir=config['datadir'], timerange=timerange, erase=bool(config.get('erase')),
data_format_ohlcv=config['dataformat_ohlcv'],
data_format_trades=config['dataformat_trades'],
candle_type=config.get('candle_type_def', CandleType.SPOT)
pairs=expanded_pairs,
timeframes=config["timeframes"],
datadir=config["datadir"],
timerange=timerange,
erase=bool(config.get("erase")),
data_format_ohlcv=config["dataformat_ohlcv"],
data_format_trades=config["dataformat_trades"],
candle_type=config.get("candle_type_def", CandleType.SPOT),
)
@@ -88,14 +97,19 @@ def start_convert_data(args: Dict[str, Any], ohlcv: bool = True) -> None:
config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)
if ohlcv:
migrate_data(config)
convert_ohlcv_format(config,
convert_from=args['format_from'],
convert_to=args['format_to'],
erase=args['erase'])
convert_ohlcv_format(
config,
convert_from=args["format_from"],
convert_to=args["format_to"],
erase=args["erase"],
)
else:
convert_trades_format(config,
convert_from=args['format_from_trades'], convert_to=args['format_to'],
erase=args['erase'])
convert_trades_format(
config,
convert_from=args["format_from_trades"],
convert_to=args["format_to"],
erase=args["erase"],
)
def start_list_data(args: Dict[str, Any]) -> None:
@@ -108,45 +122,59 @@ def start_list_data(args: Dict[str, Any]) -> None:
from tabulate import tabulate
from freqtrade.data.history import get_datahandler
dhc = get_datahandler(config['datadir'], config['dataformat_ohlcv'])
dhc = get_datahandler(config["datadir"], config["dataformat_ohlcv"])
paircombs = dhc.ohlcv_get_available_data(
config['datadir'],
config.get('trading_mode', TradingMode.SPOT)
)
config["datadir"], config.get("trading_mode", TradingMode.SPOT)
)
if args['pairs']:
paircombs = [comb for comb in paircombs if comb[0] in args['pairs']]
if args["pairs"]:
paircombs = [comb for comb in paircombs if comb[0] in args["pairs"]]
print(f"Found {len(paircombs)} pair / timeframe combinations.")
if not config.get('show_timerange'):
if not config.get("show_timerange"):
groupedpair = defaultdict(list)
for pair, timeframe, candle_type in sorted(
paircombs,
key=lambda x: (x[0], timeframe_to_minutes(x[1]), x[2])
paircombs, key=lambda x: (x[0], timeframe_to_minutes(x[1]), x[2])
):
groupedpair[(pair, candle_type)].append(timeframe)
if groupedpair:
print(tabulate([
(pair, ', '.join(timeframes), candle_type)
for (pair, candle_type), timeframes in groupedpair.items()
],
headers=("Pair", "Timeframe", "Type"),
tablefmt='psql', stralign='right'))
print(
tabulate(
[
(pair, ", ".join(timeframes), candle_type)
for (pair, candle_type), timeframes in groupedpair.items()
],
headers=("Pair", "Timeframe", "Type"),
tablefmt="psql",
stralign="right",
)
)
else:
paircombs1 = [(
pair, timeframe, candle_type,
*dhc.ohlcv_data_min_max(pair, timeframe, candle_type)
) for pair, timeframe, candle_type in paircombs]
paircombs1 = [
(pair, timeframe, candle_type, *dhc.ohlcv_data_min_max(pair, timeframe, candle_type))
for pair, timeframe, candle_type in paircombs
]
print(tabulate([
(pair, timeframe, candle_type,
start.strftime(DATETIME_PRINT_FORMAT),
end.strftime(DATETIME_PRINT_FORMAT), length)
for pair, timeframe, candle_type, start, end, length in sorted(
paircombs1,
key=lambda x: (x[0], timeframe_to_minutes(x[1]), x[2]))
],
headers=("Pair", "Timeframe", "Type", 'From', 'To', 'Candles'),
tablefmt='psql', stralign='right'))
print(
tabulate(
[
(
pair,
timeframe,
candle_type,
start.strftime(DATETIME_PRINT_FORMAT),
end.strftime(DATETIME_PRINT_FORMAT),
length,
)
for pair, timeframe, candle_type, start, end, length in sorted(
paircombs1, key=lambda x: (x[0], timeframe_to_minutes(x[1]), x[2])
)
],
headers=("Pair", "Timeframe", "Type", "From", "To", "Candles"),
tablefmt="psql",
stralign="right",
)
)

View File

@@ -19,9 +19,9 @@ def start_convert_db(args: Dict[str, Any]) -> None:
config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)
init_db(config['db_url'])
init_db(config["db_url"])
session_target = Trade.session
init_db(config['db_url_from'])
init_db(config["db_url_from"])
logger.info("Starting db migration.")
trade_count = 0
@@ -47,9 +47,11 @@ def start_convert_db(args: Dict[str, Any]) -> None:
max_order_id = session_target.scalar(select(func.max(Order.id)))
max_pairlock_id = session_target.scalar(select(func.max(PairLock.id)))
set_sequence_ids(session_target.get_bind(),
trade_id=max_trade_id,
order_id=max_order_id,
pairlock_id=max_pairlock_id)
set_sequence_ids(
session_target.get_bind(),
trade_id=max_trade_id,
order_id=max_order_id,
pairlock_id=max_pairlock_id,
)
logger.info(f"Migrated {trade_count} Trades, and {pairlock_count} Pairlocks.")

View File

@@ -38,7 +38,7 @@ def deploy_new_strategy(strategy_name: str, strategy_path: Path, subtemplate: st
"""
Deploy new strategy from template to strategy_path
"""
fallback = 'full'
fallback = "full"
attributes = render_template_with_fallback(
templatefile=f"strategy_subtemplates/strategy_attributes_{subtemplate}.j2",
templatefallbackfile=f"strategy_subtemplates/strategy_attributes_{fallback}.j2",
@@ -64,33 +64,35 @@ def deploy_new_strategy(strategy_name: str, strategy_path: Path, subtemplate: st
templatefallbackfile="strategy_subtemplates/strategy_methods_empty.j2",
)
strategy_text = render_template(templatefile='base_strategy.py.j2',
arguments={"strategy": strategy_name,
"attributes": attributes,
"indicators": indicators,
"buy_trend": buy_trend,
"sell_trend": sell_trend,
"plot_config": plot_config,
"additional_methods": additional_methods,
})
strategy_text = render_template(
templatefile="base_strategy.py.j2",
arguments={
"strategy": strategy_name,
"attributes": attributes,
"indicators": indicators,
"buy_trend": buy_trend,
"sell_trend": sell_trend,
"plot_config": plot_config,
"additional_methods": additional_methods,
},
)
logger.info(f"Writing strategy to `{strategy_path}`.")
strategy_path.write_text(strategy_text)
def start_new_strategy(args: Dict[str, Any]) -> None:
config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)
if "strategy" in args and args["strategy"]:
new_path = config['user_data_dir'] / USERPATH_STRATEGIES / (args['strategy'] + '.py')
new_path = config["user_data_dir"] / USERPATH_STRATEGIES / (args["strategy"] + ".py")
if new_path.exists():
raise OperationalException(f"`{new_path}` already exists. "
"Please choose another Strategy Name.")
raise OperationalException(
f"`{new_path}` already exists. Please choose another Strategy Name."
)
deploy_new_strategy(args['strategy'], new_path, args['template'])
deploy_new_strategy(args["strategy"], new_path, args["template"])
else:
raise ConfigurationError("`new-strategy` requires --strategy to be set.")
@@ -100,8 +102,8 @@ def clean_ui_subdir(directory: Path):
if directory.is_dir():
logger.info("Removing UI directory content.")
for p in reversed(list(directory.glob('**/*'))): # iterate contents from leaves to root
if p.name in ('.gitkeep', 'fallback_file.html'):
for p in reversed(list(directory.glob("**/*"))): # iterate contents from leaves to root
if p.name in (".gitkeep", "fallback_file.html"):
continue
if p.is_file():
p.unlink()
@@ -110,11 +112,11 @@ def clean_ui_subdir(directory: Path):
def read_ui_version(dest_folder: Path) -> Optional[str]:
file = dest_folder / '.uiversion'
file = dest_folder / ".uiversion"
if not file.is_file():
return None
with file.open('r') as f:
with file.open("r") as f:
return f.read()
@@ -133,12 +135,12 @@ def download_and_install_ui(dest_folder: Path, dl_url: str, version: str):
destfile.mkdir(exist_ok=True)
else:
destfile.write_bytes(x.read())
with (dest_folder / '.uiversion').open('w') as f:
with (dest_folder / ".uiversion").open("w") as f:
f.write(version)
def get_ui_download_url(version: Optional[str] = None) -> Tuple[str, str]:
base_url = 'https://api.github.com/repos/freqtrade/frequi/'
base_url = "https://api.github.com/repos/freqtrade/frequi/"
# Get base UI Repo path
resp = requests.get(f"{base_url}releases", timeout=req_timeout)
@@ -146,42 +148,41 @@ def get_ui_download_url(version: Optional[str] = None) -> Tuple[str, str]:
r = resp.json()
if version:
tmp = [x for x in r if x['name'] == version]
tmp = [x for x in r if x["name"] == version]
if tmp:
latest_version = tmp[0]['name']
assets = tmp[0].get('assets', [])
latest_version = tmp[0]["name"]
assets = tmp[0].get("assets", [])
else:
raise ValueError("UI-Version not found.")
else:
latest_version = r[0]['name']
assets = r[0].get('assets', [])
dl_url = ''
latest_version = r[0]["name"]
assets = r[0].get("assets", [])
dl_url = ""
if assets and len(assets) > 0:
dl_url = assets[0]['browser_download_url']
dl_url = assets[0]["browser_download_url"]
# URL not found - try assets url
if not dl_url:
assets = r[0]['assets_url']
assets = r[0]["assets_url"]
resp = requests.get(assets, timeout=req_timeout)
r = resp.json()
dl_url = r[0]['browser_download_url']
dl_url = r[0]["browser_download_url"]
return dl_url, latest_version
def start_install_ui(args: Dict[str, Any]) -> None:
dest_folder = Path(__file__).parents[1] / 'rpc/api_server/ui/installed/'
dest_folder = Path(__file__).parents[1] / "rpc/api_server/ui/installed/"
# First make sure the assets are removed.
dl_url, latest_version = get_ui_download_url(args.get('ui_version'))
dl_url, latest_version = get_ui_download_url(args.get("ui_version"))
curr_version = read_ui_version(dest_folder)
if curr_version == latest_version and not args.get('erase_ui_only'):
if curr_version == latest_version and not args.get("erase_ui_only"):
logger.info(f"UI already up-to-date, FreqUI Version {curr_version}.")
return
clean_ui_subdir(dest_folder)
if args.get('erase_ui_only'):
if args.get("erase_ui_only"):
logger.info("Erased UI directory content. Not downloading new version.")
else:
# Download a new version

View File

@@ -22,15 +22,15 @@ def start_hyperopt_list(args: Dict[str, Any]) -> None:
config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)
print_colorized = config.get('print_colorized', False)
print_json = config.get('print_json', False)
export_csv = config.get('export_csv')
no_details = config.get('hyperopt_list_no_details', False)
print_colorized = config.get("print_colorized", False)
print_json = config.get("print_json", False)
export_csv = config.get("export_csv")
no_details = config.get("hyperopt_list_no_details", False)
no_header = False
results_file = get_latest_hyperopt_file(
config['user_data_dir'] / 'hyperopt_results',
config.get('hyperoptexportfilename'))
config["user_data_dir"] / "hyperopt_results", config.get("hyperoptexportfilename")
)
# Previous evaluations
epochs, total_epochs = HyperoptTools.load_filtered_results(results_file, config)
@@ -40,21 +40,26 @@ def start_hyperopt_list(args: Dict[str, Any]) -> None:
if not export_csv:
try:
print(HyperoptTools.get_result_table(config, epochs, total_epochs,
not config.get('hyperopt_list_best', False),
print_colorized, 0))
print(
HyperoptTools.get_result_table(
config,
epochs,
total_epochs,
not config.get("hyperopt_list_best", False),
print_colorized,
0,
)
)
except KeyboardInterrupt:
print('User interrupted..')
print("User interrupted..")
if epochs and not no_details:
sorted_epochs = sorted(epochs, key=itemgetter('loss'))
sorted_epochs = sorted(epochs, key=itemgetter("loss"))
results = sorted_epochs[0]
HyperoptTools.show_epoch_details(results, total_epochs, print_json, no_header)
if epochs and export_csv:
HyperoptTools.export_csv_file(
config, epochs, export_csv
)
HyperoptTools.export_csv_file(config, epochs, export_csv)
def start_hyperopt_show(args: Dict[str, Any]) -> None:
@@ -65,13 +70,13 @@ def start_hyperopt_show(args: Dict[str, Any]) -> None:
config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)
print_json = config.get('print_json', False)
no_header = config.get('hyperopt_show_no_header', False)
print_json = config.get("print_json", False)
no_header = config.get("hyperopt_show_no_header", False)
results_file = get_latest_hyperopt_file(
config['user_data_dir'] / 'hyperopt_results',
config.get('hyperoptexportfilename'))
config["user_data_dir"] / "hyperopt_results", config.get("hyperoptexportfilename")
)
n = config.get('hyperopt_show_index', -1)
n = config.get("hyperopt_show_index", -1)
# Previous evaluations
epochs, total_epochs = HyperoptTools.load_filtered_results(results_file, config)
@@ -80,10 +85,12 @@ def start_hyperopt_show(args: Dict[str, Any]) -> None:
if n > filtered_epochs:
raise OperationalException(
f"The index of the epoch to show should be less than {filtered_epochs + 1}.")
f"The index of the epoch to show should be less than {filtered_epochs + 1}."
)
if n < -filtered_epochs:
raise OperationalException(
f"The index of the epoch to show should be greater than {-filtered_epochs - 1}.")
f"The index of the epoch to show should be greater than {-filtered_epochs - 1}."
)
# Translate epoch index from human-readable format to pythonic
if n > 0:
@@ -92,13 +99,18 @@ def start_hyperopt_show(args: Dict[str, Any]) -> None:
if epochs:
val = epochs[n]
metrics = val['results_metrics']
if 'strategy_name' in metrics:
strategy_name = metrics['strategy_name']
show_backtest_result(strategy_name, metrics,
metrics['stake_currency'], config.get('backtest_breakdown', []))
metrics = val["results_metrics"]
if "strategy_name" in metrics:
strategy_name = metrics["strategy_name"]
show_backtest_result(
strategy_name,
metrics,
metrics["stake_currency"],
config.get("backtest_breakdown", []),
)
HyperoptTools.try_export_params(config, strategy_name, val)
HyperoptTools.show_epoch_details(val, total_epochs, print_json, no_header,
header_str="Epoch details")
HyperoptTools.show_epoch_details(
val, total_epochs, print_json, no_header, header_str="Epoch details"
)

View File

@@ -26,42 +26,47 @@ def start_list_exchanges(args: Dict[str, Any]) -> None:
:param args: Cli args from Arguments()
:return: None
"""
exchanges = list_available_exchanges(args['list_exchanges_all'])
exchanges = list_available_exchanges(args["list_exchanges_all"])
if args['print_one_column']:
print('\n'.join([e['name'] for e in exchanges]))
if args["print_one_column"]:
print("\n".join([e["name"] for e in exchanges]))
else:
headers = {
'name': 'Exchange name',
'supported': 'Supported',
'trade_modes': 'Markets',
'comment': 'Reason',
}
headers.update({'valid': 'Valid'} if args['list_exchanges_all'] else {})
"name": "Exchange name",
"supported": "Supported",
"trade_modes": "Markets",
"comment": "Reason",
}
headers.update({"valid": "Valid"} if args["list_exchanges_all"] else {})
def build_entry(exchange: ValidExchangesType, valid: bool):
valid_entry = {'valid': exchange['valid']} if valid else {}
valid_entry = {"valid": exchange["valid"]} if valid else {}
result: Dict[str, Union[str, bool]] = {
'name': exchange['name'],
"name": exchange["name"],
**valid_entry,
'supported': 'Official' if exchange['supported'] else '',
'trade_modes': ', '.join(
(f"{a['margin_mode']} " if a['margin_mode'] else '') + a['trading_mode']
for a in exchange['trade_modes']
"supported": "Official" if exchange["supported"] else "",
"trade_modes": ", ".join(
(f"{a['margin_mode']} " if a["margin_mode"] else "") + a["trading_mode"]
for a in exchange["trade_modes"]
),
'comment': exchange['comment'],
"comment": exchange["comment"],
}
return result
if args['list_exchanges_all']:
if args["list_exchanges_all"]:
print("All exchanges supported by the ccxt library:")
exchanges = [build_entry(e, True) for e in exchanges]
else:
print("Exchanges available for Freqtrade:")
exchanges = [build_entry(e, False) for e in exchanges if e['valid'] is not False]
exchanges = [build_entry(e, False) for e in exchanges if e["valid"] is not False]
print(tabulate(exchanges, headers=headers, ))
print(
tabulate(
exchanges,
headers=headers,
)
)
def _print_objs_tabular(objs: List, print_colorized: bool) -> None:
@@ -71,26 +76,35 @@ def _print_objs_tabular(objs: List, print_colorized: bool) -> None:
yellow = Fore.YELLOW
reset = Style.RESET_ALL
else:
red = ''
yellow = ''
reset = ''
red = ""
yellow = ""
reset = ""
names = [s['name'] for s in objs]
objs_to_print = [{
'name': s['name'] if s['name'] else "--",
'location': s['location_rel'],
'status': (red + "LOAD FAILED" + reset if s['class'] is None
else "OK" if names.count(s['name']) == 1
else yellow + "DUPLICATE NAME" + reset)
} for s in objs]
names = [s["name"] for s in objs]
objs_to_print = [
{
"name": s["name"] if s["name"] else "--",
"location": s["location_rel"],
"status": (
red + "LOAD FAILED" + reset
if s["class"] is None
else "OK"
if names.count(s["name"]) == 1
else yellow + "DUPLICATE NAME" + reset
),
}
for s in objs
]
for idx, s in enumerate(objs):
if 'hyperoptable' in s:
objs_to_print[idx].update({
'hyperoptable': "Yes" if s['hyperoptable']['count'] > 0 else "No",
'buy-Params': len(s['hyperoptable'].get('buy', [])),
'sell-Params': len(s['hyperoptable'].get('sell', [])),
})
print(tabulate(objs_to_print, headers='keys', tablefmt='psql', stralign='right'))
if "hyperoptable" in s:
objs_to_print[idx].update(
{
"hyperoptable": "Yes" if s["hyperoptable"]["count"] > 0 else "No",
"buy-Params": len(s["hyperoptable"].get("buy", [])),
"sell-Params": len(s["hyperoptable"].get("sell", [])),
}
)
print(tabulate(objs_to_print, headers="keys", tablefmt="psql", stralign="right"))
def start_list_strategies(args: Dict[str, Any]) -> None:
@@ -100,19 +114,20 @@ def start_list_strategies(args: Dict[str, Any]) -> None:
config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)
strategy_objs = StrategyResolver.search_all_objects(
config, not args['print_one_column'], config.get('recursive_strategy_search', False))
config, not args["print_one_column"], config.get("recursive_strategy_search", False)
)
# Sort alphabetically
strategy_objs = sorted(strategy_objs, key=lambda x: x['name'])
strategy_objs = sorted(strategy_objs, key=lambda x: x["name"])
for obj in strategy_objs:
if obj['class']:
obj['hyperoptable'] = obj['class'].detect_all_parameters()
if obj["class"]:
obj["hyperoptable"] = obj["class"].detect_all_parameters()
else:
obj['hyperoptable'] = {'count': 0}
obj["hyperoptable"] = {"count": 0}
if args['print_one_column']:
print('\n'.join([s['name'] for s in strategy_objs]))
if args["print_one_column"]:
print("\n".join([s["name"] for s in strategy_objs]))
else:
_print_objs_tabular(strategy_objs, config.get('print_colorized', False))
_print_objs_tabular(strategy_objs, config.get("print_colorized", False))
def start_list_freqAI_models(args: Dict[str, Any]) -> None:
@@ -121,13 +136,14 @@ def start_list_freqAI_models(args: Dict[str, Any]) -> None:
"""
config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)
from freqtrade.resolvers.freqaimodel_resolver import FreqaiModelResolver
model_objs = FreqaiModelResolver.search_all_objects(config, not args['print_one_column'])
model_objs = FreqaiModelResolver.search_all_objects(config, not args["print_one_column"])
# Sort alphabetically
model_objs = sorted(model_objs, key=lambda x: x['name'])
if args['print_one_column']:
print('\n'.join([s['name'] for s in model_objs]))
model_objs = sorted(model_objs, key=lambda x: x["name"])
if args["print_one_column"]:
print("\n".join([s["name"] for s in model_objs]))
else:
_print_objs_tabular(model_objs, config.get('print_colorized', False))
_print_objs_tabular(model_objs, config.get("print_colorized", False))
def start_list_timeframes(args: Dict[str, Any]) -> None:
@@ -136,16 +152,18 @@ def start_list_timeframes(args: Dict[str, Any]) -> None:
"""
config = setup_utils_configuration(args, RunMode.UTIL_EXCHANGE)
# Do not use timeframe set in the config
config['timeframe'] = None
config["timeframe"] = None
# Init exchange
exchange = ExchangeResolver.load_exchange(config, validate=False)
if args['print_one_column']:
print('\n'.join(exchange.timeframes))
if args["print_one_column"]:
print("\n".join(exchange.timeframes))
else:
print(f"Timeframes available for the exchange `{exchange.name}`: "
f"{', '.join(exchange.timeframes)}")
print(
f"Timeframes available for the exchange `{exchange.name}`: "
f"{', '.join(exchange.timeframes)}"
)
def start_list_markets(args: Dict[str, Any], pairs_only: bool = False) -> None:
@@ -161,51 +179,75 @@ def start_list_markets(args: Dict[str, Any], pairs_only: bool = False) -> None:
exchange = ExchangeResolver.load_exchange(config, validate=False)
# By default only active pairs/markets are to be shown
active_only = not args.get('list_pairs_all', False)
active_only = not args.get("list_pairs_all", False)
base_currencies = args.get('base_currencies', [])
quote_currencies = args.get('quote_currencies', [])
base_currencies = args.get("base_currencies", [])
quote_currencies = args.get("quote_currencies", [])
try:
pairs = exchange.get_markets(base_currencies=base_currencies,
quote_currencies=quote_currencies,
tradable_only=pairs_only,
active_only=active_only)
pairs = exchange.get_markets(
base_currencies=base_currencies,
quote_currencies=quote_currencies,
tradable_only=pairs_only,
active_only=active_only,
)
# Sort the pairs/markets by symbol
pairs = dict(sorted(pairs.items()))
except Exception as e:
raise OperationalException(f"Cannot get markets. Reason: {e}") from e
else:
summary_str = ((f"Exchange {exchange.name} has {len(pairs)} ") +
("active " if active_only else "") +
(plural(len(pairs), "pair" if pairs_only else "market")) +
(f" with {', '.join(base_currencies)} as base "
f"{plural(len(base_currencies), 'currency', 'currencies')}"
if base_currencies else "") +
(" and" if base_currencies and quote_currencies else "") +
(f" with {', '.join(quote_currencies)} as quote "
f"{plural(len(quote_currencies), 'currency', 'currencies')}"
if quote_currencies else ""))
summary_str = (
(f"Exchange {exchange.name} has {len(pairs)} ")
+ ("active " if active_only else "")
+ (plural(len(pairs), "pair" if pairs_only else "market"))
+ (
f" with {', '.join(base_currencies)} as base "
f"{plural(len(base_currencies), 'currency', 'currencies')}"
if base_currencies
else ""
)
+ (" and" if base_currencies and quote_currencies else "")
+ (
f" with {', '.join(quote_currencies)} as quote "
f"{plural(len(quote_currencies), 'currency', 'currencies')}"
if quote_currencies
else ""
)
)
headers = ["Id", "Symbol", "Base", "Quote", "Active",
"Spot", "Margin", "Future", "Leverage"]
headers = [
"Id",
"Symbol",
"Base",
"Quote",
"Active",
"Spot",
"Margin",
"Future",
"Leverage",
]
tabular_data = [{
'Id': v['id'],
'Symbol': v['symbol'],
'Base': v['base'],
'Quote': v['quote'],
'Active': market_is_active(v),
'Spot': 'Spot' if exchange.market_is_spot(v) else '',
'Margin': 'Margin' if exchange.market_is_margin(v) else '',
'Future': 'Future' if exchange.market_is_future(v) else '',
'Leverage': exchange.get_max_leverage(v['symbol'], 20)
} for _, v in pairs.items()]
tabular_data = [
{
"Id": v["id"],
"Symbol": v["symbol"],
"Base": v["base"],
"Quote": v["quote"],
"Active": market_is_active(v),
"Spot": "Spot" if exchange.market_is_spot(v) else "",
"Margin": "Margin" if exchange.market_is_margin(v) else "",
"Future": "Future" if exchange.market_is_future(v) else "",
"Leverage": exchange.get_max_leverage(v["symbol"], 20),
}
for _, v in pairs.items()
]
if (args.get('print_one_column', False) or
args.get('list_pairs_print_json', False) or
args.get('print_csv', False)):
if (
args.get("print_one_column", False)
or args.get("list_pairs_print_json", False)
or args.get("print_csv", False)
):
# Print summary string in the log in case of machine-readable
# regular formats.
logger.info(f"{summary_str}.")
@@ -215,24 +257,26 @@ def start_list_markets(args: Dict[str, Any], pairs_only: bool = False) -> None:
print()
if pairs:
if args.get('print_list', False):
if args.get("print_list", False):
# print data as a list, with human-readable summary
print(f"{summary_str}: {', '.join(pairs.keys())}.")
elif args.get('print_one_column', False):
print('\n'.join(pairs.keys()))
elif args.get('list_pairs_print_json', False):
elif args.get("print_one_column", False):
print("\n".join(pairs.keys()))
elif args.get("list_pairs_print_json", False):
print(rapidjson.dumps(list(pairs.keys()), default=str))
elif args.get('print_csv', False):
elif args.get("print_csv", False):
writer = csv.DictWriter(sys.stdout, fieldnames=headers)
writer.writeheader()
writer.writerows(tabular_data)
else:
# print data as a table, with the human-readable summary
print(f"{summary_str}:")
print(tabulate(tabular_data, headers='keys', tablefmt='psql', stralign='right'))
elif not (args.get('print_one_column', False) or
args.get('list_pairs_print_json', False) or
args.get('print_csv', False)):
print(tabulate(tabular_data, headers="keys", tablefmt="psql", stralign="right"))
elif not (
args.get("print_one_column", False)
or args.get("list_pairs_print_json", False)
or args.get("print_csv", False)
):
print(f"{summary_str}.")
@@ -243,21 +287,22 @@ def start_show_trades(args: Dict[str, Any]) -> None:
import json
from freqtrade.persistence import Trade, init_db
config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)
if 'db_url' not in config:
if "db_url" not in config:
raise ConfigurationError("--db-url is required for this command.")
logger.info(f'Using DB: "{parse_db_uri_for_logging(config["db_url"])}"')
init_db(config['db_url'])
init_db(config["db_url"])
tfilter = []
if config.get('trade_ids'):
tfilter.append(Trade.id.in_(config['trade_ids']))
if config.get("trade_ids"):
tfilter.append(Trade.id.in_(config["trade_ids"]))
trades = Trade.get_trades(tfilter).all()
logger.info(f"Printing {len(trades)} Trades: ")
if config.get('print_json', False):
if config.get("print_json", False):
print(json.dumps([trade.to_json() for trade in trades], indent=4))
else:
for trade in trades:

View File

@@ -21,20 +21,22 @@ def setup_optimize_configuration(args: Dict[str, Any], method: RunMode) -> Dict[
config = setup_utils_configuration(args, method)
no_unlimited_runmodes = {
RunMode.BACKTEST: 'backtesting',
RunMode.HYPEROPT: 'hyperoptimization',
RunMode.BACKTEST: "backtesting",
RunMode.HYPEROPT: "hyperoptimization",
}
if method in no_unlimited_runmodes.keys():
wallet_size = config['dry_run_wallet'] * config['tradable_balance_ratio']
wallet_size = config["dry_run_wallet"] * config["tradable_balance_ratio"]
# tradable_balance_ratio
if (config['stake_amount'] != constants.UNLIMITED_STAKE_AMOUNT
and config['stake_amount'] > wallet_size):
wallet = fmt_coin(wallet_size, config['stake_currency'])
stake = fmt_coin(config['stake_amount'], config['stake_currency'])
if (
config["stake_amount"] != constants.UNLIMITED_STAKE_AMOUNT
and config["stake_amount"] > wallet_size
):
wallet = fmt_coin(wallet_size, config["stake_currency"])
stake = fmt_coin(config["stake_amount"], config["stake_currency"])
raise ConfigurationError(
f"Starting balance ({wallet}) is smaller than stake_amount {stake}. "
f"Wallet is calculated as `dry_run_wallet * tradable_balance_ratio`."
)
)
return config
@@ -51,7 +53,7 @@ def start_backtesting(args: Dict[str, Any]) -> None:
# Initialize configuration
config = setup_optimize_configuration(args, RunMode.BACKTEST)
logger.info('Starting freqtrade in Backtesting mode')
logger.info("Starting freqtrade in Backtesting mode")
# Initialize backtesting object
backtesting = Backtesting(config)
@@ -68,7 +70,7 @@ def start_backtesting_show(args: Dict[str, Any]) -> None:
from freqtrade.data.btanalysis import load_backtest_stats
from freqtrade.optimize.optimize_reports import show_backtest_results, show_sorted_pairlist
results = load_backtest_stats(config['exportfilename'])
results = load_backtest_stats(config["exportfilename"])
show_backtest_results(config, results)
show_sorted_pairlist(config, results)
@@ -87,20 +89,20 @@ def start_hyperopt(args: Dict[str, Any]) -> None:
from freqtrade.optimize.hyperopt import Hyperopt
except ImportError as e:
raise OperationalException(
f"{e}. Please ensure that the hyperopt dependencies are installed.") from e
f"{e}. Please ensure that the hyperopt dependencies are installed."
) from e
# Initialize configuration
config = setup_optimize_configuration(args, RunMode.HYPEROPT)
logger.info('Starting freqtrade in Hyperopt mode')
logger.info("Starting freqtrade in Hyperopt mode")
lock = FileLock(Hyperopt.get_lock_filename(config))
try:
with lock.acquire(timeout=1):
# Remove noisy log messages
logging.getLogger('hyperopt.tpe').setLevel(logging.WARNING)
logging.getLogger('filelock').setLevel(logging.WARNING)
logging.getLogger("hyperopt.tpe").setLevel(logging.WARNING)
logging.getLogger("filelock").setLevel(logging.WARNING)
# Initialize backtesting object
hyperopt = Hyperopt(config)
@@ -108,9 +110,11 @@ def start_hyperopt(args: Dict[str, Any]) -> None:
except Timeout:
logger.info("Another running instance of freqtrade Hyperopt detected.")
logger.info("Simultaneous execution of multiple Hyperopt commands is not supported. "
"Hyperopt module is resource hungry. Please run your Hyperopt sequentially "
"or on separate machines.")
logger.info(
"Simultaneous execution of multiple Hyperopt commands is not supported. "
"Hyperopt module is resource hungry. Please run your Hyperopt sequentially "
"or on separate machines."
)
logger.info("Quitting now.")
# TODO: return False here in order to help freqtrade to exit
# with non-zero exit code...
@@ -127,7 +131,7 @@ def start_edge(args: Dict[str, Any]) -> None:
# Initialize configuration
config = setup_optimize_configuration(args, RunMode.EDGE)
logger.info('Starting freqtrade in Edge mode')
logger.info("Starting freqtrade in Edge mode")
# Initialize Edge object
edge_cli = EdgeCli(config)

View File

@@ -17,28 +17,29 @@ def start_test_pairlist(args: Dict[str, Any]) -> None:
"""
from freqtrade.persistence import FtNoDBContext
from freqtrade.plugins.pairlistmanager import PairListManager
config = setup_utils_configuration(args, RunMode.UTIL_EXCHANGE)
exchange = ExchangeResolver.load_exchange(config, validate=False)
quote_currencies = args.get('quote_currencies')
quote_currencies = args.get("quote_currencies")
if not quote_currencies:
quote_currencies = [config.get('stake_currency')]
quote_currencies = [config.get("stake_currency")]
results = {}
with FtNoDBContext():
for curr in quote_currencies:
config['stake_currency'] = curr
config["stake_currency"] = curr
pairlists = PairListManager(exchange, config)
pairlists.refresh_pairlist()
results[curr] = pairlists.whitelist
for curr, pairlist in results.items():
if not args.get('print_one_column', False) and not args.get('list_pairs_print_json', False):
if not args.get("print_one_column", False) and not args.get("list_pairs_print_json", False):
print(f"Pairs for {curr}: ")
if args.get('print_one_column', False):
print('\n'.join(pairlist))
elif args.get('list_pairs_print_json', False):
if args.get("print_one_column", False):
print("\n".join(pairlist))
elif args.get("list_pairs_print_json", False):
print(rapidjson.dumps(list(pairlist), default=str))
else:
print(pairlist)

View File

@@ -6,10 +6,11 @@ from freqtrade.exceptions import ConfigurationError
def validate_plot_args(args: Dict[str, Any]) -> None:
if not args.get('datadir') and not args.get('config'):
if not args.get("datadir") and not args.get("config"):
raise ConfigurationError(
"You need to specify either `--datadir` or `--config` "
"for plot-profit and plot-dataframe.")
"for plot-profit and plot-dataframe."
)
def start_plot_dataframe(args: Dict[str, Any]) -> None:
@@ -18,6 +19,7 @@ def start_plot_dataframe(args: Dict[str, Any]) -> None:
"""
# Import here to avoid errors if plot-dependencies are not installed.
from freqtrade.plot.plotting import load_and_plot_trades
validate_plot_args(args)
config = setup_utils_configuration(args, RunMode.PLOT)
@@ -30,6 +32,7 @@ def start_plot_profit(args: Dict[str, Any]) -> None:
"""
# Import here to avoid errors if plot-dependencies are not installed.
from freqtrade.plot.plotting import plot_profit
validate_plot_args(args)
config = setup_utils_configuration(args, RunMode.PLOT)

View File

@@ -26,13 +26,15 @@ def start_strategy_update(args: Dict[str, Any]) -> None:
config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)
strategy_objs = StrategyResolver.search_all_objects(
config, enum_failed=False, recursive=config.get('recursive_strategy_search', False))
config, enum_failed=False, recursive=config.get("recursive_strategy_search", False)
)
filtered_strategy_objs = []
if args['strategy_list']:
if args["strategy_list"]:
filtered_strategy_objs = [
strategy_obj for strategy_obj in strategy_objs
if strategy_obj['name'] in args['strategy_list']
strategy_obj
for strategy_obj in strategy_objs
if strategy_obj["name"] in args["strategy_list"]
]
else:
@@ -41,8 +43,8 @@ def start_strategy_update(args: Dict[str, Any]) -> None:
processed_locations = set()
for strategy_obj in filtered_strategy_objs:
if strategy_obj['location'] not in processed_locations:
processed_locations.add(strategy_obj['location'])
if strategy_obj["location"] not in processed_locations:
processed_locations.add(strategy_obj["location"])
start_conversion(strategy_obj, config)

View File

@@ -24,13 +24,13 @@ def sanitize_config(config: Config, *, show_sensitive: bool = False) -> Config:
]
config = deepcopy(config)
for key in keys_to_remove:
if '.' in key:
nested_keys = key.split('.')
if "." in key:
nested_keys = key.split(".")
nested_config = config
for nested_key in nested_keys[:-1]:
nested_config = nested_config.get(nested_key, {})
nested_config[nested_keys[-1]] = 'REDACTED'
nested_config[nested_keys[-1]] = "REDACTED"
else:
config[key] = 'REDACTED'
config[key] = "REDACTED"
return config

View File

@@ -11,7 +11,8 @@ logger = logging.getLogger(__name__)
def setup_utils_configuration(
args: Dict[str, Any], method: RunMode, *, set_dry: bool = True) -> Dict[str, Any]:
args: Dict[str, Any], method: RunMode, *, set_dry: bool = True
) -> Dict[str, Any]:
"""
Prepare the configuration for utils subcommands
:param args: Cli args from Arguments()
@@ -23,7 +24,7 @@ def setup_utils_configuration(
# Ensure these modes are using Dry-run
if set_dry:
config['dry_run'] = True
config["dry_run"] = True
validate_config_consistency(config, preliminary=True)
return config

View File

@@ -20,18 +20,16 @@ def _extend_validator(validator_class):
Extended validator for the Freqtrade configuration JSON Schema.
Currently it only handles defaults for subschemas.
"""
validate_properties = validator_class.VALIDATORS['properties']
validate_properties = validator_class.VALIDATORS["properties"]
def set_defaults(validator, properties, instance, schema):
for prop, subschema in properties.items():
if 'default' in subschema:
instance.setdefault(prop, subschema['default'])
if "default" in subschema:
instance.setdefault(prop, subschema["default"])
yield from validate_properties(validator, properties, instance, schema)
return validators.extend(
validator_class, {'properties': set_defaults}
)
return validators.extend(validator_class, {"properties": set_defaults})
FreqtradeValidator = _extend_validator(Draft4Validator)
@@ -44,27 +42,23 @@ def validate_config_schema(conf: Dict[str, Any], preliminary: bool = False) -> D
:return: Returns the config if valid, otherwise throw an exception
"""
conf_schema = deepcopy(constants.CONF_SCHEMA)
if conf.get('runmode', RunMode.OTHER) in (RunMode.DRY_RUN, RunMode.LIVE):
conf_schema['required'] = constants.SCHEMA_TRADE_REQUIRED
elif conf.get('runmode', RunMode.OTHER) in (RunMode.BACKTEST, RunMode.HYPEROPT):
if conf.get("runmode", RunMode.OTHER) in (RunMode.DRY_RUN, RunMode.LIVE):
conf_schema["required"] = constants.SCHEMA_TRADE_REQUIRED
elif conf.get("runmode", RunMode.OTHER) in (RunMode.BACKTEST, RunMode.HYPEROPT):
if preliminary:
conf_schema['required'] = constants.SCHEMA_BACKTEST_REQUIRED
conf_schema["required"] = constants.SCHEMA_BACKTEST_REQUIRED
else:
conf_schema['required'] = constants.SCHEMA_BACKTEST_REQUIRED_FINAL
elif conf.get('runmode', RunMode.OTHER) == RunMode.WEBSERVER:
conf_schema['required'] = constants.SCHEMA_MINIMAL_WEBSERVER
conf_schema["required"] = constants.SCHEMA_BACKTEST_REQUIRED_FINAL
elif conf.get("runmode", RunMode.OTHER) == RunMode.WEBSERVER:
conf_schema["required"] = constants.SCHEMA_MINIMAL_WEBSERVER
else:
conf_schema['required'] = constants.SCHEMA_MINIMAL_REQUIRED
conf_schema["required"] = constants.SCHEMA_MINIMAL_REQUIRED
try:
FreqtradeValidator(conf_schema).validate(conf)
return conf
except ValidationError as e:
logger.critical(
f"Invalid configuration. Reason: {e}"
)
raise ValidationError(
best_match(Draft4Validator(conf_schema).iter_errors(conf)).message
)
logger.critical(f"Invalid configuration. Reason: {e}")
raise ValidationError(best_match(Draft4Validator(conf_schema).iter_errors(conf)).message)
def validate_config_consistency(conf: Dict[str, Any], *, preliminary: bool = False) -> None:
@@ -91,7 +85,7 @@ def validate_config_consistency(conf: Dict[str, Any], *, preliminary: bool = Fal
validate_migrated_strategy_settings(conf)
# validate configuration before returning
logger.info('Validating configuration ...')
logger.info("Validating configuration ...")
validate_config_schema(conf, preliminary=preliminary)
@@ -100,9 +94,11 @@ def _validate_unlimited_amount(conf: Dict[str, Any]) -> None:
If edge is disabled, either max_open_trades or stake_amount need to be set.
:raise: ConfigurationError if config validation failed
"""
if (not conf.get('edge', {}).get('enabled')
and conf.get('max_open_trades') == float('inf')
and conf.get('stake_amount') == constants.UNLIMITED_STAKE_AMOUNT):
if (
not conf.get("edge", {}).get("enabled")
and conf.get("max_open_trades") == float("inf")
and conf.get("stake_amount") == constants.UNLIMITED_STAKE_AMOUNT
):
raise ConfigurationError("`max_open_trades` and `stake_amount` cannot both be unlimited.")
@@ -111,45 +107,47 @@ def _validate_price_config(conf: Dict[str, Any]) -> None:
When using market orders, price sides must be using the "other" side of the price
"""
# TODO: The below could be an enforced setting when using market orders
if (conf.get('order_types', {}).get('entry') == 'market'
and conf.get('entry_pricing', {}).get('price_side') not in ('ask', 'other')):
raise ConfigurationError(
'Market entry orders require entry_pricing.price_side = "other".')
if conf.get("order_types", {}).get("entry") == "market" and conf.get("entry_pricing", {}).get(
"price_side"
) not in ("ask", "other"):
raise ConfigurationError('Market entry orders require entry_pricing.price_side = "other".')
if (conf.get('order_types', {}).get('exit') == 'market'
and conf.get('exit_pricing', {}).get('price_side') not in ('bid', 'other')):
if conf.get("order_types", {}).get("exit") == "market" and conf.get("exit_pricing", {}).get(
"price_side"
) not in ("bid", "other"):
raise ConfigurationError('Market exit orders require exit_pricing.price_side = "other".')
def _validate_trailing_stoploss(conf: Dict[str, Any]) -> None:
if conf.get('stoploss') == 0.0:
if conf.get("stoploss") == 0.0:
raise ConfigurationError(
'The config stoploss needs to be different from 0 to avoid problems with sell orders.'
"The config stoploss needs to be different from 0 to avoid problems with sell orders."
)
# Skip if trailing stoploss is not activated
if not conf.get('trailing_stop', False):
if not conf.get("trailing_stop", False):
return
tsl_positive = float(conf.get('trailing_stop_positive', 0))
tsl_offset = float(conf.get('trailing_stop_positive_offset', 0))
tsl_only_offset = conf.get('trailing_only_offset_is_reached', False)
tsl_positive = float(conf.get("trailing_stop_positive", 0))
tsl_offset = float(conf.get("trailing_stop_positive_offset", 0))
tsl_only_offset = conf.get("trailing_only_offset_is_reached", False)
if tsl_only_offset:
if tsl_positive == 0.0:
raise ConfigurationError(
'The config trailing_only_offset_is_reached needs '
'trailing_stop_positive_offset to be more than 0 in your config.')
"The config trailing_only_offset_is_reached needs "
"trailing_stop_positive_offset to be more than 0 in your config."
)
if tsl_positive > 0 and 0 < tsl_offset <= tsl_positive:
raise ConfigurationError(
'The config trailing_stop_positive_offset needs '
'to be greater than trailing_stop_positive in your config.')
"The config trailing_stop_positive_offset needs "
"to be greater than trailing_stop_positive in your config."
)
# Fetch again without default
if 'trailing_stop_positive' in conf and float(conf['trailing_stop_positive']) == 0.0:
if "trailing_stop_positive" in conf and float(conf["trailing_stop_positive"]) == 0.0:
raise ConfigurationError(
'The config trailing_stop_positive needs to be different from 0 '
'to avoid problems with sell orders.'
"The config trailing_stop_positive needs to be different from 0 "
"to avoid problems with sell orders."
)
@@ -158,10 +156,10 @@ def _validate_edge(conf: Dict[str, Any]) -> None:
Edge and Dynamic whitelist should not both be enabled, since edge overrides dynamic whitelists.
"""
if not conf.get('edge', {}).get('enabled'):
if not conf.get("edge", {}).get("enabled"):
return
if not conf.get('use_exit_signal', True):
if not conf.get("use_exit_signal", True):
raise ConfigurationError(
"Edge requires `use_exit_signal` to be True, otherwise no sells will happen."
)
@@ -171,13 +169,20 @@ def _validate_whitelist(conf: Dict[str, Any]) -> None:
"""
Dynamic whitelist does not require pair_whitelist to be set - however StaticWhitelist does.
"""
if conf.get('runmode', RunMode.OTHER) in [RunMode.OTHER, RunMode.PLOT,
RunMode.UTIL_NO_EXCHANGE, RunMode.UTIL_EXCHANGE]:
if conf.get("runmode", RunMode.OTHER) in [
RunMode.OTHER,
RunMode.PLOT,
RunMode.UTIL_NO_EXCHANGE,
RunMode.UTIL_EXCHANGE,
]:
return
for pl in conf.get('pairlists', [{'method': 'StaticPairList'}]):
if (isinstance(pl, dict) and pl.get('method') == 'StaticPairList'
and not conf.get('exchange', {}).get('pair_whitelist')):
for pl in conf.get("pairlists", [{"method": "StaticPairList"}]):
if (
isinstance(pl, dict)
and pl.get("method") == "StaticPairList"
and not conf.get("exchange", {}).get("pair_whitelist")
):
raise ConfigurationError("StaticPairList requires pair_whitelist to be set.")
@@ -186,14 +191,14 @@ def _validate_protections(conf: Dict[str, Any]) -> None:
Validate protection configuration validity
"""
for prot in conf.get('protections', []):
if ('stop_duration' in prot and 'stop_duration_candles' in prot):
for prot in conf.get("protections", []):
if "stop_duration" in prot and "stop_duration_candles" in prot:
raise ConfigurationError(
"Protections must specify either `stop_duration` or `stop_duration_candles`.\n"
f"Please fix the protection {prot.get('method')}"
)
if ('lookback_period' in prot and 'lookback_period_candles' in prot):
if "lookback_period" in prot and "lookback_period_candles" in prot:
raise ConfigurationError(
"Protections must specify either `lookback_period` or `lookback_period_candles`.\n"
f"Please fix the protection {prot.get('method')}"
@@ -201,10 +206,10 @@ def _validate_protections(conf: Dict[str, Any]) -> None:
def _validate_ask_orderbook(conf: Dict[str, Any]) -> None:
ask_strategy = conf.get('exit_pricing', {})
ob_min = ask_strategy.get('order_book_min')
ob_max = ask_strategy.get('order_book_max')
if ob_min is not None and ob_max is not None and ask_strategy.get('use_order_book'):
ask_strategy = conf.get("exit_pricing", {})
ob_min = ask_strategy.get("order_book_min")
ob_max = ask_strategy.get("order_book_max")
if ob_min is not None and ob_max is not None and ask_strategy.get("use_order_book"):
if ob_min != ob_max:
raise ConfigurationError(
"Using order_book_max != order_book_min in exit_pricing is no longer supported."
@@ -212,7 +217,7 @@ def _validate_ask_orderbook(conf: Dict[str, Any]) -> None:
)
else:
# Move value to order_book_top
ask_strategy['order_book_top'] = ob_min
ask_strategy["order_book_top"] = ob_min
logger.warning(
"DEPRECATED: "
"Please use `order_book_top` instead of `order_book_min` and `order_book_max` "
@@ -221,7 +226,6 @@ def _validate_ask_orderbook(conf: Dict[str, Any]) -> None:
def validate_migrated_strategy_settings(conf: Dict[str, Any]) -> None:
_validate_time_in_force(conf)
_validate_order_types(conf)
_validate_unfilledtimeout(conf)
@@ -230,119 +234,129 @@ def validate_migrated_strategy_settings(conf: Dict[str, Any]) -> None:
def _validate_time_in_force(conf: Dict[str, Any]) -> None:
time_in_force = conf.get('order_time_in_force', {})
if 'buy' in time_in_force or 'sell' in time_in_force:
if conf.get('trading_mode', TradingMode.SPOT) != TradingMode.SPOT:
time_in_force = conf.get("order_time_in_force", {})
if "buy" in time_in_force or "sell" in time_in_force:
if conf.get("trading_mode", TradingMode.SPOT) != TradingMode.SPOT:
raise ConfigurationError(
"Please migrate your time_in_force settings to use 'entry' and 'exit'.")
"Please migrate your time_in_force settings to use 'entry' and 'exit'."
)
else:
logger.warning(
"DEPRECATED: Using 'buy' and 'sell' for time_in_force is deprecated."
"Please migrate your time_in_force settings to use 'entry' and 'exit'."
)
process_deprecated_setting(
conf, 'order_time_in_force', 'buy', 'order_time_in_force', 'entry')
conf, "order_time_in_force", "buy", "order_time_in_force", "entry"
)
process_deprecated_setting(
conf, 'order_time_in_force', 'sell', 'order_time_in_force', 'exit')
conf, "order_time_in_force", "sell", "order_time_in_force", "exit"
)
def _validate_order_types(conf: Dict[str, Any]) -> None:
order_types = conf.get('order_types', {})
old_order_types = ['buy', 'sell', 'emergencysell', 'forcebuy',
'forcesell', 'emergencyexit', 'forceexit', 'forceentry']
order_types = conf.get("order_types", {})
old_order_types = [
"buy",
"sell",
"emergencysell",
"forcebuy",
"forcesell",
"emergencyexit",
"forceexit",
"forceentry",
]
if any(x in order_types for x in old_order_types):
if conf.get('trading_mode', TradingMode.SPOT) != TradingMode.SPOT:
if conf.get("trading_mode", TradingMode.SPOT) != TradingMode.SPOT:
raise ConfigurationError(
"Please migrate your order_types settings to use the new wording.")
"Please migrate your order_types settings to use the new wording."
)
else:
logger.warning(
"DEPRECATED: Using 'buy' and 'sell' for order_types is deprecated."
"Please migrate your order_types settings to use 'entry' and 'exit' wording."
)
for o, n in [
('buy', 'entry'),
('sell', 'exit'),
('emergencysell', 'emergency_exit'),
('forcesell', 'force_exit'),
('forcebuy', 'force_entry'),
('emergencyexit', 'emergency_exit'),
('forceexit', 'force_exit'),
('forceentry', 'force_entry'),
("buy", "entry"),
("sell", "exit"),
("emergencysell", "emergency_exit"),
("forcesell", "force_exit"),
("forcebuy", "force_entry"),
("emergencyexit", "emergency_exit"),
("forceexit", "force_exit"),
("forceentry", "force_entry"),
]:
process_deprecated_setting(conf, 'order_types', o, 'order_types', n)
process_deprecated_setting(conf, "order_types", o, "order_types", n)
def _validate_unfilledtimeout(conf: Dict[str, Any]) -> None:
unfilledtimeout = conf.get('unfilledtimeout', {})
if any(x in unfilledtimeout for x in ['buy', 'sell']):
if conf.get('trading_mode', TradingMode.SPOT) != TradingMode.SPOT:
unfilledtimeout = conf.get("unfilledtimeout", {})
if any(x in unfilledtimeout for x in ["buy", "sell"]):
if conf.get("trading_mode", TradingMode.SPOT) != TradingMode.SPOT:
raise ConfigurationError(
"Please migrate your unfilledtimeout settings to use the new wording.")
"Please migrate your unfilledtimeout settings to use the new wording."
)
else:
logger.warning(
"DEPRECATED: Using 'buy' and 'sell' for unfilledtimeout is deprecated."
"Please migrate your unfilledtimeout settings to use 'entry' and 'exit' wording."
)
for o, n in [
('buy', 'entry'),
('sell', 'exit'),
("buy", "entry"),
("sell", "exit"),
]:
process_deprecated_setting(conf, 'unfilledtimeout', o, 'unfilledtimeout', n)
process_deprecated_setting(conf, "unfilledtimeout", o, "unfilledtimeout", n)
def _validate_pricing_rules(conf: Dict[str, Any]) -> None:
if conf.get('ask_strategy') or conf.get('bid_strategy'):
if conf.get('trading_mode', TradingMode.SPOT) != TradingMode.SPOT:
raise ConfigurationError(
"Please migrate your pricing settings to use the new wording.")
if conf.get("ask_strategy") or conf.get("bid_strategy"):
if conf.get("trading_mode", TradingMode.SPOT) != TradingMode.SPOT:
raise ConfigurationError("Please migrate your pricing settings to use the new wording.")
else:
logger.warning(
"DEPRECATED: Using 'ask_strategy' and 'bid_strategy' is deprecated."
"Please migrate your settings to use 'entry_pricing' and 'exit_pricing'."
)
conf['entry_pricing'] = {}
for obj in list(conf.get('bid_strategy', {}).keys()):
if obj == 'ask_last_balance':
process_deprecated_setting(conf, 'bid_strategy', obj,
'entry_pricing', 'price_last_balance')
conf["entry_pricing"] = {}
for obj in list(conf.get("bid_strategy", {}).keys()):
if obj == "ask_last_balance":
process_deprecated_setting(
conf, "bid_strategy", obj, "entry_pricing", "price_last_balance"
)
else:
process_deprecated_setting(conf, 'bid_strategy', obj, 'entry_pricing', obj)
del conf['bid_strategy']
process_deprecated_setting(conf, "bid_strategy", obj, "entry_pricing", obj)
del conf["bid_strategy"]
conf['exit_pricing'] = {}
for obj in list(conf.get('ask_strategy', {}).keys()):
if obj == 'bid_last_balance':
process_deprecated_setting(conf, 'ask_strategy', obj,
'exit_pricing', 'price_last_balance')
conf["exit_pricing"] = {}
for obj in list(conf.get("ask_strategy", {}).keys()):
if obj == "bid_last_balance":
process_deprecated_setting(
conf, "ask_strategy", obj, "exit_pricing", "price_last_balance"
)
else:
process_deprecated_setting(conf, 'ask_strategy', obj, 'exit_pricing', obj)
del conf['ask_strategy']
process_deprecated_setting(conf, "ask_strategy", obj, "exit_pricing", obj)
del conf["ask_strategy"]
def _validate_freqai_hyperopt(conf: Dict[str, Any]) -> None:
freqai_enabled = conf.get('freqai', {}).get('enabled', False)
analyze_per_epoch = conf.get('analyze_per_epoch', False)
freqai_enabled = conf.get("freqai", {}).get("enabled", False)
analyze_per_epoch = conf.get("analyze_per_epoch", False)
if analyze_per_epoch and freqai_enabled:
raise ConfigurationError(
'Using analyze-per-epoch parameter is not supported with a FreqAI strategy.')
"Using analyze-per-epoch parameter is not supported with a FreqAI strategy."
)
def _validate_freqai_include_timeframes(conf: Dict[str, Any], preliminary: bool) -> None:
freqai_enabled = conf.get('freqai', {}).get('enabled', False)
freqai_enabled = conf.get("freqai", {}).get("enabled", False)
if freqai_enabled:
main_tf = conf.get('timeframe', '5m')
freqai_include_timeframes = conf.get('freqai', {}).get('feature_parameters', {}
).get('include_timeframes', [])
main_tf = conf.get("timeframe", "5m")
freqai_include_timeframes = (
conf.get("freqai", {}).get("feature_parameters", {}).get("include_timeframes", [])
)
from freqtrade.exchange import timeframe_to_seconds
main_tf_s = timeframe_to_seconds(main_tf)
offending_lines = []
for tf in freqai_include_timeframes:
@@ -352,57 +366,65 @@ def _validate_freqai_include_timeframes(conf: Dict[str, Any], preliminary: bool)
if offending_lines:
raise ConfigurationError(
f"Main timeframe of {main_tf} must be smaller or equal to FreqAI "
f"`include_timeframes`.Offending include-timeframes: {', '.join(offending_lines)}")
f"`include_timeframes`.Offending include-timeframes: {', '.join(offending_lines)}"
)
# Ensure that the base timeframe is included in the include_timeframes list
if not preliminary and main_tf not in freqai_include_timeframes:
feature_parameters = conf.get('freqai', {}).get('feature_parameters', {})
feature_parameters = conf.get("freqai", {}).get("feature_parameters", {})
include_timeframes = [main_tf] + freqai_include_timeframes
conf.get('freqai', {}).get('feature_parameters', {}) \
.update({**feature_parameters, 'include_timeframes': include_timeframes})
conf.get("freqai", {}).get("feature_parameters", {}).update(
{**feature_parameters, "include_timeframes": include_timeframes}
)
def _validate_freqai_backtest(conf: Dict[str, Any]) -> None:
if conf.get('runmode', RunMode.OTHER) == RunMode.BACKTEST:
freqai_enabled = conf.get('freqai', {}).get('enabled', False)
timerange = conf.get('timerange')
freqai_backtest_live_models = conf.get('freqai_backtest_live_models', False)
if conf.get("runmode", RunMode.OTHER) == RunMode.BACKTEST:
freqai_enabled = conf.get("freqai", {}).get("enabled", False)
timerange = conf.get("timerange")
freqai_backtest_live_models = conf.get("freqai_backtest_live_models", False)
if freqai_backtest_live_models and freqai_enabled and timerange:
raise ConfigurationError(
'Using timerange parameter is not supported with '
'--freqai-backtest-live-models parameter.')
"Using timerange parameter is not supported with "
"--freqai-backtest-live-models parameter."
)
if freqai_backtest_live_models and not freqai_enabled:
raise ConfigurationError(
'Using --freqai-backtest-live-models parameter is only '
'supported with a FreqAI strategy.')
"Using --freqai-backtest-live-models parameter is only "
"supported with a FreqAI strategy."
)
if freqai_enabled and not freqai_backtest_live_models and not timerange:
raise ConfigurationError(
'Please pass --timerange if you intend to use FreqAI for backtesting.')
"Please pass --timerange if you intend to use FreqAI for backtesting."
)
def _validate_consumers(conf: Dict[str, Any]) -> None:
emc_conf = conf.get('external_message_consumer', {})
if emc_conf.get('enabled', False):
if len(emc_conf.get('producers', [])) < 1:
emc_conf = conf.get("external_message_consumer", {})
if emc_conf.get("enabled", False):
if len(emc_conf.get("producers", [])) < 1:
raise ConfigurationError("You must specify at least 1 Producer to connect to.")
producer_names = [p['name'] for p in emc_conf.get('producers', [])]
producer_names = [p["name"] for p in emc_conf.get("producers", [])]
duplicates = [item for item, count in Counter(producer_names).items() if count > 1]
if duplicates:
raise ConfigurationError(
f"Producer names must be unique. Duplicate: {', '.join(duplicates)}")
if conf.get('process_only_new_candles', True):
f"Producer names must be unique. Duplicate: {', '.join(duplicates)}"
)
if conf.get("process_only_new_candles", True):
# Warning here or require it?
logger.warning("To receive best performance with external data, "
"please set `process_only_new_candles` to False")
logger.warning(
"To receive best performance with external data, "
"please set `process_only_new_candles` to False"
)
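# Sketch of a consumer config fragment that passes these checks (at least one
# producer, names unique; "default" is a hypothetical name):
# "external_message_consumer": {"enabled": true, "producers": [{"name": "default", ...}]}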
def _strategy_settings(conf: Dict[str, Any]) -> None:
process_deprecated_setting(conf, None, 'use_sell_signal', None, 'use_exit_signal')
process_deprecated_setting(conf, None, 'sell_profit_only', None, 'exit_profit_only')
process_deprecated_setting(conf, None, 'sell_profit_offset', None, 'exit_profit_offset')
process_deprecated_setting(conf, None, 'ignore_roi_if_buy_signal',
None, 'ignore_roi_if_entry_signal')
process_deprecated_setting(conf, None, "use_sell_signal", None, "use_exit_signal")
process_deprecated_setting(conf, None, "sell_profit_only", None, "exit_profit_only")
process_deprecated_setting(conf, None, "sell_profit_offset", None, "exit_profit_offset")
process_deprecated_setting(
conf, None, "ignore_roi_if_buy_signal", None, "ignore_roi_if_entry_signal"
)

View File

@@ -1,6 +1,7 @@
"""
This module contains the configuration class
"""
import logging
import warnings
from copy import deepcopy
@@ -56,7 +57,7 @@ class Configuration:
:return: configuration dictionary
"""
# Keep this method as staticmethod, so it can be used from interactive environments
c = Configuration({'config': files}, RunMode.OTHER)
c = Configuration({"config": files}, RunMode.OTHER)
return c.get_config()
def load_config(self) -> Dict[str, Any]:
@@ -69,19 +70,20 @@ class Configuration:
# Load environment variables
from freqtrade.commands.arguments import NO_CONF_ALLOWED
if self.args.get('command') not in NO_CONF_ALLOWED:
if self.args.get("command") not in NO_CONF_ALLOWED:
env_data = enironment_vars_to_dict()
config = deep_merge_dicts(env_data, config)
# Normalize config
if 'internals' not in config:
config['internals'] = {}
if "internals" not in config:
config["internals"] = {}
if 'pairlists' not in config:
config['pairlists'] = []
if "pairlists" not in config:
config["pairlists"] = []
# Keep a copy of the original configuration file
config['original_config'] = deepcopy(config)
config["original_config"] = deepcopy(config)
self._process_logging_options(config)
@@ -105,7 +107,7 @@ class Configuration:
from freqtrade.exchange.check_exchange import check_exchange
# Check if the exchange set by the user is supported
check_exchange(config, config.get('experimental', {}).get('block_bad_exchanges', True))
check_exchange(config, config.get("experimental", {}).get("block_bad_exchanges", True))
self._resolve_pairs_list(config)
@@ -119,52 +121,56 @@ class Configuration:
the -v/--verbose, --logfile options
"""
# Log level
config.update({'verbosity': self.args.get('verbosity', 0)})
config.update({"verbosity": self.args.get("verbosity", 0)})
if 'logfile' in self.args and self.args['logfile']:
config.update({'logfile': self.args['logfile']})
if "logfile" in self.args and self.args["logfile"]:
config.update({"logfile": self.args["logfile"]})
setup_logging(config)
def _process_trading_options(self, config: Config) -> None:
if config['runmode'] not in TRADE_MODES:
if config["runmode"] not in TRADE_MODES:
return
if config.get('dry_run', False):
logger.info('Dry run is enabled')
if config.get('db_url') in [None, constants.DEFAULT_DB_PROD_URL]:
if config.get("dry_run", False):
logger.info("Dry run is enabled")
if config.get("db_url") in [None, constants.DEFAULT_DB_PROD_URL]:
# Default to in-memory db for dry_run if not specified
config['db_url'] = constants.DEFAULT_DB_DRYRUN_URL
config["db_url"] = constants.DEFAULT_DB_DRYRUN_URL
else:
if not config.get('db_url'):
config['db_url'] = constants.DEFAULT_DB_PROD_URL
logger.info('Dry run is disabled')
if not config.get("db_url"):
config["db_url"] = constants.DEFAULT_DB_PROD_URL
logger.info("Dry run is disabled")
logger.info(f'Using DB: "{parse_db_uri_for_logging(config["db_url"])}"')
def _process_common_options(self, config: Config) -> None:
# Set strategy if not specified in config and/or if it's non-default
if self.args.get('strategy') or not config.get('strategy'):
config.update({'strategy': self.args.get('strategy')})
if self.args.get("strategy") or not config.get("strategy"):
config.update({"strategy": self.args.get("strategy")})
self._args_to_config(config, argname='strategy_path',
logstring='Using additional Strategy lookup path: {}')
self._args_to_config(
config, argname="strategy_path", logstring="Using additional Strategy lookup path: {}"
)
if ('db_url' in self.args and self.args['db_url'] and
self.args['db_url'] != constants.DEFAULT_DB_PROD_URL):
config.update({'db_url': self.args['db_url']})
logger.info('Parameter --db-url detected ...')
if (
"db_url" in self.args
and self.args["db_url"]
and self.args["db_url"] != constants.DEFAULT_DB_PROD_URL
):
config.update({"db_url": self.args["db_url"]})
logger.info("Parameter --db-url detected ...")
self._args_to_config(config, argname='db_url_from',
logstring='Parameter --db-url-from detected ...')
self._args_to_config(
config, argname="db_url_from", logstring="Parameter --db-url-from detected ..."
)
if config.get('force_entry_enable', False):
logger.warning('`force_entry_enable` RPC message enabled.')
if config.get("force_entry_enable", False):
logger.warning("`force_entry_enable` RPC message enabled.")
# Support for sd_notify
if 'sd_notify' in self.args and self.args['sd_notify']:
config['internals'].update({'sd_notify': True})
if "sd_notify" in self.args and self.args["sd_notify"]:
config["internals"].update({"sd_notify": True})
def _process_datadir_options(self, config: Config) -> None:
"""
@@ -172,245 +178,275 @@ class Configuration:
--user-data, --datadir
"""
# Check exchange parameter here - otherwise `datadir` might be wrong.
if 'exchange' in self.args and self.args['exchange']:
config['exchange']['name'] = self.args['exchange']
if "exchange" in self.args and self.args["exchange"]:
config["exchange"]["name"] = self.args["exchange"]
logger.info(f"Using exchange {config['exchange']['name']}")
if 'pair_whitelist' not in config['exchange']:
config['exchange']['pair_whitelist'] = []
if "pair_whitelist" not in config["exchange"]:
config["exchange"]["pair_whitelist"] = []
if 'user_data_dir' in self.args and self.args['user_data_dir']:
config.update({'user_data_dir': self.args['user_data_dir']})
elif 'user_data_dir' not in config:
if "user_data_dir" in self.args and self.args["user_data_dir"]:
config.update({"user_data_dir": self.args["user_data_dir"]})
elif "user_data_dir" not in config:
# Default to cwd/user_data (legacy option ...)
config.update({'user_data_dir': str(Path.cwd() / 'user_data')})
config.update({"user_data_dir": str(Path.cwd() / "user_data")})
# reset to user_data_dir so this contains the absolute path.
config['user_data_dir'] = create_userdata_dir(config['user_data_dir'], create_dir=False)
logger.info('Using user-data directory: %s ...', config['user_data_dir'])
config["user_data_dir"] = create_userdata_dir(config["user_data_dir"], create_dir=False)
logger.info("Using user-data directory: %s ...", config["user_data_dir"])
config.update({'datadir': create_datadir(config, self.args.get('datadir'))})
logger.info('Using data directory: %s ...', config.get('datadir'))
config.update({"datadir": create_datadir(config, self.args.get("datadir"))})
logger.info("Using data directory: %s ...", config.get("datadir"))
if self.args.get('exportfilename'):
self._args_to_config(config, argname='exportfilename',
logstring='Storing backtest results to {} ...')
config['exportfilename'] = Path(config['exportfilename'])
if self.args.get("exportfilename"):
self._args_to_config(
config, argname="exportfilename", logstring="Storing backtest results to {} ..."
)
config["exportfilename"] = Path(config["exportfilename"])
else:
config['exportfilename'] = (config['user_data_dir']
/ 'backtest_results')
config["exportfilename"] = config["user_data_dir"] / "backtest_results"
if self.args.get('show_sensitive'):
if self.args.get("show_sensitive"):
logger.warning(
"Sensitive information will be shown in the upcoming output. "
"Please make sure to never share this output without redacting "
"the information yourself.")
"the information yourself."
)
def _process_optimize_options(self, config: Config) -> None:
# This will override the strategy configuration
self._args_to_config(config, argname='timeframe',
logstring='Parameter -i/--timeframe detected ... '
'Using timeframe: {} ...')
self._args_to_config(config, argname='position_stacking',
logstring='Parameter --enable-position-stacking detected ...')
self._args_to_config(
config,
argname="timeframe",
logstring="Parameter -i/--timeframe detected ... Using timeframe: {} ...",
)
self._args_to_config(
config, argname='enable_protections',
logstring='Parameter --enable-protections detected, enabling Protections. ...')
config,
argname="position_stacking",
logstring="Parameter --enable-position-stacking detected ...",
)
if 'use_max_market_positions' in self.args and not self.args["use_max_market_positions"]:
config.update({'use_max_market_positions': False})
logger.info('Parameter --disable-max-market-positions detected ...')
logger.info('max_open_trades set to unlimited ...')
elif 'max_open_trades' in self.args and self.args['max_open_trades']:
config.update({'max_open_trades': self.args['max_open_trades']})
logger.info('Parameter --max-open-trades detected, '
'overriding max_open_trades to: %s ...', config.get('max_open_trades'))
elif config['runmode'] in NON_UTIL_MODES:
logger.info('Using max_open_trades: %s ...', config.get('max_open_trades'))
self._args_to_config(
config,
argname="enable_protections",
logstring="Parameter --enable-protections detected, enabling Protections. ...",
)
if "use_max_market_positions" in self.args and not self.args["use_max_market_positions"]:
config.update({"use_max_market_positions": False})
logger.info("Parameter --disable-max-market-positions detected ...")
logger.info("max_open_trades set to unlimited ...")
elif "max_open_trades" in self.args and self.args["max_open_trades"]:
config.update({"max_open_trades": self.args["max_open_trades"]})
logger.info(
"Parameter --max-open-trades detected, overriding max_open_trades to: %s ...",
config.get("max_open_trades"),
)
elif config["runmode"] in NON_UTIL_MODES:
logger.info("Using max_open_trades: %s ...", config.get("max_open_trades"))
# Setting max_open_trades to infinite if -1
if config.get('max_open_trades') == -1:
config['max_open_trades'] = float('inf')
if config.get("max_open_trades") == -1:
config["max_open_trades"] = float("inf")
if self.args.get('stake_amount'):
if self.args.get("stake_amount"):
# Convert explicitly to float to support CLI argument for both unlimited and value
try:
self.args['stake_amount'] = float(self.args['stake_amount'])
self.args["stake_amount"] = float(self.args["stake_amount"])
except ValueError:
pass
configurations = [
('timeframe_detail',
'Parameter --timeframe-detail detected, using {} for intra-candle backtesting ...'),
('backtest_show_pair_list', 'Parameter --show-pair-list detected.'),
('stake_amount',
'Parameter --stake-amount detected, overriding stake_amount to: {} ...'),
('dry_run_wallet',
'Parameter --dry-run-wallet detected, overriding dry_run_wallet to: {} ...'),
('fee', 'Parameter --fee detected, setting fee to: {} ...'),
('timerange', 'Parameter --timerange detected: {} ...'),
]
(
"timeframe_detail",
"Parameter --timeframe-detail detected, using {} for intra-candle backtesting ...",
),
("backtest_show_pair_list", "Parameter --show-pair-list detected."),
(
"stake_amount",
"Parameter --stake-amount detected, overriding stake_amount to: {} ...",
),
(
"dry_run_wallet",
"Parameter --dry-run-wallet detected, overriding dry_run_wallet to: {} ...",
),
("fee", "Parameter --fee detected, setting fee to: {} ..."),
("timerange", "Parameter --timerange detected: {} ..."),
]
self._args_to_config_loop(config, configurations)
self._process_datadir_options(config)
self._args_to_config(config, argname='strategy_list',
logstring='Using strategy list of {} strategies', logfun=len)
self._args_to_config(
config,
argname="strategy_list",
logstring="Using strategy list of {} strategies",
logfun=len,
)
configurations = [
('recursive_strategy_search',
'Recursively searching for a strategy in the strategies folder.'),
('timeframe', 'Overriding timeframe with Command line argument'),
('export', 'Parameter --export detected: {} ...'),
('backtest_breakdown', 'Parameter --breakdown detected ...'),
('backtest_cache', 'Parameter --cache={} detected ...'),
('disableparamexport', 'Parameter --disableparamexport detected: {} ...'),
('freqai_backtest_live_models',
'Parameter --freqai-backtest-live-models detected ...'),
(
"recursive_strategy_search",
"Recursively searching for a strategy in the strategies folder.",
),
("timeframe", "Overriding timeframe with Command line argument"),
("export", "Parameter --export detected: {} ..."),
("backtest_breakdown", "Parameter --breakdown detected ..."),
("backtest_cache", "Parameter --cache={} detected ..."),
("disableparamexport", "Parameter --disableparamexport detected: {} ..."),
("freqai_backtest_live_models", "Parameter --freqai-backtest-live-models detected ..."),
]
self._args_to_config_loop(config, configurations)
# Edge section:
if 'stoploss_range' in self.args and self.args["stoploss_range"]:
if "stoploss_range" in self.args and self.args["stoploss_range"]:
txt_range = eval(self.args["stoploss_range"])
config['edge'].update({'stoploss_range_min': txt_range[0]})
config['edge'].update({'stoploss_range_max': txt_range[1]})
config['edge'].update({'stoploss_range_step': txt_range[2]})
logger.info('Parameter --stoplosses detected: %s ...', self.args["stoploss_range"])
config["edge"].update({"stoploss_range_min": txt_range[0]})
config["edge"].update({"stoploss_range_max": txt_range[1]})
config["edge"].update({"stoploss_range_step": txt_range[2]})
logger.info("Parameter --stoplosses detected: %s ...", self.args["stoploss_range"])
# Hyperopt section
configurations = [
('hyperopt', 'Using Hyperopt class name: {}'),
('hyperopt_path', 'Using additional Hyperopt lookup path: {}'),
('hyperoptexportfilename', 'Using hyperopt file: {}'),
('lookahead_analysis_exportfilename', 'Saving lookahead analysis results into {} ...'),
('epochs', 'Parameter --epochs detected ... Will run Hyperopt with for {} epochs ...'),
('spaces', 'Parameter -s/--spaces detected: {}'),
('analyze_per_epoch', 'Parameter --analyze-per-epoch detected.'),
('print_all', 'Parameter --print-all detected ...'),
("hyperopt", "Using Hyperopt class name: {}"),
("hyperopt_path", "Using additional Hyperopt lookup path: {}"),
("hyperoptexportfilename", "Using hyperopt file: {}"),
("lookahead_analysis_exportfilename", "Saving lookahead analysis results into {} ..."),
("epochs", "Parameter --epochs detected ... Will run Hyperopt with for {} epochs ..."),
("spaces", "Parameter -s/--spaces detected: {}"),
("analyze_per_epoch", "Parameter --analyze-per-epoch detected."),
("print_all", "Parameter --print-all detected ..."),
]
self._args_to_config_loop(config, configurations)
if 'print_colorized' in self.args and not self.args["print_colorized"]:
logger.info('Parameter --no-color detected ...')
config.update({'print_colorized': False})
if "print_colorized" in self.args and not self.args["print_colorized"]:
logger.info("Parameter --no-color detected ...")
config.update({"print_colorized": False})
else:
config.update({'print_colorized': True})
config.update({"print_colorized": True})
configurations = [
('print_json', 'Parameter --print-json detected ...'),
('export_csv', 'Parameter --export-csv detected: {}'),
('hyperopt_jobs', 'Parameter -j/--job-workers detected: {}'),
('hyperopt_random_state', 'Parameter --random-state detected: {}'),
('hyperopt_min_trades', 'Parameter --min-trades detected: {}'),
('hyperopt_loss', 'Using Hyperopt loss class name: {}'),
('hyperopt_show_index', 'Parameter -n/--index detected: {}'),
('hyperopt_list_best', 'Parameter --best detected: {}'),
('hyperopt_list_profitable', 'Parameter --profitable detected: {}'),
('hyperopt_list_min_trades', 'Parameter --min-trades detected: {}'),
('hyperopt_list_max_trades', 'Parameter --max-trades detected: {}'),
('hyperopt_list_min_avg_time', 'Parameter --min-avg-time detected: {}'),
('hyperopt_list_max_avg_time', 'Parameter --max-avg-time detected: {}'),
('hyperopt_list_min_avg_profit', 'Parameter --min-avg-profit detected: {}'),
('hyperopt_list_max_avg_profit', 'Parameter --max-avg-profit detected: {}'),
('hyperopt_list_min_total_profit', 'Parameter --min-total-profit detected: {}'),
('hyperopt_list_max_total_profit', 'Parameter --max-total-profit detected: {}'),
('hyperopt_list_min_objective', 'Parameter --min-objective detected: {}'),
('hyperopt_list_max_objective', 'Parameter --max-objective detected: {}'),
('hyperopt_list_no_details', 'Parameter --no-details detected: {}'),
('hyperopt_show_no_header', 'Parameter --no-header detected: {}'),
('hyperopt_ignore_missing_space', 'Paramter --ignore-missing-space detected: {}'),
("print_json", "Parameter --print-json detected ..."),
("export_csv", "Parameter --export-csv detected: {}"),
("hyperopt_jobs", "Parameter -j/--job-workers detected: {}"),
("hyperopt_random_state", "Parameter --random-state detected: {}"),
("hyperopt_min_trades", "Parameter --min-trades detected: {}"),
("hyperopt_loss", "Using Hyperopt loss class name: {}"),
("hyperopt_show_index", "Parameter -n/--index detected: {}"),
("hyperopt_list_best", "Parameter --best detected: {}"),
("hyperopt_list_profitable", "Parameter --profitable detected: {}"),
("hyperopt_list_min_trades", "Parameter --min-trades detected: {}"),
("hyperopt_list_max_trades", "Parameter --max-trades detected: {}"),
("hyperopt_list_min_avg_time", "Parameter --min-avg-time detected: {}"),
("hyperopt_list_max_avg_time", "Parameter --max-avg-time detected: {}"),
("hyperopt_list_min_avg_profit", "Parameter --min-avg-profit detected: {}"),
("hyperopt_list_max_avg_profit", "Parameter --max-avg-profit detected: {}"),
("hyperopt_list_min_total_profit", "Parameter --min-total-profit detected: {}"),
("hyperopt_list_max_total_profit", "Parameter --max-total-profit detected: {}"),
("hyperopt_list_min_objective", "Parameter --min-objective detected: {}"),
("hyperopt_list_max_objective", "Parameter --max-objective detected: {}"),
("hyperopt_list_no_details", "Parameter --no-details detected: {}"),
("hyperopt_show_no_header", "Parameter --no-header detected: {}"),
("hyperopt_ignore_missing_space", "Parameter --ignore-missing-space detected: {}"),
]
self._args_to_config_loop(config, configurations)
def _process_plot_options(self, config: Config) -> None:
configurations = [
('pairs', 'Using pairs {}'),
('indicators1', 'Using indicators1: {}'),
('indicators2', 'Using indicators2: {}'),
('trade_ids', 'Filtering on trade_ids: {}'),
('plot_limit', 'Limiting plot to: {}'),
('plot_auto_open', 'Parameter --auto-open detected.'),
('trade_source', 'Using trades from: {}'),
('prepend_data', 'Prepend detected. Allowing data prepending.'),
('erase', 'Erase detected. Deleting existing data.'),
('no_trades', 'Parameter --no-trades detected.'),
('timeframes', 'timeframes --timeframes: {}'),
('days', 'Detected --days: {}'),
('include_inactive', 'Detected --include-inactive-pairs: {}'),
('download_trades', 'Detected --dl-trades: {}'),
('dataformat_ohlcv', 'Using "{}" to store OHLCV data.'),
('dataformat_trades', 'Using "{}" to store trades data.'),
('show_timerange', 'Detected --show-timerange'),
("pairs", "Using pairs {}"),
("indicators1", "Using indicators1: {}"),
("indicators2", "Using indicators2: {}"),
("trade_ids", "Filtering on trade_ids: {}"),
("plot_limit", "Limiting plot to: {}"),
("plot_auto_open", "Parameter --auto-open detected."),
("trade_source", "Using trades from: {}"),
("prepend_data", "Prepend detected. Allowing data prepending."),
("erase", "Erase detected. Deleting existing data."),
("no_trades", "Parameter --no-trades detected."),
("timeframes", "timeframes --timeframes: {}"),
("days", "Detected --days: {}"),
("include_inactive", "Detected --include-inactive-pairs: {}"),
("download_trades", "Detected --dl-trades: {}"),
("convert_trades", "Detected --convert: {} - Converting Trade data to OHCV {}"),
("dataformat_ohlcv", 'Using "{}" to store OHLCV data.'),
("dataformat_trades", 'Using "{}" to store trades data.'),
("show_timerange", "Detected --show-timerange"),
]
self._args_to_config_loop(config, configurations)
def _process_data_options(self, config: Config) -> None:
self._args_to_config(config, argname='new_pairs_days',
logstring='Detected --new-pairs-days: {}')
self._args_to_config(config, argname='trading_mode',
logstring='Detected --trading-mode: {}')
config['candle_type_def'] = CandleType.get_default(
config.get('trading_mode', 'spot') or 'spot')
config['trading_mode'] = TradingMode(config.get('trading_mode', 'spot') or 'spot')
self._args_to_config(config, argname='candle_types',
logstring='Detected --candle-types: {}')
self._args_to_config(
config, argname="new_pairs_days", logstring="Detected --new-pairs-days: {}"
)
self._args_to_config(
config, argname="trading_mode", logstring="Detected --trading-mode: {}"
)
config["candle_type_def"] = CandleType.get_default(
config.get("trading_mode", "spot") or "spot"
)
config["trading_mode"] = TradingMode(config.get("trading_mode", "spot") or "spot")
self._args_to_config(
config, argname="candle_types", logstring="Detected --candle-types: {}"
)
def _process_analyze_options(self, config: Config) -> None:
configurations = [
('analysis_groups', 'Analysis reason groups: {}'),
('enter_reason_list', 'Analysis enter tag list: {}'),
('exit_reason_list', 'Analysis exit tag list: {}'),
('indicator_list', 'Analysis indicator list: {}'),
('timerange', 'Filter trades by timerange: {}'),
('analysis_rejected', 'Analyse rejected signals: {}'),
('analysis_to_csv', 'Store analysis tables to CSV: {}'),
('analysis_csv_path', 'Path to store analysis CSVs: {}'),
("analysis_groups", "Analysis reason groups: {}"),
("enter_reason_list", "Analysis enter tag list: {}"),
("exit_reason_list", "Analysis exit tag list: {}"),
("indicator_list", "Analysis indicator list: {}"),
("timerange", "Filter trades by timerange: {}"),
("analysis_rejected", "Analyse rejected signals: {}"),
("analysis_to_csv", "Store analysis tables to CSV: {}"),
("analysis_csv_path", "Path to store analysis CSVs: {}"),
# Lookahead analysis results
('targeted_trade_amount', 'Targeted Trade amount: {}'),
('minimum_trade_amount', 'Minimum Trade amount: {}'),
('lookahead_analysis_exportfilename', 'Path to store lookahead-analysis-results: {}'),
('startup_candle', 'Startup candle to be used on recursive analysis: {}'),
("targeted_trade_amount", "Targeted Trade amount: {}"),
("minimum_trade_amount", "Minimum Trade amount: {}"),
("lookahead_analysis_exportfilename", "Path to store lookahead-analysis-results: {}"),
("startup_candle", "Startup candle to be used on recursive analysis: {}"),
]
self._args_to_config_loop(config, configurations)
def _args_to_config_loop(self, config, configurations: List[Tuple[str, str]]) -> None:
for argname, logstring in configurations:
self._args_to_config(config, argname=argname, logstring=logstring)
def _process_runmode(self, config: Config) -> None:
self._args_to_config(config, argname='dry_run',
logstring='Parameter --dry-run detected, '
'overriding dry_run to: {} ...')
self._args_to_config(
config,
argname="dry_run",
logstring="Parameter --dry-run detected, overriding dry_run to: {} ...",
)
if not self.runmode:
# Handle real mode, infer dry/live from config
self.runmode = RunMode.DRY_RUN if config.get('dry_run', True) else RunMode.LIVE
self.runmode = RunMode.DRY_RUN if config.get("dry_run", True) else RunMode.LIVE
logger.info(f"Runmode set to {self.runmode.value}.")
config.update({'runmode': self.runmode})
config.update({"runmode": self.runmode})
def _process_freqai_options(self, config: Config) -> None:
self._args_to_config(
config, argname="freqaimodel", logstring="Using freqaimodel class name: {}"
)
self._args_to_config(config, argname='freqaimodel',
logstring='Using freqaimodel class name: {}')
self._args_to_config(config, argname='freqaimodel_path',
logstring='Using freqaimodel path: {}')
self._args_to_config(
config, argname="freqaimodel_path", logstring="Using freqaimodel path: {}"
)
return
def _args_to_config(self, config: Config, argname: str,
logstring: str, logfun: Optional[Callable] = None,
deprecated_msg: Optional[str] = None) -> None:
def _args_to_config(
self,
config: Config,
argname: str,
logstring: str,
logfun: Optional[Callable] = None,
deprecated_msg: Optional[str] = None,
) -> None:
"""
:param config: Configuration dictionary
:param argname: Argumentname in self.args - will be copied to config dict.
@@ -420,9 +456,11 @@ class Configuration:
sample: logfun=len (prints the length of the found
configuration instead of the content)
"""
if (argname in self.args and self.args[argname] is not None
and self.args[argname] is not False):
if (
argname in self.args
and self.args[argname] is not None
and self.args[argname] is not False
):
config.update({argname: self.args[argname]})
if logfun:
logger.info(logstring.format(logfun(config[argname])))
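# Sketch: with logfun=len (as used for strategy_list above), the log line reports
# the number of entries rather than the raw list contents.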
@@ -441,7 +479,7 @@ class Configuration:
"""
if "pairs" in config:
config['exchange']['pair_whitelist'] = config['pairs']
config["exchange"]["pair_whitelist"] = config["pairs"]
return
if "pairs_file" in self.args and self.args["pairs_file"]:
@@ -451,19 +489,19 @@ class Configuration:
# or if pairs file is specified explicitly
if not pairs_file.exists():
raise OperationalException(f'No pairs file found with path "{pairs_file}".')
config['pairs'] = load_file(pairs_file)
if isinstance(config['pairs'], list):
config['pairs'].sort()
config["pairs"] = load_file(pairs_file)
if isinstance(config["pairs"], list):
config["pairs"].sort()
return
if 'config' in self.args and self.args['config']:
if "config" in self.args and self.args["config"]:
logger.info("Using pairlist from configuration.")
config['pairs'] = config.get('exchange', {}).get('pair_whitelist')
config["pairs"] = config.get("exchange", {}).get("pair_whitelist")
else:
# Fall back to /dl_path/pairs.json
pairs_file = config['datadir'] / 'pairs.json'
pairs_file = config["datadir"] / "pairs.json"
if pairs_file.exists():
logger.info(f'Reading pairs file "{pairs_file}".')
config['pairs'] = load_file(pairs_file)
if 'pairs' in config and isinstance(config['pairs'], list):
config['pairs'].sort()
config["pairs"] = load_file(pairs_file)
if "pairs" in config and isinstance(config["pairs"], list):
config["pairs"].sort()

View File

@@ -12,9 +12,13 @@ from freqtrade.exceptions import ConfigurationError, OperationalException
logger = logging.getLogger(__name__)
def check_conflicting_settings(config: Config,
section_old: Optional[str], name_old: str,
section_new: Optional[str], name_new: str) -> None:
def check_conflicting_settings(
config: Config,
section_old: Optional[str],
name_old: str,
section_new: Optional[str],
name_new: str,
) -> None:
section_new_config = config.get(section_new, {}) if section_new else config
section_old_config = config.get(section_old, {}) if section_old else config
if name_new in section_new_config and name_old in section_old_config:
@@ -29,9 +33,9 @@ def check_conflicting_settings(config: Config,
)
def process_removed_setting(config: Config,
section1: str, name1: str,
section2: Optional[str], name2: str) -> None:
def process_removed_setting(
config: Config, section1: str, name1: str, section2: Optional[str], name2: str
) -> None:
"""
:param section1: Removed section
:param name1: Removed setting name
@@ -48,10 +52,13 @@ def process_removed_setting(config: Config,
)
def process_deprecated_setting(config: Config,
section_old: Optional[str], name_old: str,
section_new: Optional[str], name_new: str
) -> None:
def process_deprecated_setting(
config: Config,
section_old: Optional[str],
name_old: str,
section_new: Optional[str],
name_new: str,
) -> None:
check_conflicting_settings(config, section_old, name_old, section_new, name_new)
section_old_config = config.get(section_old, {}) if section_old else config
@@ -71,57 +78,91 @@ def process_deprecated_setting(config: Config,
def process_temporary_deprecated_settings(config: Config) -> None:
# Kept for future deprecated / moved settings
# check_conflicting_settings(config, 'ask_strategy', 'use_sell_signal',
# 'experimental', 'use_sell_signal')
process_deprecated_setting(config, 'ask_strategy', 'ignore_buying_expired_candle_after',
None, 'ignore_buying_expired_candle_after')
process_deprecated_setting(
config,
"ask_strategy",
"ignore_buying_expired_candle_after",
None,
"ignore_buying_expired_candle_after",
)
process_deprecated_setting(config, None, 'forcebuy_enable', None, 'force_entry_enable')
process_deprecated_setting(config, None, "forcebuy_enable", None, "force_entry_enable")
# New settings
if config.get('telegram'):
process_deprecated_setting(config['telegram'], 'notification_settings', 'sell',
'notification_settings', 'exit')
process_deprecated_setting(config['telegram'], 'notification_settings', 'sell_fill',
'notification_settings', 'exit_fill')
process_deprecated_setting(config['telegram'], 'notification_settings', 'sell_cancel',
'notification_settings', 'exit_cancel')
process_deprecated_setting(config['telegram'], 'notification_settings', 'buy',
'notification_settings', 'entry')
process_deprecated_setting(config['telegram'], 'notification_settings', 'buy_fill',
'notification_settings', 'entry_fill')
process_deprecated_setting(config['telegram'], 'notification_settings', 'buy_cancel',
'notification_settings', 'entry_cancel')
if config.get('webhook'):
process_deprecated_setting(config, 'webhook', 'webhookbuy', 'webhook', 'webhookentry')
process_deprecated_setting(config, 'webhook', 'webhookbuycancel',
'webhook', 'webhookentrycancel')
process_deprecated_setting(config, 'webhook', 'webhookbuyfill',
'webhook', 'webhookentryfill')
process_deprecated_setting(config, 'webhook', 'webhooksell', 'webhook', 'webhookexit')
process_deprecated_setting(config, 'webhook', 'webhooksellcancel',
'webhook', 'webhookexitcancel')
process_deprecated_setting(config, 'webhook', 'webhooksellfill',
'webhook', 'webhookexitfill')
if config.get("telegram"):
process_deprecated_setting(
config["telegram"], "notification_settings", "sell", "notification_settings", "exit"
)
process_deprecated_setting(
config["telegram"],
"notification_settings",
"sell_fill",
"notification_settings",
"exit_fill",
)
process_deprecated_setting(
config["telegram"],
"notification_settings",
"sell_cancel",
"notification_settings",
"exit_cancel",
)
process_deprecated_setting(
config["telegram"], "notification_settings", "buy", "notification_settings", "entry"
)
process_deprecated_setting(
config["telegram"],
"notification_settings",
"buy_fill",
"notification_settings",
"entry_fill",
)
process_deprecated_setting(
config["telegram"],
"notification_settings",
"buy_cancel",
"notification_settings",
"entry_cancel",
)
if config.get("webhook"):
process_deprecated_setting(config, "webhook", "webhookbuy", "webhook", "webhookentry")
process_deprecated_setting(
config, "webhook", "webhookbuycancel", "webhook", "webhookentrycancel"
)
process_deprecated_setting(
config, "webhook", "webhookbuyfill", "webhook", "webhookentryfill"
)
process_deprecated_setting(config, "webhook", "webhooksell", "webhook", "webhookexit")
process_deprecated_setting(
config, "webhook", "webhooksellcancel", "webhook", "webhookexitcancel"
)
process_deprecated_setting(
config, "webhook", "webhooksellfill", "webhook", "webhookexitfill"
)
# Legacy way - having them in experimental ...
process_removed_setting(config, 'experimental', 'use_sell_signal', None, 'use_exit_signal')
process_removed_setting(config, 'experimental', 'sell_profit_only', None, 'exit_profit_only')
process_removed_setting(config, 'experimental', 'ignore_roi_if_buy_signal',
None, 'ignore_roi_if_entry_signal')
process_removed_setting(config, "experimental", "use_sell_signal", None, "use_exit_signal")
process_removed_setting(config, "experimental", "sell_profit_only", None, "exit_profit_only")
process_removed_setting(
config, "experimental", "ignore_roi_if_buy_signal", None, "ignore_roi_if_entry_signal"
)
process_removed_setting(config, 'ask_strategy', 'use_sell_signal', None, 'use_exit_signal')
process_removed_setting(config, 'ask_strategy', 'sell_profit_only', None, 'exit_profit_only')
process_removed_setting(config, 'ask_strategy', 'sell_profit_offset',
None, 'exit_profit_offset')
process_removed_setting(config, 'ask_strategy', 'ignore_roi_if_buy_signal',
None, 'ignore_roi_if_entry_signal')
if (config.get('edge', {}).get('enabled', False)
and 'capital_available_percentage' in config.get('edge', {})):
process_removed_setting(config, "ask_strategy", "use_sell_signal", None, "use_exit_signal")
process_removed_setting(config, "ask_strategy", "sell_profit_only", None, "exit_profit_only")
process_removed_setting(
config, "ask_strategy", "sell_profit_offset", None, "exit_profit_offset"
)
process_removed_setting(
config, "ask_strategy", "ignore_roi_if_buy_signal", None, "ignore_roi_if_entry_signal"
)
if config.get("edge", {}).get(
"enabled", False
) and "capital_available_percentage" in config.get("edge", {}):
raise ConfigurationError(
"DEPRECATED: "
"Using 'edge.capital_available_percentage' has been deprecated in favor of "
@@ -129,12 +170,11 @@ def process_temporary_deprecated_settings(config: Config) -> None:
"'tradable_balance_ratio' and remove 'capital_available_percentage' "
"from the edge configuration."
)
if 'ticker_interval' in config:
if "ticker_interval" in config:
raise ConfigurationError(
"DEPRECATED: 'ticker_interval' detected. "
"Please use 'timeframe' instead of 'ticker_interval."
)
if 'protections' in config:
if "protections" in config:
logger.warning("DEPRECATED: Setting 'protections' in the configuration is deprecated.")

View File

@@ -5,4 +5,4 @@ def running_in_docker() -> bool:
"""
Check if we are running in a docker container
"""
return os.environ.get('FT_APP_ENV') == 'docker'
return os.environ.get("FT_APP_ENV") == "docker"

View File

@@ -4,8 +4,14 @@ from pathlib import Path
from typing import Optional
from freqtrade.configuration.detect_environment import running_in_docker
from freqtrade.constants import (USER_DATA_FILES, USERPATH_FREQAIMODELS, USERPATH_HYPEROPTS,
USERPATH_NOTEBOOKS, USERPATH_STRATEGIES, Config)
from freqtrade.constants import (
USER_DATA_FILES,
USERPATH_FREQAIMODELS,
USERPATH_HYPEROPTS,
USERPATH_NOTEBOOKS,
USERPATH_STRATEGIES,
Config,
)
from freqtrade.exceptions import OperationalException
@@ -13,16 +19,15 @@ logger = logging.getLogger(__name__)
def create_datadir(config: Config, datadir: Optional[str] = None) -> Path:
folder = Path(datadir) if datadir else Path(f"{config['user_data_dir']}/data")
if not datadir:
# set datadir
exchange_name = config.get('exchange', {}).get('name', '').lower()
exchange_name = config.get("exchange", {}).get("name", "").lower()
folder = folder.joinpath(exchange_name)
if not folder.is_dir():
folder.mkdir(parents=True)
logger.info(f'Created data directory: {datadir}')
logger.info(f"Created data directory: {datadir}")
return folder
@@ -34,8 +39,8 @@ def chown_user_directory(directory: Path) -> None:
if running_in_docker():
try:
import subprocess
subprocess.check_output(
['sudo', 'chown', '-R', 'ftuser:', str(directory.resolve())])
subprocess.check_output(["sudo", "chown", "-R", "ftuser:", str(directory.resolve())])
except Exception:
logger.warning(f"Could not chown {directory}")
@@ -50,18 +55,28 @@ def create_userdata_dir(directory: str, create_dir: bool = False) -> Path:
:param create_dir: Create directory if it does not exist.
:return: Path object containing the directory
"""
sub_dirs = ["backtest_results", "data", USERPATH_HYPEROPTS, "hyperopt_results", "logs",
USERPATH_NOTEBOOKS, "plot", USERPATH_STRATEGIES, USERPATH_FREQAIMODELS]
sub_dirs = [
"backtest_results",
"data",
USERPATH_HYPEROPTS,
"hyperopt_results",
"logs",
USERPATH_NOTEBOOKS,
"plot",
USERPATH_STRATEGIES,
USERPATH_FREQAIMODELS,
]
folder = Path(directory)
chown_user_directory(folder)
if not folder.is_dir():
if create_dir:
folder.mkdir(parents=True)
logger.info(f'Created user-data directory: {folder}')
logger.info(f"Created user-data directory: {folder}")
else:
raise OperationalException(
f"Directory `{folder}` does not exist. "
"Please use `freqtrade create-userdir` to create a user directory")
"Please use `freqtrade create-userdir` to create a user directory"
)
# Create required subdirectories
for f in sub_dirs:

View File

@@ -16,9 +16,9 @@ def _get_var_typed(val):
try:
return float(val)
except ValueError:
if val.lower() in ('t', 'true'):
if val.lower() in ("t", "true"):
return True
elif val.lower() in ('f', 'false'):
elif val.lower() in ("f", "false"):
return False
# keep as string
return val
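# Sketch of the resulting conversions (hypothetical inputs): "3.5" -> 3.5,
# "true" -> True, "F" -> False, anything else stays a string.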
@@ -32,16 +32,21 @@ def _flat_vars_to_nested_dict(env_dict: Dict[str, Any], prefix: str) -> Dict[str
:param prefix: Prefix to consider (usually FREQTRADE__)
:return: Nested dict based on available and relevant variables.
"""
no_convert = ['CHAT_ID', 'PASSWORD']
no_convert = ["CHAT_ID", "PASSWORD"]
relevant_vars: Dict[str, Any] = {}
for env_var, val in sorted(env_dict.items()):
if env_var.startswith(prefix):
logger.info(f"Loading variable '{env_var}'")
key = env_var.replace(prefix, '')
for k in reversed(key.split('__')):
val = {k.lower(): _get_var_typed(val)
if not isinstance(val, dict) and k not in no_convert else val}
key = env_var.replace(prefix, "")
for k in reversed(key.split("__")):
val = {
k.lower(): (
_get_var_typed(val)
if not isinstance(val, dict) and k not in no_convert
else val
)
}
relevant_vars = deep_merge_dicts(val, relevant_vars)
return relevant_vars
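# Sketch (hypothetical variable): with the usual "FREQTRADE__" prefix,
# FREQTRADE__EXCHANGE__NAME=binance becomes {"exchange": {"name": "binance"}},
# while keys listed in no_convert (e.g. a telegram CHAT_ID) are kept as strings.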

View File

@@ -1,6 +1,7 @@
"""
This module contain functions to load the configuration file
"""
import logging
import re
import sys
@@ -25,25 +26,25 @@ def log_config_error_range(path: str, errmsg: str) -> str:
"""
Parses configuration file and prints range around error
"""
if path != '-':
offsetlist = re.findall(r'(?<=Parse\serror\sat\soffset\s)\d+', errmsg)
if path != "-":
offsetlist = re.findall(r"(?<=Parse\serror\sat\soffset\s)\d+", errmsg)
if offsetlist:
offset = int(offsetlist[0])
text = Path(path).read_text()
# Fetch an offset of 80 characters around the error line
subtext = text[offset - min(80, offset):offset + 80]
segments = subtext.split('\n')
subtext = text[offset - min(80, offset) : offset + 80]
segments = subtext.split("\n")
if len(segments) > 3:
# Remove first and last lines, to avoid odd truncations
return '\n'.join(segments[1:-1])
return "\n".join(segments[1:-1])
else:
return subtext
return ''
return ""
def load_file(path: Path) -> Dict[str, Any]:
try:
with path.open('r') as file:
with path.open("r") as file:
config = rapidjson.load(file, parse_mode=CONFIG_PARSE_MODE)
except FileNotFoundError:
raise OperationalException(f'File "{path}" not found!') from None
@@ -58,25 +59,27 @@ def load_config_file(path: str) -> Dict[str, Any]:
"""
try:
# Read config from stdin if requested in the options
with Path(path).open() if path != '-' else sys.stdin as file:
with Path(path).open() if path != "-" else sys.stdin as file:
config = rapidjson.load(file, parse_mode=CONFIG_PARSE_MODE)
except FileNotFoundError:
raise OperationalException(
f'Config file "{path}" not found!'
' Please create a config file or check whether it exists.') from None
" Please create a config file or check whether it exists."
) from None
except rapidjson.JSONDecodeError as e:
err_range = log_config_error_range(path, str(e))
raise ConfigurationError(
f'{e}\n'
f'Please verify the following segment of your configuration:\n{err_range}'
if err_range else 'Please verify your configuration file for syntax errors.'
f"{e}\nPlease verify the following segment of your configuration:\n{err_range}"
if err_range
else "Please verify your configuration file for syntax errors."
)
return config
def load_from_files(
files: List[str], base_path: Optional[Path] = None, level: int = 0) -> Dict[str, Any]:
files: List[str], base_path: Optional[Path] = None, level: int = 0
) -> Dict[str, Any]:
"""
Recursively load configuration files if specified.
Sub-files are assumed to be relative to the initial config.
@@ -90,8 +93,8 @@ def load_from_files(
files_loaded = []
# We expect here a list of config filenames
for filename in files:
logger.info(f'Using config: {filename} ...')
if filename == '-':
logger.info(f"Using config: {filename} ...")
if filename == "-":
# Immediately load stdin and return
return load_config_file(filename)
file = Path(filename)
@@ -100,10 +103,11 @@ def load_from_files(
file = base_path / file
config_tmp = load_config_file(str(file))
if 'add_config_files' in config_tmp:
if "add_config_files" in config_tmp:
config_sub = load_from_files(
config_tmp['add_config_files'], file.resolve().parent, level + 1)
files_loaded.extend(config_sub.get('config_files', []))
config_tmp["add_config_files"], file.resolve().parent, level + 1
)
files_loaded.extend(config_sub.get("config_files", []))
config_tmp = deep_merge_dicts(config_tmp, config_sub)
files_loaded.insert(0, str(file))
@@ -111,6 +115,6 @@ def load_from_files(
# Merge config options, overwriting prior values
config = deep_merge_dicts(config_tmp, config)
config['config_files'] = files_loaded
config["config_files"] = files_loaded
return config
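# Sketch: a config containing {"add_config_files": ["config-private.json"]}
# (hypothetical filename) loads the sub-file relative to itself; on conflicting
# keys, deep_merge_dicts lets the including file override its sub-files.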

View File

@@ -1,6 +1,7 @@
"""
This module contains the argument manager class
"""
import logging
import re
from datetime import datetime, timezone
@@ -22,9 +23,13 @@ class TimeRange:
if *type is None, don't use corresponding startvalue.
"""
def __init__(self, starttype: Optional[str] = None, stoptype: Optional[str] = None,
startts: int = 0, stopts: int = 0):
def __init__(
self,
starttype: Optional[str] = None,
stoptype: Optional[str] = None,
startts: int = 0,
stopts: int = 0,
):
self.starttype: Optional[str] = starttype
self.stoptype: Optional[str] = stoptype
self.startts: int = startts
@@ -48,12 +53,12 @@ class TimeRange:
Returns a string representation of the timerange as used by parse_timerange.
Follows the format yyyymmdd-yyyymmdd - leaving out the parts that are not set.
"""
start = ''
stop = ''
start = ""
stop = ""
if startdt := self.startdt:
start = startdt.strftime('%Y%m%d')
start = startdt.strftime("%Y%m%d")
if stopdt := self.stopdt:
stop = stopdt.strftime('%Y%m%d')
stop = stopdt.strftime("%Y%m%d")
return f"{start}-{stop}"
@property
@@ -61,7 +66,7 @@ class TimeRange:
"""
Returns a string representation of the start date
"""
val = 'unbounded'
val = "unbounded"
if (startdt := self.startdt) is not None:
val = startdt.strftime(DATETIME_PRINT_FORMAT)
return val
@@ -71,15 +76,19 @@ class TimeRange:
"""
Returns a string representation of the stop date
"""
val = 'unbounded'
val = "unbounded"
if (stopdt := self.stopdt) is not None:
val = stopdt.strftime(DATETIME_PRINT_FORMAT)
return val
def __eq__(self, other):
"""Override the default Equals behavior"""
return (self.starttype == other.starttype and self.stoptype == other.stoptype
and self.startts == other.startts and self.stopts == other.stopts)
return (
self.starttype == other.starttype
and self.stoptype == other.stoptype
and self.startts == other.startts
and self.stopts == other.stopts
)
def subtract_start(self, seconds: int) -> None:
"""
@@ -90,8 +99,9 @@ class TimeRange:
if self.startts:
self.startts = self.startts - seconds
def adjust_start_if_necessary(self, timeframe_secs: int, startup_candles: int,
min_date: datetime) -> None:
def adjust_start_if_necessary(
self, timeframe_secs: int, startup_candles: int, min_date: datetime
) -> None:
"""
Adjust startts by <startup_candles> candles.
Applies only if no startup-candles have been available.
@@ -101,13 +111,13 @@ class TimeRange:
has to be moved
:return: None (Modifies the object in place)
"""
if (not self.starttype or (startup_candles
and min_date.timestamp() >= self.startts)):
if not self.starttype or (startup_candles and min_date.timestamp() >= self.startts):
# If no startts was defined, or backtest-data starts at the defined backtest-date
logger.warning("Moving start-date by %s candles to account for startup time.",
startup_candles)
logger.warning(
"Moving start-date by %s candles to account for startup time.", startup_candles
)
self.startts = int(min_date.timestamp() + timeframe_secs * startup_candles)
self.starttype = 'date'
self.starttype = "date"
@classmethod
def parse_timerange(cls, text: Optional[str]) -> Self:
@@ -118,16 +128,17 @@ class TimeRange:
"""
if not text:
return cls(None, None, 0, 0)
syntax = [(r'^-(\d{8})$', (None, 'date')),
(r'^(\d{8})-$', ('date', None)),
(r'^(\d{8})-(\d{8})$', ('date', 'date')),
(r'^-(\d{10})$', (None, 'date')),
(r'^(\d{10})-$', ('date', None)),
(r'^(\d{10})-(\d{10})$', ('date', 'date')),
(r'^-(\d{13})$', (None, 'date')),
(r'^(\d{13})-$', ('date', None)),
(r'^(\d{13})-(\d{13})$', ('date', 'date')),
]
syntax = [
(r"^-(\d{8})$", (None, "date")),
(r"^(\d{8})-$", ("date", None)),
(r"^(\d{8})-(\d{8})$", ("date", "date")),
(r"^-(\d{10})$", (None, "date")),
(r"^(\d{10})-$", ("date", None)),
(r"^(\d{10})-(\d{10})$", ("date", "date")),
(r"^-(\d{13})$", (None, "date")),
(r"^(\d{13})-$", ("date", None)),
(r"^(\d{13})-(\d{13})$", ("date", "date")),
]
for rex, stype in syntax:
# Apply the regular expression to text
match = re.match(rex, text)
@@ -138,9 +149,12 @@ class TimeRange:
stop: int = 0
if stype[0]:
starts = rvals[index]
if stype[0] == 'date' and len(starts) == 8:
start = int(datetime.strptime(starts, '%Y%m%d').replace(
tzinfo=timezone.utc).timestamp())
if stype[0] == "date" and len(starts) == 8:
start = int(
datetime.strptime(starts, "%Y%m%d")
.replace(tzinfo=timezone.utc)
.timestamp()
)
elif len(starts) == 13:
start = int(starts) // 1000
else:
@@ -148,15 +162,19 @@ class TimeRange:
index += 1
if stype[1]:
stops = rvals[index]
if stype[1] == 'date' and len(stops) == 8:
stop = int(datetime.strptime(stops, '%Y%m%d').replace(
tzinfo=timezone.utc).timestamp())
if stype[1] == "date" and len(stops) == 8:
stop = int(
datetime.strptime(stops, "%Y%m%d")
.replace(tzinfo=timezone.utc)
.timestamp()
)
elif len(stops) == 13:
stop = int(stops) // 1000
else:
stop = int(stops)
if start > stop > 0:
raise ConfigurationError(
f'Start date is after stop date for timerange "{text}"')
f'Start date is after stop date for timerange "{text}"'
)
return cls(stype[0], stype[1], start, stop)
raise ConfigurationError(f'Incorrect syntax for timerange "{text}"')
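The quoting and wrapping changes above leave the parsing rules untouched. A minimal sketch of the accepted inputs, with invented dates; the string-representation property is assumed to be named timerange_str, matching the docstring hunk above:

    from freqtrade.configuration import TimeRange

    # 8-digit values parse as UTC dates, 10-digit as unix seconds,
    # 13-digit as unix milliseconds; either side may be left open.
    tr = TimeRange.parse_timerange("20240101-20240301")
    print(tr.starttype, tr.stoptype)  # date date
    print(tr.timerange_str)           # 20240101-20240301

    open_ended = TimeRange.parse_timerange("20240101-")
    print(open_ended.stop_fmt)        # unbounded

A start date later than the stop date raises ConfigurationError, as does any string matching none of the nine patterns.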

File diff suppressed because it is too large.


@@ -3,6 +3,4 @@ Module to handle data operations for freqtrade
"""
# limit what's imported when using `from freqtrade.data import *`
__all__ = [
'converter'
]
__all__ = ["converter"]

View File

@@ -1,6 +1,7 @@
"""
Helpers when analyzing backtest data
"""
import logging
from copy import copy
from datetime import datetime, timezone
@@ -21,14 +22,35 @@ from freqtrade.types import BacktestHistoryEntryType, BacktestResultType
logger = logging.getLogger(__name__)
# Newest format
BT_DATA_COLUMNS = ['pair', 'stake_amount', 'max_stake_amount', 'amount',
'open_date', 'close_date', 'open_rate', 'close_rate',
'fee_open', 'fee_close', 'trade_duration',
'profit_ratio', 'profit_abs', 'exit_reason',
'initial_stop_loss_abs', 'initial_stop_loss_ratio', 'stop_loss_abs',
'stop_loss_ratio', 'min_rate', 'max_rate', 'is_open', 'enter_tag',
'leverage', 'is_short', 'open_timestamp', 'close_timestamp', 'orders'
]
BT_DATA_COLUMNS = [
"pair",
"stake_amount",
"max_stake_amount",
"amount",
"open_date",
"close_date",
"open_rate",
"close_rate",
"fee_open",
"fee_close",
"trade_duration",
"profit_ratio",
"profit_abs",
"exit_reason",
"initial_stop_loss_abs",
"initial_stop_loss_ratio",
"stop_loss_abs",
"stop_loss_ratio",
"min_rate",
"max_rate",
"is_open",
"enter_tag",
"leverage",
"is_short",
"open_timestamp",
"close_timestamp",
"orders",
]
def get_latest_optimize_filename(directory: Union[Path, str], variant: str) -> str:
@@ -50,15 +72,16 @@ def get_latest_optimize_filename(directory: Union[Path, str], variant: str) -> s
if not filename.is_file():
raise ValueError(
f"Directory '{directory}' does not seem to contain backtest statistics yet.")
f"Directory '{directory}' does not seem to contain backtest statistics yet."
)
with filename.open() as file:
data = json_load(file)
if f'latest_{variant}' not in data:
if f"latest_{variant}" not in data:
raise ValueError(f"Invalid '{LAST_BT_RESULT_FN}' format.")
return data[f'latest_{variant}']
return data[f"latest_{variant}"]
def get_latest_backtest_filename(directory: Union[Path, str]) -> str:
@@ -71,7 +94,7 @@ def get_latest_backtest_filename(directory: Union[Path, str]) -> str:
* `directory/.last_result.json` does not exist
* `directory/.last_result.json` has the wrong content
"""
return get_latest_optimize_filename(directory, 'backtest')
return get_latest_optimize_filename(directory, "backtest")
def get_latest_hyperopt_filename(directory: Union[Path, str]) -> str:
@@ -85,14 +108,15 @@ def get_latest_hyperopt_filename(directory: Union[Path, str]) -> str:
* `directory/.last_result.json` has the wrong content
"""
try:
return get_latest_optimize_filename(directory, 'hyperopt')
return get_latest_optimize_filename(directory, "hyperopt")
except ValueError:
# Return default (legacy) pickle filename
return 'hyperopt_results.pickle'
return "hyperopt_results.pickle"
def get_latest_hyperopt_file(
directory: Union[Path, str], predef_filename: Optional[str] = None) -> Path:
directory: Union[Path, str], predef_filename: Optional[str] = None
) -> Path:
"""
Get latest hyperopt export based on '.last_result.json'.
:param directory: Directory to search for last result
@@ -107,7 +131,8 @@ def get_latest_hyperopt_file(
if predef_filename:
if Path(predef_filename).is_absolute():
raise ConfigurationError(
"--hyperopt-filename expects only the filename, not an absolute path.")
"--hyperopt-filename expects only the filename, not an absolute path."
)
return directory / predef_filename
return directory / get_latest_hyperopt_filename(directory)
@@ -126,7 +151,7 @@ def load_backtest_metadata(filename: Union[Path, str]) -> Dict[str, Any]:
except FileNotFoundError:
return {}
except Exception as e:
raise OperationalException('Unexpected error while loading backtest metadata.') from e
raise OperationalException("Unexpected error while loading backtest metadata.") from e
def load_backtest_stats(filename: Union[Path, str]) -> BacktestResultType:
@@ -147,7 +172,7 @@ def load_backtest_stats(filename: Union[Path, str]) -> BacktestResultType:
# Legacy list format does not contain metadata.
if isinstance(data, dict):
data['metadata'] = load_backtest_metadata(filename)
data["metadata"] = load_backtest_metadata(filename)
return data
@@ -159,38 +184,39 @@ def load_and_merge_backtest_result(strategy_name: str, filename: Path, results:
:param results: dict to merge the result to.
"""
bt_data = load_backtest_stats(filename)
k: Literal['metadata', 'strategy']
for k in ('metadata', 'strategy'): # type: ignore
k: Literal["metadata", "strategy"]
for k in ("metadata", "strategy"): # type: ignore
results[k][strategy_name] = bt_data[k][strategy_name]
results['metadata'][strategy_name]['filename'] = filename.stem
comparison = bt_data['strategy_comparison']
results["metadata"][strategy_name]["filename"] = filename.stem
comparison = bt_data["strategy_comparison"]
for i in range(len(comparison)):
if comparison[i]['key'] == strategy_name:
results['strategy_comparison'].append(comparison[i])
if comparison[i]["key"] == strategy_name:
results["strategy_comparison"].append(comparison[i])
break
def _get_backtest_files(dirname: Path) -> List[Path]:
# Weird glob expression here avoids including .meta.json files.
return list(reversed(sorted(dirname.glob('backtest-result-*-[0-9][0-9].json'))))
return list(reversed(sorted(dirname.glob("backtest-result-*-[0-9][0-9].json"))))
def _extract_backtest_result(filename: Path) -> List[BacktestHistoryEntryType]:
metadata = load_backtest_metadata(filename)
return [
{
'filename': filename.stem,
'strategy': s,
'run_id': v['run_id'],
'notes': v.get('notes', ''),
"filename": filename.stem,
"strategy": s,
"run_id": v["run_id"],
"notes": v.get("notes", ""),
# Backtest "run" time
'backtest_start_time': v['backtest_start_time'],
"backtest_start_time": v["backtest_start_time"],
# Backtest timerange
'backtest_start_ts': v.get('backtest_start_ts', None),
'backtest_end_ts': v.get('backtest_end_ts', None),
'timeframe': v.get('timeframe', None),
'timeframe_detail': v.get('timeframe_detail', None),
} for s, v in metadata.items()
"backtest_start_ts": v.get("backtest_start_ts", None),
"backtest_end_ts": v.get("backtest_end_ts", None),
"timeframe": v.get("timeframe", None),
"timeframe_detail": v.get("timeframe_detail", None),
}
for s, v in metadata.items()
]
@@ -218,7 +244,7 @@ def delete_backtest_result(file_abs: Path):
"""
# *.meta.json
logger.info(f"Deleting backtest result file: {file_abs.name}")
file_abs_meta = file_abs.with_suffix('.meta.json')
file_abs_meta = file_abs.with_suffix(".meta.json")
file_abs.unlink()
file_abs_meta.unlink()
@@ -244,12 +270,13 @@ def get_backtest_market_change(filename: Path, include_ts: bool = True) -> pd.Da
"""
df = pd.read_feather(filename)
if include_ts:
df.loc[:, '__date_ts'] = df.loc[:, 'date'].astype(np.int64) // 1000 // 1000
df.loc[:, "__date_ts"] = df.loc[:, "date"].astype(np.int64) // 1000 // 1000
return df
def find_existing_backtest_stats(dirname: Union[Path, str], run_ids: Dict[str, str],
min_backtest_date: Optional[datetime] = None) -> Dict[str, Any]:
def find_existing_backtest_stats(
dirname: Union[Path, str], run_ids: Dict[str, str], min_backtest_date: Optional[datetime] = None
) -> Dict[str, Any]:
"""
Find existing backtest stats that match specified run IDs and load them.
:param dirname: pathlib.Path object, or string pointing to the file.
@@ -261,9 +288,9 @@ def find_existing_backtest_stats(dirname: Union[Path, str], run_ids: Dict[str, s
run_ids = copy(run_ids)
dirname = Path(dirname)
results: Dict[str, Any] = {
'metadata': {},
'strategy': {},
'strategy_comparison': [],
"metadata": {},
"strategy": {},
"strategy_comparison": [],
}
for filename in _get_backtest_files(dirname):
@@ -280,14 +307,14 @@ def find_existing_backtest_stats(dirname: Union[Path, str], run_ids: Dict[str, s
continue
if min_backtest_date is not None:
backtest_date = strategy_metadata['backtest_start_time']
backtest_date = strategy_metadata["backtest_start_time"]
backtest_date = datetime.fromtimestamp(backtest_date, tz=timezone.utc)
if backtest_date < min_backtest_date:
# Do not use a cached result for this strategy as first result is too old.
del run_ids[strategy_name]
continue
if strategy_metadata['run_id'] == run_id:
if strategy_metadata["run_id"] == run_id:
del run_ids[strategy_name]
load_and_merge_backtest_result(strategy_name, filename, results)
@@ -300,20 +327,20 @@ def _load_backtest_data_df_compatibility(df: pd.DataFrame) -> pd.DataFrame:
"""
Compatibility support for older backtest data.
"""
df['open_date'] = pd.to_datetime(df['open_date'], utc=True)
df['close_date'] = pd.to_datetime(df['close_date'], utc=True)
df["open_date"] = pd.to_datetime(df["open_date"], utc=True)
df["close_date"] = pd.to_datetime(df["close_date"], utc=True)
# Compatibility support for pre short Columns
if 'is_short' not in df.columns:
df['is_short'] = False
if 'leverage' not in df.columns:
df['leverage'] = 1.0
if 'enter_tag' not in df.columns:
df['enter_tag'] = df['buy_tag']
df = df.drop(['buy_tag'], axis=1)
if 'max_stake_amount' not in df.columns:
df['max_stake_amount'] = df['stake_amount']
if 'orders' not in df.columns:
df['orders'] = None
if "is_short" not in df.columns:
df["is_short"] = False
if "leverage" not in df.columns:
df["leverage"] = 1.0
if "enter_tag" not in df.columns:
df["enter_tag"] = df["buy_tag"]
df = df.drop(["buy_tag"], axis=1)
if "max_stake_amount" not in df.columns:
df["max_stake_amount"] = df["stake_amount"]
if "orders" not in df.columns:
df["orders"] = None
return df
@@ -329,23 +356,25 @@ def load_backtest_data(filename: Union[Path, str], strategy: Optional[str] = Non
data = load_backtest_stats(filename)
if not isinstance(data, list):
# new, nested format
if 'strategy' not in data:
if "strategy" not in data:
raise ValueError("Unknown dataformat.")
if not strategy:
if len(data['strategy']) == 1:
strategy = list(data['strategy'].keys())[0]
if len(data["strategy"]) == 1:
strategy = list(data["strategy"].keys())[0]
else:
raise ValueError("Detected backtest result with more than one strategy. "
"Please specify a strategy.")
raise ValueError(
"Detected backtest result with more than one strategy. "
"Please specify a strategy."
)
if strategy not in data['strategy']:
if strategy not in data["strategy"]:
raise ValueError(
f"Strategy {strategy} not available in the backtest result. "
f"Available strategies are '{','.join(data['strategy'].keys())}'"
)
)
data = data['strategy'][strategy]['trades']
data = data["strategy"][strategy]["trades"]
df = pd.DataFrame(data)
if not df.empty:
df = _load_backtest_data_df_compatibility(df)
@@ -353,7 +382,8 @@ def load_backtest_data(filename: Union[Path, str], strategy: Optional[str] = Non
else:
# old format - only with lists.
raise OperationalException(
"Backtest-results with only trades data are no longer supported.")
"Backtest-results with only trades data are no longer supported."
)
if not df.empty:
df = df.sort_values("open_date").reset_index(drop=True)
return df
@@ -368,23 +398,26 @@ def analyze_trade_parallelism(results: pd.DataFrame, timeframe: str) -> pd.DataF
:return: dataframe with open-counts per time-period in timeframe
"""
from freqtrade.exchange import timeframe_to_resample_freq
timeframe_freq = timeframe_to_resample_freq(timeframe)
dates = [pd.Series(pd.date_range(row[1]['open_date'], row[1]['close_date'],
freq=timeframe_freq))
for row in results[['open_date', 'close_date']].iterrows()]
dates = [
pd.Series(pd.date_range(row[1]["open_date"], row[1]["close_date"], freq=timeframe_freq))
for row in results[["open_date", "close_date"]].iterrows()
]
deltas = [len(x) for x in dates]
dates = pd.Series(pd.concat(dates).values, name='date')
dates = pd.Series(pd.concat(dates).values, name="date")
df2 = pd.DataFrame(np.repeat(results.values, deltas, axis=0), columns=results.columns)
df2 = pd.concat([dates, df2], axis=1)
df2 = df2.set_index('date')
df_final = df2.resample(timeframe_freq)[['pair']].count()
df_final = df_final.rename({'pair': 'open_trades'}, axis=1)
df2 = df2.set_index("date")
df_final = df2.resample(timeframe_freq)[["pair"]].count()
df_final = df_final.rename({"pair": "open_trades"}, axis=1)
return df_final
def evaluate_result_multi(results: pd.DataFrame, timeframe: str,
max_open_trades: IntOrInf) -> pd.DataFrame:
def evaluate_result_multi(
results: pd.DataFrame, timeframe: str, max_open_trades: IntOrInf
) -> pd.DataFrame:
"""
Find overlapping trades by expanding each trade once per period it was open
and then counting overlaps
@@ -394,7 +427,7 @@ def evaluate_result_multi(results: pd.DataFrame, timeframe: str,
:return: dataframe with open-counts per time-period in freq
"""
df_final = analyze_trade_parallelism(results, timeframe)
return df_final[df_final['open_trades'] > max_open_trades]
return df_final[df_final["open_trades"] > max_open_trades]
def trade_list_to_dataframe(trades: Union[List[Trade], List[LocalTrade]]) -> pd.DataFrame:
@@ -405,9 +438,9 @@ def trade_list_to_dataframe(trades: Union[List[Trade], List[LocalTrade]]) -> pd.
"""
df = pd.DataFrame.from_records([t.to_json(True) for t in trades], columns=BT_DATA_COLUMNS)
if len(df) > 0:
df['close_date'] = pd.to_datetime(df['close_date'], utc=True)
df['open_date'] = pd.to_datetime(df['open_date'], utc=True)
df['close_rate'] = df['close_rate'].astype('float64')
df["close_date"] = pd.to_datetime(df["close_date"], utc=True)
df["open_date"] = pd.to_datetime(df["open_date"], utc=True)
df["close_rate"] = df["close_rate"].astype("float64")
return df
@@ -429,8 +462,13 @@ def load_trades_from_db(db_url: str, strategy: Optional[str] = None) -> pd.DataF
return trades
def load_trades(source: str, db_url: str, exportfilename: Path,
no_trades: bool = False, strategy: Optional[str] = None) -> pd.DataFrame:
def load_trades(
source: str,
db_url: str,
exportfilename: Path,
no_trades: bool = False,
strategy: Optional[str] = None,
) -> pd.DataFrame:
"""
Based on configuration option 'trade_source':
* loads data from DB (using `db_url`)
@@ -451,8 +489,9 @@ def load_trades(source: str, db_url: str, exportfilename: Path,
return load_backtest_data(exportfilename, strategy)
def extract_trades_of_period(dataframe: pd.DataFrame, trades: pd.DataFrame,
date_index=False) -> pd.DataFrame:
def extract_trades_of_period(
dataframe: pd.DataFrame, trades: pd.DataFrame, date_index=False
) -> pd.DataFrame:
"""
Compare trades and backtested pair DataFrames to get trades performed in the backtested period
:return: the DataFrame of trades for the period
@@ -461,8 +500,9 @@ def extract_trades_of_period(dataframe: pd.DataFrame, trades: pd.DataFrame,
trades_start = dataframe.index[0]
trades_stop = dataframe.index[-1]
else:
trades_start = dataframe.iloc[0]['date']
trades_stop = dataframe.iloc[-1]['date']
trades = trades.loc[(trades['open_date'] >= trades_start) &
(trades['close_date'] <= trades_stop)]
trades_start = dataframe.iloc[0]["date"]
trades_stop = dataframe.iloc[-1]["date"]
trades = trades.loc[
(trades["open_date"] >= trades_start) & (trades["close_date"] <= trades_stop)
]
return trades
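Taken together, these helpers form a short load path. A sketch under the assumption that a prior backtest wrote its stats plus a .last_result.json pointer into the results directory; the directory and strategy name are placeholders:

    from pathlib import Path

    from freqtrade.data.btanalysis import analyze_trade_parallelism, load_backtest_data

    results_dir = Path("user_data/backtest_results")  # illustrative location
    # Passing the directory resolves the newest file via get_latest_backtest_filename;
    # strategy= is only required when the result holds several strategies.
    trades = load_backtest_data(results_dir, strategy="MyStrategy")

    # One row per 5m period, counting concurrently open trades.
    parallelism = analyze_trade_parallelism(trades, "5m")
    print(parallelism["open_trades"].max())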


@@ -1,28 +1,38 @@
from freqtrade.data.converter.converter import (clean_ohlcv_dataframe, convert_ohlcv_format,
ohlcv_fill_up_missing_data, ohlcv_to_dataframe,
order_book_to_dataframe, reduce_dataframe_footprint,
trim_dataframe, trim_dataframes)
from freqtrade.data.converter.trade_converter import (convert_trades_format,
convert_trades_to_ohlcv, trades_convert_types,
trades_df_remove_duplicates,
trades_dict_to_list, trades_list_to_df,
trades_to_ohlcv)
from freqtrade.data.converter.converter import (
clean_ohlcv_dataframe,
convert_ohlcv_format,
ohlcv_fill_up_missing_data,
ohlcv_to_dataframe,
order_book_to_dataframe,
reduce_dataframe_footprint,
trim_dataframe,
trim_dataframes,
)
from freqtrade.data.converter.trade_converter import (
convert_trades_format,
convert_trades_to_ohlcv,
trades_convert_types,
trades_df_remove_duplicates,
trades_dict_to_list,
trades_list_to_df,
trades_to_ohlcv,
)
__all__ = [
'clean_ohlcv_dataframe',
'convert_ohlcv_format',
'ohlcv_fill_up_missing_data',
'ohlcv_to_dataframe',
'order_book_to_dataframe',
'reduce_dataframe_footprint',
'trim_dataframe',
'trim_dataframes',
'convert_trades_format',
'convert_trades_to_ohlcv',
'trades_convert_types',
'trades_df_remove_duplicates',
'trades_dict_to_list',
'trades_list_to_df',
'trades_to_ohlcv',
"clean_ohlcv_dataframe",
"convert_ohlcv_format",
"ohlcv_fill_up_missing_data",
"ohlcv_to_dataframe",
"order_book_to_dataframe",
"reduce_dataframe_footprint",
"trim_dataframe",
"trim_dataframes",
"convert_trades_format",
"convert_trades_to_ohlcv",
"trades_convert_types",
"trades_df_remove_duplicates",
"trades_dict_to_list",
"trades_list_to_df",
"trades_to_ohlcv",
]
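With the package's __all__ spelled out entry by entry, star imports stay bounded to exactly these names; direct imports remain the usual pattern:

    from freqtrade.data.converter import ohlcv_to_dataframe, trim_dataframe

    # A wildcard import (`from freqtrade.data.converter import *`) would expose
    # only the names listed in __all__ above, nothing else.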


@@ -1,6 +1,7 @@
"""
Functions to convert data from one format to another
"""
import logging
from typing import Dict
@@ -15,8 +16,14 @@ from freqtrade.enums import CandleType, TradingMode
logger = logging.getLogger(__name__)
def ohlcv_to_dataframe(ohlcv: list, timeframe: str, pair: str, *,
fill_missing: bool = True, drop_incomplete: bool = True) -> DataFrame:
def ohlcv_to_dataframe(
ohlcv: list,
timeframe: str,
pair: str,
*,
fill_missing: bool = True,
drop_incomplete: bool = True,
) -> DataFrame:
"""
Converts a list with candle (OHLCV) data (in format returned by ccxt.fetch_ohlcv)
to a Dataframe
@@ -32,20 +39,28 @@ def ohlcv_to_dataframe(ohlcv: list, timeframe: str, pair: str, *,
cols = DEFAULT_DATAFRAME_COLUMNS
df = DataFrame(ohlcv, columns=cols)
df['date'] = to_datetime(df['date'], unit='ms', utc=True)
df["date"] = to_datetime(df["date"], unit="ms", utc=True)
# Some exchanges return int values for Volume and even for OHLC.
# Convert them since TA-LIB indicators used in the strategy assume floats
# and fail with exception...
df = df.astype(dtype={'open': 'float', 'high': 'float', 'low': 'float', 'close': 'float',
'volume': 'float'})
return clean_ohlcv_dataframe(df, timeframe, pair,
fill_missing=fill_missing,
drop_incomplete=drop_incomplete)
df = df.astype(
dtype={
"open": "float",
"high": "float",
"low": "float",
"close": "float",
"volume": "float",
}
)
return clean_ohlcv_dataframe(
df, timeframe, pair, fill_missing=fill_missing, drop_incomplete=drop_incomplete
)
def clean_ohlcv_dataframe(data: DataFrame, timeframe: str, pair: str, *,
fill_missing: bool, drop_incomplete: bool) -> DataFrame:
def clean_ohlcv_dataframe(
data: DataFrame, timeframe: str, pair: str, *, fill_missing: bool, drop_incomplete: bool
) -> DataFrame:
"""
Cleanse a OHLCV dataframe by
* Grouping it by date (removes duplicate tics)
@@ -60,17 +75,19 @@ def clean_ohlcv_dataframe(data: DataFrame, timeframe: str, pair: str, *,
:return: DataFrame
"""
# group by index and aggregate results to eliminate duplicate ticks
data = data.groupby(by='date', as_index=False, sort=True).agg({
'open': 'first',
'high': 'max',
'low': 'min',
'close': 'last',
'volume': 'max',
})
data = data.groupby(by="date", as_index=False, sort=True).agg(
{
"open": "first",
"high": "max",
"low": "min",
"close": "last",
"volume": "max",
}
)
# eliminate partial candle
if drop_incomplete:
data.drop(data.tail(1).index, inplace=True)
logger.debug('Dropping last candle')
logger.debug("Dropping last candle")
if fill_missing:
return ohlcv_fill_up_missing_data(data, timeframe, pair)
@@ -81,37 +98,35 @@ def clean_ohlcv_dataframe(data: DataFrame, timeframe: str, pair: str, *,
def ohlcv_fill_up_missing_data(dataframe: DataFrame, timeframe: str, pair: str) -> DataFrame:
"""
Fills up missing data with 0 volume rows,
using the previous close as price for "open", "high" "low" and "close", volume is set to 0
using the previous close as price for "open", "high", "low" and "close", volume is set to 0
"""
from freqtrade.exchange import timeframe_to_resample_freq
ohlcv_dict = {
'open': 'first',
'high': 'max',
'low': 'min',
'close': 'last',
'volume': 'sum'
}
ohlcv_dict = {"open": "first", "high": "max", "low": "min", "close": "last", "volume": "sum"}
resample_interval = timeframe_to_resample_freq(timeframe)
# Resample to create "NAN" values
df = dataframe.resample(resample_interval, on='date').agg(ohlcv_dict)
df = dataframe.resample(resample_interval, on="date").agg(ohlcv_dict)
# Forwardfill close for missing columns
df['close'] = df['close'].ffill()
df["close"] = df["close"].ffill()
# Use close for "open, high, low"
df.loc[:, ['open', 'high', 'low']] = df[['open', 'high', 'low']].fillna(
value={'open': df['close'],
'high': df['close'],
'low': df['close'],
})
df.loc[:, ["open", "high", "low"]] = df[["open", "high", "low"]].fillna(
value={
"open": df["close"],
"high": df["close"],
"low": df["close"],
}
)
df.reset_index(inplace=True)
len_before = len(dataframe)
len_after = len(df)
pct_missing = (len_after - len_before) / len_before if len_before > 0 else 0
if len_before != len_after:
message = (f"Missing data fillup for {pair}, {timeframe}: "
f"before: {len_before} - after: {len_after} - {pct_missing:.2%}")
message = (
f"Missing data fillup for {pair}, {timeframe}: "
f"before: {len_before} - after: {len_after} - {pct_missing:.2%}"
)
if pct_missing > 0.01:
logger.info(message)
else:
@@ -120,8 +135,9 @@ def ohlcv_fill_up_missing_data(dataframe: DataFrame, timeframe: str, pair: str)
return df
def trim_dataframe(df: DataFrame, timerange, *, df_date_col: str = 'date',
startup_candles: int = 0) -> DataFrame:
def trim_dataframe(
df: DataFrame, timerange, *, df_date_col: str = "date", startup_candles: int = 0
) -> DataFrame:
"""
Trim dataframe based on given timerange
:param df: Dataframe to trim
@@ -134,15 +150,16 @@ def trim_dataframe(df: DataFrame, timerange, *, df_date_col: str = 'date',
# Trim candles instead of timeframe in case of given startup_candle count
df = df.iloc[startup_candles:, :]
else:
if timerange.starttype == 'date':
if timerange.starttype == "date":
df = df.loc[df[df_date_col] >= timerange.startdt, :]
if timerange.stoptype == 'date':
if timerange.stoptype == "date":
df = df.loc[df[df_date_col] <= timerange.stopdt, :]
return df
def trim_dataframes(preprocessed: Dict[str, DataFrame], timerange,
startup_candles: int) -> Dict[str, DataFrame]:
def trim_dataframes(
preprocessed: Dict[str, DataFrame], timerange, startup_candles: int
) -> Dict[str, DataFrame]:
"""
Trim startup period from analyzed dataframes
:param preprocessed: Dict of pair: dataframe
@@ -157,8 +174,9 @@ def trim_dataframes(preprocessed: Dict[str, DataFrame], timerange,
if not trimed_df.empty:
processed[pair] = trimed_df
else:
logger.warning(f'{pair} has no data left after adjusting for startup candles, '
f'skipping.')
logger.warning(
f"{pair} has no data left after adjusting for startup candles, skipping."
)
return processed
@@ -170,19 +188,28 @@ def order_book_to_dataframe(bids: list, asks: list) -> DataFrame:
b_sum b_size bids asks a_size a_sum
-------------------------------------------------------------------
"""
cols = ['bids', 'b_size']
cols = ["bids", "b_size"]
bids_frame = DataFrame(bids, columns=cols)
# add cumulative sum column
bids_frame['b_sum'] = bids_frame['b_size'].cumsum()
cols2 = ['asks', 'a_size']
bids_frame["b_sum"] = bids_frame["b_size"].cumsum()
cols2 = ["asks", "a_size"]
asks_frame = DataFrame(asks, columns=cols2)
# add cumulative sum column
asks_frame['a_sum'] = asks_frame['a_size'].cumsum()
asks_frame["a_sum"] = asks_frame["a_size"].cumsum()
frame = pd.concat([bids_frame['b_sum'], bids_frame['b_size'], bids_frame['bids'],
asks_frame['asks'], asks_frame['a_size'], asks_frame['a_sum']], axis=1,
keys=['b_sum', 'b_size', 'bids', 'asks', 'a_size', 'a_sum'])
frame = pd.concat(
[
bids_frame["b_sum"],
bids_frame["b_size"],
bids_frame["bids"],
asks_frame["asks"],
asks_frame["a_size"],
asks_frame["a_sum"],
],
axis=1,
keys=["b_sum", "b_size", "bids", "asks", "a_size", "a_sum"],
)
# logger.info('order book %s', frame )
return frame
@@ -201,47 +228,51 @@ def convert_ohlcv_format(
:param erase: Erase source data (does not apply if source and target format are identical)
"""
from freqtrade.data.history import get_datahandler
src = get_datahandler(config['datadir'], convert_from)
trg = get_datahandler(config['datadir'], convert_to)
timeframes = config.get('timeframes', [config.get('timeframe')])
src = get_datahandler(config["datadir"], convert_from)
trg = get_datahandler(config["datadir"], convert_to)
timeframes = config.get("timeframes", [config.get("timeframe")])
logger.info(f"Converting candle (OHLCV) for timeframe {timeframes}")
candle_types = [CandleType.from_string(ct) for ct in config.get('candle_types', [
c.value for c in CandleType])]
candle_types = [
CandleType.from_string(ct)
for ct in config.get("candle_types", [c.value for c in CandleType])
]
logger.info(candle_types)
paircombs = src.ohlcv_get_available_data(config['datadir'], TradingMode.SPOT)
paircombs.extend(src.ohlcv_get_available_data(config['datadir'], TradingMode.FUTURES))
paircombs = src.ohlcv_get_available_data(config["datadir"], TradingMode.SPOT)
paircombs.extend(src.ohlcv_get_available_data(config["datadir"], TradingMode.FUTURES))
if 'pairs' in config:
if "pairs" in config:
# Filter pairs
paircombs = [comb for comb in paircombs if comb[0] in config['pairs']]
paircombs = [comb for comb in paircombs if comb[0] in config["pairs"]]
if 'timeframes' in config:
paircombs = [comb for comb in paircombs if comb[1] in config['timeframes']]
if "timeframes" in config:
paircombs = [comb for comb in paircombs if comb[1] in config["timeframes"]]
paircombs = [comb for comb in paircombs if comb[2] in candle_types]
paircombs = sorted(paircombs, key=lambda x: (x[0], x[1], x[2].value))
formatted_paircombs = '\n'.join([f"{pair}, {timeframe}, {candle_type}"
for pair, timeframe, candle_type in paircombs])
formatted_paircombs = "\n".join(
[f"{pair}, {timeframe}, {candle_type}" for pair, timeframe, candle_type in paircombs]
)
logger.info(f"Converting candle (OHLCV) data for the following pair combinations:\n"
f"{formatted_paircombs}")
logger.info(
f"Converting candle (OHLCV) data for the following pair combinations:\n"
f"{formatted_paircombs}"
)
for pair, timeframe, candle_type in paircombs:
data = src.ohlcv_load(pair=pair, timeframe=timeframe,
timerange=None,
fill_missing=False,
drop_incomplete=False,
startup_candles=0,
candle_type=candle_type)
data = src.ohlcv_load(
pair=pair,
timeframe=timeframe,
timerange=None,
fill_missing=False,
drop_incomplete=False,
startup_candles=0,
candle_type=candle_type,
)
logger.info(f"Converting {len(data)} {timeframe} {candle_type} candles for {pair}")
if len(data) > 0:
trg.ohlcv_store(
pair=pair,
timeframe=timeframe,
data=data,
candle_type=candle_type
)
trg.ohlcv_store(pair=pair, timeframe=timeframe, data=data, candle_type=candle_type)
if erase and convert_from != convert_to:
logger.info(f"Deleting source data for {pair} / {timeframe}")
src.ohlcv_purge(pair=pair, timeframe=timeframe, candle_type=candle_type)
@@ -254,12 +285,11 @@ def reduce_dataframe_footprint(df: DataFrame) -> DataFrame:
:return: Dataframe converted to float/int 32s
"""
logger.debug(f"Memory usage of dataframe is "
f"{df.memory_usage().sum() / 1024**2:.2f} MB")
logger.debug(f"Memory usage of dataframe is {df.memory_usage().sum() / 1024**2:.2f} MB")
df_dtypes = df.dtypes
for column, dtype in df_dtypes.items():
if column in ['open', 'high', 'low', 'close', 'volume']:
if column in ["open", "high", "low", "close", "volume"]:
continue
if dtype == np.float64:
df_dtypes[column] = np.float32
@@ -267,7 +297,6 @@ def reduce_dataframe_footprint(df: DataFrame) -> DataFrame:
df_dtypes[column] = np.int32
df = df.astype(df_dtypes)
logger.debug(f"Memory usage after optimization is: "
f"{df.memory_usage().sum() / 1024**2:.2f} MB")
logger.debug(f"Memory usage after optimization is: {df.memory_usage().sum() / 1024**2:.2f} MB")
return df
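A compact sketch of the conversion entry point reformatted above, fed a hand-written candle list in the shape ccxt's fetch_ohlcv returns; all values are invented:

    from freqtrade.data.converter import ohlcv_to_dataframe

    raw = [
        # [ms timestamp, open, high, low, close, volume]
        [1700000000000, 100.0, 101.0, 99.5, 100.5, 12.0],
        [1700000300000, 100.5, 102.0, 100.0, 101.5, 8.0],
        [1700000600000, 101.5, 101.8, 100.9, 101.0, 5.0],
    ]
    df = ohlcv_to_dataframe(
        raw, "5m", pair="BTC/USDT", fill_missing=True, drop_incomplete=True
    )
    # The trailing candle is treated as incomplete and dropped; any gaps would
    # be filled with zero-volume rows carrying the previous close.
    print(df.dtypes)  # open/high/low/close/volume coerced to float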


@@ -1,6 +1,7 @@
"""
Functions to convert data from one format to another
"""
import logging
from pathlib import Path
from typing import Dict, List
@@ -9,8 +10,13 @@ import pandas as pd
from pandas import DataFrame, to_datetime
from freqtrade.configuration import TimeRange
from freqtrade.constants import (DEFAULT_DATAFRAME_COLUMNS, DEFAULT_TRADES_COLUMNS, TRADES_DTYPES,
Config, TradeList)
from freqtrade.constants import (
DEFAULT_DATAFRAME_COLUMNS,
DEFAULT_TRADES_COLUMNS,
TRADES_DTYPES,
Config,
TradeList,
)
from freqtrade.enums import CandleType, TradingMode
from freqtrade.exceptions import OperationalException
@@ -25,7 +31,7 @@ def trades_df_remove_duplicates(trades: pd.DataFrame) -> pd.DataFrame:
:param trades: DataFrame with the columns constants.DEFAULT_TRADES_COLUMNS
:return: DataFrame with duplicates removed based on the 'timestamp' column
"""
return trades.drop_duplicates(subset=['timestamp', 'id'])
return trades.drop_duplicates(subset=["timestamp", "id"])
def trades_dict_to_list(trades: List[Dict]) -> TradeList:
@@ -42,7 +48,7 @@ def trades_convert_types(trades: DataFrame) -> DataFrame:
Convert Trades dtypes and add 'date' column
"""
trades = trades.astype(TRADES_DTYPES)
trades['date'] = to_datetime(trades['timestamp'], unit='ms', utc=True)
trades["date"] = to_datetime(trades["timestamp"], unit="ms", utc=True)
return trades
@@ -71,13 +77,14 @@ def trades_to_ohlcv(trades: DataFrame, timeframe: str) -> DataFrame:
:raises: ValueError if no trades are provided
"""
from freqtrade.exchange import timeframe_to_resample_freq
if trades.empty:
raise ValueError('Trade-list empty.')
df = trades.set_index('date', drop=True)
raise ValueError("Trade-list empty.")
df = trades.set_index("date", drop=True)
resample_interval = timeframe_to_resample_freq(timeframe)
df_new = df['price'].resample(resample_interval).ohlc()
df_new['volume'] = df['amount'].resample(resample_interval).sum()
df_new['date'] = df_new.index
df_new = df["price"].resample(resample_interval).ohlc()
df_new["volume"] = df["amount"].resample(resample_interval).sum()
df_new["date"] = df_new.index
# Drop 0 volume rows
df_new = df_new.dropna()
return df_new.loc[:, DEFAULT_DATAFRAME_COLUMNS]
@@ -97,24 +104,27 @@ def convert_trades_to_ohlcv(
Convert stored trades data to ohlcv data
"""
from freqtrade.data.history import get_datahandler
data_handler_trades = get_datahandler(datadir, data_format=data_format_trades)
data_handler_ohlcv = get_datahandler(datadir, data_format=data_format_ohlcv)
logger.info(f"About to convert pairs: '{', '.join(pairs)}', "
f"intervals: '{', '.join(timeframes)}' to {datadir}")
logger.info(
f"About to convert pairs: '{', '.join(pairs)}', "
f"intervals: '{', '.join(timeframes)}' to {datadir}"
)
trading_mode = TradingMode.FUTURES if candle_type != CandleType.SPOT else TradingMode.SPOT
for pair in pairs:
trades = data_handler_trades.trades_load(pair, trading_mode)
for timeframe in timeframes:
if erase:
if data_handler_ohlcv.ohlcv_purge(pair, timeframe, candle_type=candle_type):
logger.info(f'Deleting existing data for pair {pair}, interval {timeframe}.')
logger.info(f"Deleting existing data for pair {pair}, interval {timeframe}.")
try:
ohlcv = trades_to_ohlcv(trades, timeframe)
# Store ohlcv
data_handler_ohlcv.ohlcv_store(pair, timeframe, data=ohlcv, candle_type=candle_type)
except ValueError:
logger.warning(f'Could not convert {pair} to OHLCV.')
logger.warning(f"Could not convert {pair} to OHLCV.")
def convert_trades_format(config: Config, convert_from: str, convert_to: str, erase: bool):
@@ -125,25 +135,27 @@ def convert_trades_format(config: Config, convert_from: str, convert_to: str, er
:param convert_to: Target format
:param erase: Erase source data (does not apply if source and target format are identical)
"""
if convert_from == 'kraken_csv':
if config['exchange']['name'] != 'kraken':
if convert_from == "kraken_csv":
if config["exchange"]["name"] != "kraken":
raise OperationalException(
'Converting from csv is only supported for kraken.'
'Please refer to the documentation for details about this special mode.'
"Converting from csv is only supported for kraken."
"Please refer to the documentation for details about this special mode."
)
from freqtrade.data.converter.trade_converter_kraken import import_kraken_trades_from_csv
import_kraken_trades_from_csv(config, convert_to)
return
from freqtrade.data.history import get_datahandler
src = get_datahandler(config['datadir'], convert_from)
trg = get_datahandler(config['datadir'], convert_to)
if 'pairs' not in config:
config['pairs'] = src.trades_get_pairs(config['datadir'])
src = get_datahandler(config["datadir"], convert_from)
trg = get_datahandler(config["datadir"], convert_to)
if "pairs" not in config:
config["pairs"] = src.trades_get_pairs(config["datadir"])
logger.info(f"Converting trades for {config['pairs']}")
trading_mode: TradingMode = config.get('trading_mode', TradingMode.SPOT)
for pair in config['pairs']:
trading_mode: TradingMode = config.get("trading_mode", TradingMode.SPOT)
for pair in config["pairs"]:
data = src.trades_load(pair, trading_mode)
logger.info(f"Converting {len(data)} trades for {pair}")
trg.trades_store(pair, data, trading_mode)
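A minimal sketch of the trades-to-candles path, assuming the default trades column layout (timestamp, id, type, side, price, amount, cost) and invented values:

    from freqtrade.data.converter import trades_list_to_df, trades_to_ohlcv

    trades = trades_list_to_df([
        # [ms timestamp, id, type, side, price, amount, cost]
        [1700000000000, "1", None, "buy", 100.0, 0.5, 50.0],
        [1700000120000, "2", None, "sell", 101.0, 0.2, 20.2],
    ])
    ohlcv = trades_to_ohlcv(trades, "1m")  # raises ValueError on an empty frame
    print(ohlcv[["date", "open", "close", "volume"]])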


@@ -4,8 +4,10 @@ from pathlib import Path
import pandas as pd
from freqtrade.constants import DATETIME_PRINT_FORMAT, DEFAULT_TRADES_COLUMNS, Config
from freqtrade.data.converter.trade_converter import (trades_convert_types,
trades_df_remove_duplicates)
from freqtrade.data.converter.trade_converter import (
trades_convert_types,
trades_df_remove_duplicates,
)
from freqtrade.data.history import get_datahandler
from freqtrade.enums import TradingMode
from freqtrade.exceptions import OperationalException
@@ -15,32 +17,33 @@ from freqtrade.resolvers import ExchangeResolver
logger = logging.getLogger(__name__)
KRAKEN_CSV_TRADE_COLUMNS = ['timestamp', 'price', 'amount']
KRAKEN_CSV_TRADE_COLUMNS = ["timestamp", "price", "amount"]
def import_kraken_trades_from_csv(config: Config, convert_to: str):
"""
Import kraken trades from csv
"""
if config['exchange']['name'] != 'kraken':
raise OperationalException('This function is only for the kraken exchange.')
if config["exchange"]["name"] != "kraken":
raise OperationalException("This function is only for the kraken exchange.")
datadir: Path = config['datadir']
datadir: Path = config["datadir"]
data_handler = get_datahandler(datadir, data_format=convert_to)
tradesdir: Path = config['datadir'] / 'trades_csv'
tradesdir: Path = config["datadir"] / "trades_csv"
exchange = ExchangeResolver.load_exchange(config, validate=False)
# collect csv file stems from anywhere below this directory
data_symbols = {p.stem for p in tradesdir.rglob('*.csv')}
data_symbols = {p.stem for p in tradesdir.rglob("*.csv")}
# create pair/filename mapping
markets = {
(m['symbol'], m['altname']) for m in exchange.markets.values()
if m.get('altname') in data_symbols
(m["symbol"], m["altname"])
for m in exchange.markets.values()
if m.get("altname") in data_symbols
}
logger.info(f"Found csv files for {', '.join(data_symbols)}.")
if pairs_raw := config.get('pairs'):
if pairs_raw := config.get("pairs"):
pairs = expand_pairlist(pairs_raw, [m[0] for m in markets])
markets = {m for m in markets if m[0] in pairs}
if not markets:
@@ -66,18 +69,20 @@ def import_kraken_trades_from_csv(config: Config, convert_to: str):
trades = pd.concat(dfs, ignore_index=True)
del dfs
trades.loc[:, 'timestamp'] = trades['timestamp'] * 1e3
trades.loc[:, 'cost'] = trades['price'] * trades['amount']
trades.loc[:, "timestamp"] = trades["timestamp"] * 1e3
trades.loc[:, "cost"] = trades["price"] * trades["amount"]
for col in DEFAULT_TRADES_COLUMNS:
if col not in trades.columns:
trades.loc[:, col] = ''
trades.loc[:, col] = ""
trades = trades[DEFAULT_TRADES_COLUMNS]
trades = trades_convert_types(trades)
trades_df = trades_df_remove_duplicates(trades)
del trades
logger.info(f"{pair}: {len(trades_df)} trades, from "
f"{trades_df['date'].min():{DATETIME_PRINT_FORMAT}} to "
f"{trades_df['date'].max():{DATETIME_PRINT_FORMAT}}")
logger.info(
f"{pair}: {len(trades_df)} trades, from "
f"{trades_df['date'].min():{DATETIME_PRINT_FORMAT}} to "
f"{trades_df['date'].max():{DATETIME_PRINT_FORMAT}}"
)
data_handler.trades_store(pair, trades_df, TradingMode.SPOT)
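In practice this import path is reached through the trade-data conversion command rather than called directly; a hedged sketch of direct use, assuming a config file that names kraken and a datadir whose trades_csv subfolder holds the unpacked dumps:

    from freqtrade.configuration import Configuration
    from freqtrade.data.converter.trade_converter_kraken import (
        import_kraken_trades_from_csv,
    )

    # "user_data/config_kraken.json" is a placeholder path.
    config = Configuration.from_files(["user_data/config_kraken.json"])
    import_kraken_trades_from_csv(config, convert_to="feather")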


@@ -4,6 +4,7 @@ Responsible for providing data to the bot
including ticker and orderbook data, live and historical candle (OHLCV) data
Common Interface for bot and strategy to access data.
"""
import logging
from collections import deque
from datetime import datetime, timezone
@@ -12,8 +13,12 @@ from typing import Any, Dict, List, Optional, Tuple
from pandas import DataFrame, Timedelta, Timestamp, to_timedelta
from freqtrade.configuration import TimeRange
from freqtrade.constants import (FULL_DATAFRAME_THRESHOLD, Config, ListPairsWithTimeframes,
PairWithTimeframe)
from freqtrade.constants import (
FULL_DATAFRAME_THRESHOLD,
Config,
ListPairsWithTimeframes,
PairWithTimeframe,
)
from freqtrade.data.history import load_pair_history
from freqtrade.enums import CandleType, RPCMessageType, RunMode
from freqtrade.exceptions import ExchangeError, OperationalException
@@ -27,18 +32,17 @@ from freqtrade.util import PeriodicCache
logger = logging.getLogger(__name__)
NO_EXCHANGE_EXCEPTION = 'Exchange is not available to DataProvider.'
NO_EXCHANGE_EXCEPTION = "Exchange is not available to DataProvider."
MAX_DATAFRAME_CANDLES = 1000
class DataProvider:
def __init__(
self,
config: Config,
exchange: Optional[Exchange],
pairlists=None,
rpc: Optional[RPCManager] = None
rpc: Optional[RPCManager] = None,
) -> None:
self._config = config
self._exchange = exchange
@@ -49,18 +53,20 @@ class DataProvider:
self.__slice_date: Optional[datetime] = None
self.__cached_pairs_backtesting: Dict[PairWithTimeframe, DataFrame] = {}
self.__producer_pairs_df: Dict[str,
Dict[PairWithTimeframe, Tuple[DataFrame, datetime]]] = {}
self.__producer_pairs_df: Dict[
str, Dict[PairWithTimeframe, Tuple[DataFrame, datetime]]
] = {}
self.__producer_pairs: Dict[str, List[str]] = {}
self._msg_queue: deque = deque()
self._default_candle_type = self._config.get('candle_type_def', CandleType.SPOT)
self._default_timeframe = self._config.get('timeframe', '1h')
self._default_candle_type = self._config.get("candle_type_def", CandleType.SPOT)
self._default_timeframe = self._config.get("timeframe", "1h")
self.__msg_cache = PeriodicCache(
maxsize=1000, ttl=timeframe_to_seconds(self._default_timeframe))
maxsize=1000, ttl=timeframe_to_seconds(self._default_timeframe)
)
self.producers = self._config.get('external_message_consumer', {}).get('producers', [])
self.producers = self._config.get("external_message_consumer", {}).get("producers", [])
self.external_data_enabled = len(self.producers) > 0
def _set_dataframe_max_index(self, limit_index: int):
@@ -80,11 +86,7 @@ class DataProvider:
self.__slice_date = limit_date
def _set_cached_df(
self,
pair: str,
timeframe: str,
dataframe: DataFrame,
candle_type: CandleType
self, pair: str, timeframe: str, dataframe: DataFrame, candle_type: CandleType
) -> None:
"""
Store cached Dataframe.
@@ -96,8 +98,7 @@ class DataProvider:
:param candle_type: Any of the enum CandleType (must match trading mode!)
"""
pair_key = (pair, timeframe, candle_type)
self.__cached_pairs[pair_key] = (
dataframe, datetime.now(timezone.utc))
self.__cached_pairs[pair_key] = (dataframe, datetime.now(timezone.utc))
# For multiple producers we will want to merge the pairlists instead of overwriting
def _set_producer_pairs(self, pairlist: List[str], producer_name: str = "default"):
@@ -116,12 +117,7 @@ class DataProvider:
"""
return self.__producer_pairs.get(producer_name, []).copy()
def _emit_df(
self,
pair_key: PairWithTimeframe,
dataframe: DataFrame,
new_candle: bool
) -> None:
def _emit_df(self, pair_key: PairWithTimeframe, dataframe: DataFrame, new_candle: bool) -> None:
"""
Send this dataframe as an ANALYZED_DF message to RPC
@@ -131,19 +127,21 @@ class DataProvider:
"""
if self.__rpc:
msg: RPCAnalyzedDFMsg = {
'type': RPCMessageType.ANALYZED_DF,
'data': {
'key': pair_key,
'df': dataframe.tail(1),
'la': datetime.now(timezone.utc)
}
}
"type": RPCMessageType.ANALYZED_DF,
"data": {
"key": pair_key,
"df": dataframe.tail(1),
"la": datetime.now(timezone.utc),
},
}
self.__rpc.send_msg(msg)
if new_candle:
self.__rpc.send_msg({
'type': RPCMessageType.NEW_CANDLE,
'data': pair_key,
})
self.__rpc.send_msg(
{
"type": RPCMessageType.NEW_CANDLE,
"data": pair_key,
}
)
def _replace_external_df(
self,
@@ -152,7 +150,7 @@ class DataProvider:
last_analyzed: datetime,
timeframe: str,
candle_type: CandleType,
producer_name: str = "default"
producer_name: str = "default",
) -> None:
"""
Add the pair data to this class from an external source.
@@ -178,7 +176,7 @@ class DataProvider:
last_analyzed: datetime,
timeframe: str,
candle_type: CandleType,
producer_name: str = "default"
producer_name: str = "default",
) -> Tuple[bool, int]:
"""
Append a candle to the existing external dataframe. The incoming dataframe
@@ -204,12 +202,14 @@ class DataProvider:
last_analyzed=last_analyzed,
timeframe=timeframe,
candle_type=candle_type,
producer_name=producer_name
producer_name=producer_name,
)
return (True, 0)
if (producer_name not in self.__producer_pairs_df
or pair_key not in self.__producer_pairs_df[producer_name]):
if (
producer_name not in self.__producer_pairs_df
or pair_key not in self.__producer_pairs_df[producer_name]
):
# We don't have data from this producer yet,
# or we don't have data for this pair_key
# return False and 1000 for the full df
@@ -220,12 +220,12 @@ class DataProvider:
# CHECK FOR MISSING CANDLES
# Convert the timeframe to a timedelta for pandas
timeframe_delta: Timedelta = to_timedelta(timeframe)
local_last: Timestamp = existing_df.iloc[-1]['date'] # We want the last date from our copy
local_last: Timestamp = existing_df.iloc[-1]["date"] # We want the last date from our copy
# We want the first date from the incoming
incoming_first: Timestamp = dataframe.iloc[0]['date']
incoming_first: Timestamp = dataframe.iloc[0]["date"]
# Remove existing candles that are newer than the incoming first candle
existing_df1 = existing_df[existing_df['date'] < incoming_first]
existing_df1 = existing_df[existing_df["date"] < incoming_first]
candle_difference = (incoming_first - local_last) / timeframe_delta
@@ -243,13 +243,13 @@ class DataProvider:
# Everything is good, we appended
self._replace_external_df(
pair,
appended_df,
last_analyzed=last_analyzed,
timeframe=timeframe,
candle_type=candle_type,
producer_name=producer_name
)
pair,
appended_df,
last_analyzed=last_analyzed,
timeframe=timeframe,
candle_type=candle_type,
producer_name=producer_name,
)
return (True, 0)
def get_producer_df(
@@ -257,7 +257,7 @@ class DataProvider:
pair: str,
timeframe: Optional[str] = None,
candle_type: Optional[CandleType] = None,
producer_name: str = "default"
producer_name: str = "default",
) -> Tuple[DataFrame, datetime]:
"""
Get the pair data from producers.
@@ -292,64 +292,64 @@ class DataProvider:
"""
self._pairlists = pairlists
def historic_ohlcv(
self,
pair: str,
timeframe: str,
candle_type: str = ''
) -> DataFrame:
def historic_ohlcv(self, pair: str, timeframe: str, candle_type: str = "") -> DataFrame:
"""
Get stored historical candle (OHLCV) data
:param pair: pair to get the data for
:param timeframe: timeframe to get data for
:param candle_type: '', mark, index, premiumIndex, or funding_rate
"""
_candle_type = CandleType.from_string(
candle_type) if candle_type != '' else self._config['candle_type_def']
_candle_type = (
CandleType.from_string(candle_type)
if candle_type != ""
else self._config["candle_type_def"]
)
saved_pair: PairWithTimeframe = (pair, str(timeframe), _candle_type)
if saved_pair not in self.__cached_pairs_backtesting:
timerange = TimeRange.parse_timerange(None if self._config.get(
'timerange') is None else str(self._config.get('timerange')))
timerange = TimeRange.parse_timerange(
None
if self._config.get("timerange") is None
else str(self._config.get("timerange"))
)
startup_candles = self.get_required_startup(str(timeframe))
tf_seconds = timeframe_to_seconds(str(timeframe))
timerange.subtract_start(tf_seconds * startup_candles)
logger.info(f"Loading data for {pair} {timeframe} "
f"from {timerange.start_fmt} to {timerange.stop_fmt}")
logger.info(
f"Loading data for {pair} {timeframe} "
f"from {timerange.start_fmt} to {timerange.stop_fmt}"
)
self.__cached_pairs_backtesting[saved_pair] = load_pair_history(
pair=pair,
timeframe=timeframe,
datadir=self._config['datadir'],
datadir=self._config["datadir"],
timerange=timerange,
data_format=self._config['dataformat_ohlcv'],
data_format=self._config["dataformat_ohlcv"],
candle_type=_candle_type,
)
return self.__cached_pairs_backtesting[saved_pair].copy()
def get_required_startup(self, timeframe: str) -> int:
freqai_config = self._config.get('freqai', {})
if not freqai_config.get('enabled', False):
return self._config.get('startup_candle_count', 0)
freqai_config = self._config.get("freqai", {})
if not freqai_config.get("enabled", False):
return self._config.get("startup_candle_count", 0)
else:
startup_candles = self._config.get('startup_candle_count', 0)
indicator_periods = freqai_config['feature_parameters']['indicator_periods_candles']
startup_candles = self._config.get("startup_candle_count", 0)
indicator_periods = freqai_config["feature_parameters"]["indicator_periods_candles"]
# make sure the startupcandles is at least the set maximum indicator periods
self._config['startup_candle_count'] = max(startup_candles, max(indicator_periods))
self._config["startup_candle_count"] = max(startup_candles, max(indicator_periods))
tf_seconds = timeframe_to_seconds(timeframe)
train_candles = freqai_config['train_period_days'] * 86400 / tf_seconds
total_candles = int(self._config['startup_candle_count'] + train_candles)
train_candles = freqai_config["train_period_days"] * 86400 / tf_seconds
total_candles = int(self._config["startup_candle_count"] + train_candles)
logger.info(
f'Increasing startup_candle_count for freqai on {timeframe} to {total_candles}')
f"Increasing startup_candle_count for freqai on {timeframe} to {total_candles}"
)
return total_candles
def get_pair_dataframe(
self,
pair: str,
timeframe: Optional[str] = None,
candle_type: str = ''
self, pair: str, timeframe: Optional[str] = None, candle_type: str = ""
) -> DataFrame:
"""
Return pair candle (OHLCV) data, either live or cached historical -- depending
@@ -366,13 +366,13 @@ class DataProvider:
data = self.ohlcv(pair=pair, timeframe=timeframe, candle_type=candle_type)
else:
# Get historical OHLCV data (cached on disk).
timeframe = timeframe or self._config['timeframe']
timeframe = timeframe or self._config["timeframe"]
data = self.historic_ohlcv(pair=pair, timeframe=timeframe, candle_type=candle_type)
# Cut date to timeframe-specific date.
# This is necessary to prevent lookahead bias in callbacks through informative pairs.
if self.__slice_date:
cutoff_date = timeframe_to_prev_date(timeframe, self.__slice_date)
data = data.loc[data['date'] < cutoff_date]
data = data.loc[data["date"] < cutoff_date]
if len(data) == 0:
logger.warning(f"No data found for ({pair}, {timeframe}, {candle_type}).")
return data
@@ -387,7 +387,7 @@ class DataProvider:
combination.
Returns empty dataframe and Epoch 0 (1970-01-01) if no dataframe was cached.
"""
pair_key = (pair, timeframe, self._config.get('candle_type_def', CandleType.SPOT))
pair_key = (pair, timeframe, self._config.get("candle_type_def", CandleType.SPOT))
if pair_key in self.__cached_pairs:
if self.runmode in (RunMode.DRY_RUN, RunMode.LIVE):
df, date = self.__cached_pairs[pair_key]
@@ -395,7 +395,7 @@ class DataProvider:
df, date = self.__cached_pairs[pair_key]
if self.__slice_index is not None:
max_index = self.__slice_index
df = df.iloc[max(0, max_index - MAX_DATAFRAME_CANDLES):max_index]
df = df.iloc[max(0, max_index - MAX_DATAFRAME_CANDLES) : max_index]
return df, date
else:
return (DataFrame(), datetime.fromtimestamp(0, tz=timezone.utc))
@@ -406,7 +406,7 @@ class DataProvider:
Get runmode of the bot
can be "live", "dry-run", "backtest", "edge", "hyperopt" or "other".
"""
return RunMode(self._config.get('runmode', RunMode.OTHER))
return RunMode(self._config.get("runmode", RunMode.OTHER))
def current_whitelist(self) -> List[str]:
"""
@@ -434,9 +434,11 @@ class DataProvider:
# Exchange functions
def refresh(self,
pairlist: ListPairsWithTimeframes,
helping_pairs: Optional[ListPairsWithTimeframes] = None) -> None:
def refresh(
self,
pairlist: ListPairsWithTimeframes,
helping_pairs: Optional[ListPairsWithTimeframes] = None,
) -> None:
"""
Refresh data, called with each cycle
"""
@@ -456,11 +458,7 @@ class DataProvider:
return list(self._exchange._klines.keys())
def ohlcv(
self,
pair: str,
timeframe: Optional[str] = None,
copy: bool = True,
candle_type: str = ''
self, pair: str, timeframe: Optional[str] = None, copy: bool = True, candle_type: str = ""
) -> DataFrame:
"""
Get candle (OHLCV) data for the given pair as DataFrame
@@ -474,11 +472,13 @@ class DataProvider:
if self._exchange is None:
raise OperationalException(NO_EXCHANGE_EXCEPTION)
if self.runmode in (RunMode.DRY_RUN, RunMode.LIVE):
_candle_type = CandleType.from_string(
candle_type) if candle_type != '' else self._config['candle_type_def']
_candle_type = (
CandleType.from_string(candle_type)
if candle_type != ""
else self._config["candle_type_def"]
)
return self._exchange.klines(
(pair, timeframe or self._config['timeframe'], _candle_type),
copy=copy
(pair, timeframe or self._config["timeframe"], _candle_type), copy=copy
)
else:
return DataFrame()
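From strategy code the provider is reached as self.dp; a minimal sketch of the read paths touched in this file, inside a deliberately incomplete skeleton strategy (names and pairs are placeholders):

    from freqtrade.strategy import IStrategy

    class ExampleStrategy(IStrategy):  # illustrative skeleton only
        timeframe = "1h"

        def populate_indicators(self, dataframe, metadata):
            if self.dp:  # the bot attaches the DataProvider at runtime
                # Live/dry-run: cached analyzed dataframe plus its timestamp.
                analyzed, last_time = self.dp.get_analyzed_dataframe(
                    metadata["pair"], self.timeframe
                )
                # Works in backtesting as well; falls back to on-disk history.
                candles = self.dp.get_pair_dataframe(
                    pair=metadata["pair"], timeframe="1h"
                )
            return dataframe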


@@ -8,8 +8,11 @@ from tabulate import tabulate
from freqtrade.configuration import TimeRange
from freqtrade.constants import Config
from freqtrade.data.btanalysis import (get_latest_backtest_filename, load_backtest_data,
load_backtest_stats)
from freqtrade.data.btanalysis import (
get_latest_backtest_filename,
load_backtest_data,
load_backtest_stats,
)
from freqtrade.exceptions import OperationalException
@@ -18,9 +21,10 @@ logger = logging.getLogger(__name__)
def _load_backtest_analysis_data(backtest_dir: Path, name: str):
if backtest_dir.is_dir():
scpf = Path(backtest_dir,
Path(get_latest_backtest_filename(backtest_dir)).stem + "_" + name + ".pkl"
)
scpf = Path(
backtest_dir,
Path(get_latest_backtest_filename(backtest_dir)).stem + "_" + name + ".pkl",
)
else:
scpf = Path(backtest_dir.parent / f"{backtest_dir.stem}_{name}.pkl")
@@ -53,7 +57,8 @@ def _process_candles_and_indicators(pairlist, strategy_name, trades, signal_cand
for pair in pairlist:
if pair in signal_candles[strategy_name]:
analysed_trades_dict[strategy_name][pair] = _analyze_candles_and_indicators(
pair, trades, signal_candles[strategy_name][pair])
pair, trades, signal_candles[strategy_name][pair]
)
except Exception as e:
print(f"Cannot process entry/exit reasons for {strategy_name}: ", e)
@@ -64,28 +69,28 @@ def _analyze_candles_and_indicators(pair, trades: pd.DataFrame, signal_candles:
buyf = signal_candles
if len(buyf) > 0:
buyf = buyf.set_index('date', drop=False)
trades_red = trades.loc[trades['pair'] == pair].copy()
buyf = buyf.set_index("date", drop=False)
trades_red = trades.loc[trades["pair"] == pair].copy()
trades_inds = pd.DataFrame()
if trades_red.shape[0] > 0 and buyf.shape[0] > 0:
for t, v in trades_red.open_date.items():
allinds = buyf.loc[(buyf['date'] < v)]
allinds = buyf.loc[(buyf["date"] < v)]
if allinds.shape[0] > 0:
tmp_inds = allinds.iloc[[-1]]
trades_red.loc[t, 'signal_date'] = tmp_inds['date'].values[0]
trades_red.loc[t, 'enter_reason'] = trades_red.loc[t, 'enter_tag']
tmp_inds.index.rename('signal_date', inplace=True)
trades_red.loc[t, "signal_date"] = tmp_inds["date"].values[0]
trades_red.loc[t, "enter_reason"] = trades_red.loc[t, "enter_tag"]
tmp_inds.index.rename("signal_date", inplace=True)
trades_inds = pd.concat([trades_inds, tmp_inds])
if 'signal_date' in trades_red:
trades_red['signal_date'] = pd.to_datetime(trades_red['signal_date'], utc=True)
trades_red.set_index('signal_date', inplace=True)
if "signal_date" in trades_red:
trades_red["signal_date"] = pd.to_datetime(trades_red["signal_date"], utc=True)
trades_red.set_index("signal_date", inplace=True)
try:
trades_red = pd.merge(trades_red, trades_inds, on='signal_date', how='outer')
trades_red = pd.merge(trades_red, trades_inds, on="signal_date", how="outer")
except Exception as e:
raise e
return trades_red
@@ -93,138 +98,166 @@ def _analyze_candles_and_indicators(pair, trades: pd.DataFrame, signal_candles:
return pd.DataFrame()
def _do_group_table_output(bigdf, glist, csv_path: Path, to_csv=False, ):
def _do_group_table_output(
bigdf,
glist,
csv_path: Path,
to_csv=False,
):
for g in glist:
# 0: summary wins/losses grouped by enter tag
if g == "0":
group_mask = ['enter_reason']
wins = bigdf.loc[bigdf['profit_abs'] >= 0] \
.groupby(group_mask) \
.agg({'profit_abs': ['sum']})
group_mask = ["enter_reason"]
wins = (
bigdf.loc[bigdf["profit_abs"] >= 0].groupby(group_mask).agg({"profit_abs": ["sum"]})
)
wins.columns = ['profit_abs_wins']
loss = bigdf.loc[bigdf['profit_abs'] < 0] \
.groupby(group_mask) \
.agg({'profit_abs': ['sum']})
loss.columns = ['profit_abs_loss']
wins.columns = ["profit_abs_wins"]
loss = (
bigdf.loc[bigdf["profit_abs"] < 0].groupby(group_mask).agg({"profit_abs": ["sum"]})
)
loss.columns = ["profit_abs_loss"]
new = bigdf.groupby(group_mask).agg({'profit_abs': [
'count',
lambda x: sum(x > 0),
lambda x: sum(x <= 0)]})
new = bigdf.groupby(group_mask).agg(
{"profit_abs": ["count", lambda x: sum(x > 0), lambda x: sum(x <= 0)]}
)
new = pd.concat([new, wins, loss], axis=1).fillna(0)
new['profit_tot'] = new['profit_abs_wins'] - abs(new['profit_abs_loss'])
new['wl_ratio_pct'] = (new.iloc[:, 1] / new.iloc[:, 0] * 100).fillna(0)
new['avg_win'] = (new['profit_abs_wins'] / new.iloc[:, 1]).fillna(0)
new['avg_loss'] = (new['profit_abs_loss'] / new.iloc[:, 2]).fillna(0)
new["profit_tot"] = new["profit_abs_wins"] - abs(new["profit_abs_loss"])
new["wl_ratio_pct"] = (new.iloc[:, 1] / new.iloc[:, 0] * 100).fillna(0)
new["avg_win"] = (new["profit_abs_wins"] / new.iloc[:, 1]).fillna(0)
new["avg_loss"] = (new["profit_abs_loss"] / new.iloc[:, 2]).fillna(0)
new['exp_ratio'] = (
(
(1 + (new['avg_win'] / abs(new['avg_loss']))) * (new['wl_ratio_pct'] / 100)
) - 1).fillna(0)
new["exp_ratio"] = (
((1 + (new["avg_win"] / abs(new["avg_loss"]))) * (new["wl_ratio_pct"] / 100)) - 1
).fillna(0)
new.columns = ['total_num_buys', 'wins', 'losses',
'profit_abs_wins', 'profit_abs_loss',
'profit_tot', 'wl_ratio_pct',
'avg_win', 'avg_loss', 'exp_ratio']
new.columns = [
"total_num_buys",
"wins",
"losses",
"profit_abs_wins",
"profit_abs_loss",
"profit_tot",
"wl_ratio_pct",
"avg_win",
"avg_loss",
"exp_ratio",
]
sortcols = ['total_num_buys']
sortcols = ["total_num_buys"]
_print_table(new, sortcols, show_index=True, name="Group 0:",
to_csv=to_csv, csv_path=csv_path)
_print_table(
new, sortcols, show_index=True, name="Group 0:", to_csv=to_csv, csv_path=csv_path
)
else:
agg_mask = {'profit_abs': ['count', 'sum', 'median', 'mean'],
'profit_ratio': ['median', 'mean', 'sum']}
agg_cols = ['num_buys', 'profit_abs_sum', 'profit_abs_median',
'profit_abs_mean', 'median_profit_pct', 'mean_profit_pct',
'total_profit_pct']
sortcols = ['profit_abs_sum', 'enter_reason']
agg_mask = {
"profit_abs": ["count", "sum", "median", "mean"],
"profit_ratio": ["median", "mean", "sum"],
}
agg_cols = [
"num_buys",
"profit_abs_sum",
"profit_abs_median",
"profit_abs_mean",
"median_profit_pct",
"mean_profit_pct",
"total_profit_pct",
]
sortcols = ["profit_abs_sum", "enter_reason"]
# 1: profit summaries grouped by enter_tag
if g == "1":
group_mask = ['enter_reason']
group_mask = ["enter_reason"]
# 2: profit summaries grouped by enter_tag and exit_tag
if g == "2":
group_mask = ['enter_reason', 'exit_reason']
group_mask = ["enter_reason", "exit_reason"]
# 3: profit summaries grouped by pair and enter_tag
if g == "3":
group_mask = ['pair', 'enter_reason']
group_mask = ["pair", "enter_reason"]
# 4: profit summaries grouped by pair, enter_ and exit_tag (this can get quite large)
if g == "4":
group_mask = ['pair', 'enter_reason', 'exit_reason']
group_mask = ["pair", "enter_reason", "exit_reason"]
# 5: profit summaries grouped by exit_tag
if g == "5":
group_mask = ['exit_reason']
sortcols = ['exit_reason']
group_mask = ["exit_reason"]
sortcols = ["exit_reason"]
if group_mask:
new = bigdf.groupby(group_mask).agg(agg_mask).reset_index()
new.columns = group_mask + agg_cols
new['median_profit_pct'] = new['median_profit_pct'] * 100
new['mean_profit_pct'] = new['mean_profit_pct'] * 100
new['total_profit_pct'] = new['total_profit_pct'] * 100
new["median_profit_pct"] = new["median_profit_pct"] * 100
new["mean_profit_pct"] = new["mean_profit_pct"] * 100
new["total_profit_pct"] = new["total_profit_pct"] * 100
_print_table(new, sortcols, name=f"Group {g}:",
to_csv=to_csv, csv_path=csv_path)
_print_table(new, sortcols, name=f"Group {g}:", to_csv=to_csv, csv_path=csv_path)
else:
logger.warning("Invalid group mask specified.")
def _do_rejected_signals_output(rejected_signals_df: pd.DataFrame,
to_csv: bool = False, csv_path=None) -> None:
cols = ['pair', 'date', 'enter_tag']
sortcols = ['date', 'pair', 'enter_tag']
_print_table(rejected_signals_df[cols],
sortcols,
show_index=False,
name="Rejected Signals:",
to_csv=to_csv,
csv_path=csv_path)
def _do_rejected_signals_output(
rejected_signals_df: pd.DataFrame, to_csv: bool = False, csv_path=None
) -> None:
cols = ["pair", "date", "enter_tag"]
sortcols = ["date", "pair", "enter_tag"]
_print_table(
rejected_signals_df[cols],
sortcols,
show_index=False,
name="Rejected Signals:",
to_csv=to_csv,
csv_path=csv_path,
)
def _select_rows_within_dates(df, timerange=None, df_date_col: str = 'date'):
def _select_rows_within_dates(df, timerange=None, df_date_col: str = "date"):
if timerange:
if timerange.starttype == 'date':
if timerange.starttype == "date":
df = df.loc[(df[df_date_col] >= timerange.startdt)]
if timerange.stoptype == 'date':
if timerange.stoptype == "date":
df = df.loc[(df[df_date_col] < timerange.stopdt)]
return df
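# Usage sketch for the date filter above, with a hypothetical two-row frame
# (assumes a tz-aware "date" column, as produced by the data handlers):
import pandas as pd
from freqtrade.configuration import TimeRange

df = pd.DataFrame({"date": pd.to_datetime(["2024-01-15", "2024-03-01"], utc=True)})
tr = TimeRange.parse_timerange("20240101-20240201")
print(_select_rows_within_dates(df, timerange=tr))  # keeps only the January row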
def _select_rows_by_tags(df, enter_reason_list, exit_reason_list):
if enter_reason_list and "all" not in enter_reason_list:
df = df.loc[(df['enter_reason'].isin(enter_reason_list))]
df = df.loc[(df["enter_reason"].isin(enter_reason_list))]
if exit_reason_list and "all" not in exit_reason_list:
df = df.loc[(df['exit_reason'].isin(exit_reason_list))]
df = df.loc[(df["exit_reason"].isin(exit_reason_list))]
return df
def prepare_results(analysed_trades, stratname,
enter_reason_list, exit_reason_list,
timerange=None):
def prepare_results(
analysed_trades, stratname, enter_reason_list, exit_reason_list, timerange=None
):
res_df = pd.DataFrame()
for pair, trades in analysed_trades[stratname].items():
if (trades.shape[0] > 0):
trades.dropna(subset=['close_date'], inplace=True)
if trades.shape[0] > 0:
trades.dropna(subset=["close_date"], inplace=True)
res_df = pd.concat([res_df, trades], ignore_index=True)
res_df = _select_rows_within_dates(res_df, timerange)
if res_df is not None and res_df.shape[0] > 0 and ('enter_reason' in res_df.columns):
if res_df is not None and res_df.shape[0] > 0 and ("enter_reason" in res_df.columns):
res_df = _select_rows_by_tags(res_df, enter_reason_list, exit_reason_list)
return res_df
def print_results(res_df: pd.DataFrame, analysis_groups: List[str], indicator_list: List[str],
csv_path: Path, rejected_signals=None, to_csv=False):
def print_results(
res_df: pd.DataFrame,
analysis_groups: List[str],
indicator_list: List[str],
csv_path: Path,
rejected_signals=None,
to_csv=False,
):
if res_df.shape[0] > 0:
if analysis_groups:
_do_group_table_output(res_df, analysis_groups, to_csv=to_csv, csv_path=csv_path)
@@ -237,30 +270,31 @@ def print_results(res_df: pd.DataFrame, analysis_groups: List[str], indicator_li
# NB this can be large for big dataframes!
if "all" in indicator_list:
_print_table(res_df,
show_index=False,
name="Indicators:",
to_csv=to_csv,
csv_path=csv_path)
_print_table(
res_df, show_index=False, name="Indicators:", to_csv=to_csv, csv_path=csv_path
)
elif indicator_list is not None and indicator_list:
available_inds = []
for ind in indicator_list:
if ind in res_df:
available_inds.append(ind)
ilist = ["pair", "enter_reason", "exit_reason"] + available_inds
_print_table(res_df[ilist],
sortcols=['exit_reason'],
show_index=False,
name="Indicators:",
to_csv=to_csv,
csv_path=csv_path)
_print_table(
res_df[ilist],
sortcols=["exit_reason"],
show_index=False,
name="Indicators:",
to_csv=to_csv,
csv_path=csv_path,
)
else:
print("\\No trades to show")
def _print_table(df: pd.DataFrame, sortcols=None, *, show_index=False, name=None,
to_csv=False, csv_path: Path):
if (sortcols is not None):
def _print_table(
df: pd.DataFrame, sortcols=None, *, show_index=False, name=None, to_csv=False, csv_path: Path
):
if sortcols is not None:
data = df.sort_values(sortcols)
else:
data = df
@@ -273,60 +307,64 @@ def _print_table(df: pd.DataFrame, sortcols=None, *, show_index=False, name=None
if name is not None:
print(name)
print(
tabulate(
data,
headers='keys',
tablefmt='psql',
showindex=show_index
)
)
print(tabulate(data, headers="keys", tablefmt="psql", showindex=show_index))
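# tabulate sketch: headers="keys" takes the column names straight from the
# DataFrame (demo values are hypothetical):
import pandas as pd
from tabulate import tabulate

demo = pd.DataFrame({"pair": ["BTC/USDT"], "wins": [3], "losses": [1]})
print(tabulate(demo, headers="keys", tablefmt="psql", showindex=False))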
def process_entry_exit_reasons(config: Config):
try:
analysis_groups = config.get('analysis_groups', [])
enter_reason_list = config.get('enter_reason_list', ["all"])
exit_reason_list = config.get('exit_reason_list', ["all"])
indicator_list = config.get('indicator_list', [])
do_rejected = config.get('analysis_rejected', False)
to_csv = config.get('analysis_to_csv', False)
csv_path = Path(config.get('analysis_csv_path', config['exportfilename']))
analysis_groups = config.get("analysis_groups", [])
enter_reason_list = config.get("enter_reason_list", ["all"])
exit_reason_list = config.get("exit_reason_list", ["all"])
indicator_list = config.get("indicator_list", [])
do_rejected = config.get("analysis_rejected", False)
to_csv = config.get("analysis_to_csv", False)
csv_path = Path(config.get("analysis_csv_path", config["exportfilename"]))
if to_csv and not csv_path.is_dir():
raise OperationalException(f"Specified directory {csv_path} does not exist.")
timerange = TimeRange.parse_timerange(None if config.get(
'timerange') is None else str(config.get('timerange')))
timerange = TimeRange.parse_timerange(
None if config.get("timerange") is None else str(config.get("timerange"))
)
backtest_stats = load_backtest_stats(config['exportfilename'])
backtest_stats = load_backtest_stats(config["exportfilename"])
for strategy_name, results in backtest_stats['strategy'].items():
trades = load_backtest_data(config['exportfilename'], strategy_name)
for strategy_name, results in backtest_stats["strategy"].items():
trades = load_backtest_data(config["exportfilename"], strategy_name)
if trades is not None and not trades.empty:
signal_candles = _load_signal_candles(config['exportfilename'])
signal_candles = _load_signal_candles(config["exportfilename"])
rej_df = None
if do_rejected:
rejected_signals_dict = _load_rejected_signals(config['exportfilename'])
rej_df = prepare_results(rejected_signals_dict, strategy_name,
enter_reason_list, exit_reason_list,
timerange=timerange)
rejected_signals_dict = _load_rejected_signals(config["exportfilename"])
rej_df = prepare_results(
rejected_signals_dict,
strategy_name,
enter_reason_list,
exit_reason_list,
timerange=timerange,
)
analysed_trades_dict = _process_candles_and_indicators(
config['exchange']['pair_whitelist'], strategy_name,
trades, signal_candles)
config["exchange"]["pair_whitelist"], strategy_name, trades, signal_candles
)
res_df = prepare_results(analysed_trades_dict, strategy_name,
enter_reason_list, exit_reason_list,
timerange=timerange)
res_df = prepare_results(
analysed_trades_dict,
strategy_name,
enter_reason_list,
exit_reason_list,
timerange=timerange,
)
print_results(res_df,
analysis_groups,
indicator_list,
rejected_signals=rej_df,
to_csv=to_csv,
csv_path=csv_path)
print_results(
res_df,
analysis_groups,
indicator_list,
rejected_signals=rej_df,
to_csv=to_csv,
csv_path=csv_path,
)
except ValueError as e:
raise OperationalException(e) from e

View File

@@ -5,8 +5,17 @@ Includes:
* load data for a pair (or a list of pairs) from disk
* download data from exchange and store to disk
"""
# flake8: noqa: F401
from .datahandlers import get_datahandler
from .history_utils import (convert_trades_to_ohlcv, download_data_main, get_timerange, load_data,
load_pair_history, refresh_backtest_ohlcv_data,
refresh_backtest_trades_data, refresh_data, validate_backtest_data)
from .history_utils import (
convert_trades_to_ohlcv,
download_data_main,
get_timerange,
load_data,
load_pair_history,
refresh_backtest_ohlcv_data,
refresh_backtest_trades_data,
refresh_data,
validate_backtest_data,
)
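# With the explicit re-exports above, callers can import from the package root:
from freqtrade.data.history import get_timerange, load_pair_history  # noqa: F401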

View File

@@ -14,11 +14,11 @@ logger = logging.getLogger(__name__)
class FeatherDataHandler(IDataHandler):
_columns = DEFAULT_DATAFRAME_COLUMNS
def ohlcv_store(
self, pair: str, timeframe: str, data: DataFrame, candle_type: CandleType) -> None:
self, pair: str, timeframe: str, data: DataFrame, candle_type: CandleType
) -> None:
"""
Store data in feather format.
format looks as follows:
@@ -33,11 +33,12 @@ class FeatherDataHandler(IDataHandler):
self.create_dir_if_needed(filename)
data.reset_index(drop=True).loc[:, self._columns].to_feather(
filename, compression_level=9, compression='lz4')
filename, compression_level=9, compression="lz4"
)
def _ohlcv_load(self, pair: str, timeframe: str,
timerange: Optional[TimeRange], candle_type: CandleType
) -> DataFrame:
def _ohlcv_load(
self, pair: str, timeframe: str, timerange: Optional[TimeRange], candle_type: CandleType
) -> DataFrame:
"""
Internal method used to load data for one pair from disk.
Implements the loading and conversion to a Pandas dataframe.
@@ -50,28 +51,31 @@ class FeatherDataHandler(IDataHandler):
:param candle_type: Any of the enum CandleType (must match trading mode!)
:return: DataFrame with ohlcv data, or empty DataFrame
"""
filename = self._pair_data_filename(
self._datadir, pair, timeframe, candle_type=candle_type)
filename = self._pair_data_filename(self._datadir, pair, timeframe, candle_type=candle_type)
if not filename.exists():
# Fallback mode for 1M files
filename = self._pair_data_filename(
self._datadir, pair, timeframe, candle_type=candle_type, no_timeframe_modify=True)
self._datadir, pair, timeframe, candle_type=candle_type, no_timeframe_modify=True
)
if not filename.exists():
return DataFrame(columns=self._columns)
pairdata = read_feather(filename)
pairdata.columns = self._columns
pairdata = pairdata.astype(dtype={'open': 'float', 'high': 'float',
'low': 'float', 'close': 'float', 'volume': 'float'})
pairdata['date'] = to_datetime(pairdata['date'], unit='ms', utc=True)
pairdata = pairdata.astype(
dtype={
"open": "float",
"high": "float",
"low": "float",
"close": "float",
"volume": "float",
}
)
pairdata["date"] = to_datetime(pairdata["date"], unit="ms", utc=True)
return pairdata
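# Epoch-ms sketch for the "date" conversion above:
import pandas as pd
print(pd.to_datetime(1700000000000, unit="ms", utc=True))
# -> 2023-11-14 22:13:20+00:00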
def ohlcv_append(
self,
pair: str,
timeframe: str,
data: DataFrame,
candle_type: CandleType
self, pair: str, timeframe: str, data: DataFrame, candle_type: CandleType
) -> None:
"""
Append data to existing data structures
@@ -92,7 +96,7 @@ class FeatherDataHandler(IDataHandler):
"""
filename = self._pair_trades_filename(self._datadir, pair, trading_mode)
self.create_dir_if_needed(filename)
data.reset_index(drop=True).to_feather(filename, compression_level=9, compression='lz4')
data.reset_index(drop=True).to_feather(filename, compression_level=9, compression="lz4")
def trades_append(self, pair: str, data: DataFrame):
"""
@@ -104,7 +108,7 @@ class FeatherDataHandler(IDataHandler):
raise NotImplementedError()
def _trades_load(
self, pair: str, trading_mode: TradingMode, timerange: Optional[TimeRange] = None
self, pair: str, trading_mode: TradingMode, timerange: Optional[TimeRange] = None
) -> DataFrame:
"""
Load a pair from file, either .json.gz or .json

View File

@@ -15,11 +15,11 @@ logger = logging.getLogger(__name__)
class HDF5DataHandler(IDataHandler):
_columns = DEFAULT_DATAFRAME_COLUMNS
def ohlcv_store(
self, pair: str, timeframe: str, data: pd.DataFrame, candle_type: CandleType) -> None:
self, pair: str, timeframe: str, data: pd.DataFrame, candle_type: CandleType
) -> None:
"""
Store data in hdf5 file.
:param pair: Pair - used to generate filename
@@ -35,13 +35,18 @@ class HDF5DataHandler(IDataHandler):
self.create_dir_if_needed(filename)
_data.loc[:, self._columns].to_hdf(
filename, key=key, mode='a', complevel=9, complib='blosc',
format='table', data_columns=['date']
filename,
key=key,
mode="a",
complevel=9,
complib="blosc",
format="table",
data_columns=["date"],
)
def _ohlcv_load(self, pair: str, timeframe: str,
timerange: Optional[TimeRange], candle_type: CandleType
) -> pd.DataFrame:
def _ohlcv_load(
self, pair: str, timeframe: str, timerange: Optional[TimeRange], candle_type: CandleType
) -> pd.DataFrame:
"""
Internal method used to load data for one pair from disk.
Implements the loading and conversion to a Pandas dataframe.
@@ -55,41 +60,40 @@ class HDF5DataHandler(IDataHandler):
:return: DataFrame with ohlcv data, or empty DataFrame
"""
key = self._pair_ohlcv_key(pair, timeframe)
filename = self._pair_data_filename(
self._datadir,
pair,
timeframe,
candle_type=candle_type
)
filename = self._pair_data_filename(self._datadir, pair, timeframe, candle_type=candle_type)
if not filename.exists():
# Fallback mode for 1M files
filename = self._pair_data_filename(
self._datadir, pair, timeframe, candle_type=candle_type, no_timeframe_modify=True)
self._datadir, pair, timeframe, candle_type=candle_type, no_timeframe_modify=True
)
if not filename.exists():
return pd.DataFrame(columns=self._columns)
where = []
if timerange:
if timerange.starttype == 'date':
if timerange.starttype == "date":
where.append(f"date >= Timestamp({timerange.startts * 1e9})")
if timerange.stoptype == 'date':
if timerange.stoptype == "date":
where.append(f"date <= Timestamp({timerange.stopts * 1e9})")
pairdata = pd.read_hdf(filename, key=key, mode="r", where=where)
if list(pairdata.columns) != self._columns:
raise ValueError("Wrong dataframe format")
pairdata = pairdata.astype(dtype={'open': 'float', 'high': 'float',
'low': 'float', 'close': 'float', 'volume': 'float'})
pairdata = pairdata.astype(
dtype={
"open": "float",
"high": "float",
"low": "float",
"close": "float",
"volume": "float",
}
)
pairdata = pairdata.reset_index(drop=True)
return pairdata
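# A minimal sketch of the HDFStore "where" filtering above (needs the
# "tables" package; the ns-epoch Timestamp mirrors the startts * 1e9 usage):
import pandas as pd

df = pd.DataFrame({"date": pd.date_range("2024-01-01", periods=3, freq="D")})
df.to_hdf("demo.h5", key="demo", mode="w", format="table", data_columns=["date"])
cutoff = pd.Timestamp("2024-01-02").value  # nanoseconds since epoch
print(pd.read_hdf("demo.h5", key="demo", where=[f"date >= Timestamp({cutoff})"]))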
def ohlcv_append(
self,
pair: str,
timeframe: str,
data: pd.DataFrame,
candle_type: CandleType
self, pair: str, timeframe: str, data: pd.DataFrame, candle_type: CandleType
) -> None:
"""
Append data to existing data structures
@@ -111,9 +115,13 @@ class HDF5DataHandler(IDataHandler):
key = self._pair_trades_key(pair)
data.to_hdf(
self._pair_trades_filename(self._datadir, pair, trading_mode), key=key,
mode='a', complevel=9, complib='blosc',
format='table', data_columns=['timestamp']
self._pair_trades_filename(self._datadir, pair, trading_mode),
key=key,
mode="a",
complevel=9,
complib="blosc",
format="table",
data_columns=["timestamp"],
)
def trades_append(self, pair: str, data: pd.DataFrame):
@@ -142,13 +150,13 @@ class HDF5DataHandler(IDataHandler):
return pd.DataFrame(columns=DEFAULT_TRADES_COLUMNS)
where = []
if timerange:
if timerange.starttype == 'date':
if timerange.starttype == "date":
where.append(f"timestamp >= {timerange.startts * 1e3}")
if timerange.stoptype == 'date':
if timerange.stoptype == "date":
where.append(f"timestamp < {timerange.stopts * 1e3}")
trades: pd.DataFrame = pd.read_hdf(filename, key=key, mode="r", where=where)
trades[['id', 'type']] = trades[['id', 'type']].replace({np.nan: None})
trades[["id", "type"]] = trades[["id", "type"]].replace({np.nan: None})
return trades
@classmethod
@@ -158,7 +166,7 @@ class HDF5DataHandler(IDataHandler):
@classmethod
def _pair_ohlcv_key(cls, pair: str, timeframe: str) -> str:
# Escape futures pairs to avoid warnings
pair_esc = pair.replace(':', '_')
pair_esc = pair.replace(":", "_")
return f"{pair_esc}/ohlcv/tf_{timeframe}"
@classmethod

View File

@@ -3,6 +3,7 @@ Abstract datahandler interface.
Its subclasses handle loading and storing data from disk.
"""
import logging
import re
from abc import ABC, abstractmethod
@@ -16,8 +17,12 @@ from pandas import DataFrame
from freqtrade import misc
from freqtrade.configuration import TimeRange
from freqtrade.constants import DEFAULT_TRADES_COLUMNS, ListPairsWithTimeframes
from freqtrade.data.converter import (clean_ohlcv_dataframe, trades_convert_types,
trades_df_remove_duplicates, trim_dataframe)
from freqtrade.data.converter import (
clean_ohlcv_dataframe,
trades_convert_types,
trades_df_remove_duplicates,
trim_dataframe,
)
from freqtrade.enums import CandleType, TradingMode
from freqtrade.exchange import timeframe_to_seconds
@@ -26,8 +31,7 @@ logger = logging.getLogger(__name__)
class IDataHandler(ABC):
_OHLCV_REGEX = r'^([a-zA-Z_\d-]+)\-(\d+[a-zA-Z]{1,2})\-?([a-zA-Z_]*)?(?=\.)'
_OHLCV_REGEX = r"^([a-zA-Z_\d-]+)\-(\d+[a-zA-Z]{1,2})\-?([a-zA-Z_]*)?(?=\.)"
def __init__(self, datadir: Path) -> None:
self._datadir = datadir
@@ -41,7 +45,8 @@ class IDataHandler(ABC):
@classmethod
def ohlcv_get_available_data(
cls, datadir: Path, trading_mode: TradingMode) -> ListPairsWithTimeframes:
cls, datadir: Path, trading_mode: TradingMode
) -> ListPairsWithTimeframes:
"""
Returns a list of all pairs with ohlcv data available in this datadir
:param datadir: Directory to search for ohlcv files
@@ -49,17 +54,20 @@ class IDataHandler(ABC):
:return: List of Tuples of (pair, timeframe, CandleType)
"""
if trading_mode == TradingMode.FUTURES:
datadir = datadir.joinpath('futures')
datadir = datadir.joinpath("futures")
_tmp = [
re.search(
cls._OHLCV_REGEX, p.name
) for p in datadir.glob(f"*.{cls._get_file_extension()}")]
re.search(cls._OHLCV_REGEX, p.name)
for p in datadir.glob(f"*.{cls._get_file_extension()}")
]
return [
(
cls.rebuild_pair_from_filename(match[1]),
cls.rebuild_timeframe_from_filename(match[2]),
CandleType.from_string(match[3])
) for match in _tmp if match and len(match.groups()) > 1]
CandleType.from_string(match[3]),
)
for match in _tmp
if match and len(match.groups()) > 1
]
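# Regex sketch: the OHLCV filename pattern above splits a hypothetical
# "XRP_USDT-5m-futures.feather" into pair, timeframe and candle type:
import re
m = re.search(
    r"^([a-zA-Z_\d-]+)\-(\d+[a-zA-Z]{1,2})\-?([a-zA-Z_]*)?(?=\.)",
    "XRP_USDT-5m-futures.feather",
)
print(m.groups())  # ('XRP_USDT', '5m', 'futures')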
@classmethod
def ohlcv_get_pairs(cls, datadir: Path, timeframe: str, candle_type: CandleType) -> List[str]:
@@ -73,17 +81,20 @@ class IDataHandler(ABC):
"""
candle = ""
if candle_type != CandleType.SPOT:
datadir = datadir.joinpath('futures')
datadir = datadir.joinpath("futures")
candle = f"-{candle_type}"
ext = cls._get_file_extension()
_tmp = [re.search(r'^(\S+)(?=\-' + timeframe + candle + f'.{ext})', p.name)
for p in datadir.glob(f"*{timeframe}{candle}.{ext}")]
_tmp = [
re.search(r"^(\S+)(?=\-" + timeframe + candle + f".{ext})", p.name)
for p in datadir.glob(f"*{timeframe}{candle}.{ext}")
]
# Check if regex found something and only return these results
return [cls.rebuild_pair_from_filename(match[0]) for match in _tmp if match]
@abstractmethod
def ohlcv_store(
self, pair: str, timeframe: str, data: DataFrame, candle_type: CandleType) -> None:
self, pair: str, timeframe: str, data: DataFrame, candle_type: CandleType
) -> None:
"""
Store ohlcv data.
:param pair: Pair - used to generate filename
@@ -93,8 +104,9 @@ class IDataHandler(ABC):
:return: None
"""
def ohlcv_data_min_max(self, pair: str, timeframe: str,
candle_type: CandleType) -> Tuple[datetime, datetime, int]:
def ohlcv_data_min_max(
self, pair: str, timeframe: str, candle_type: CandleType
) -> Tuple[datetime, datetime, int]:
"""
Returns the min and max timestamp for the given pair and timeframe.
:param pair: Pair to get min/max for
@@ -109,12 +121,12 @@ class IDataHandler(ABC):
datetime.fromtimestamp(0, tz=timezone.utc),
0,
)
return df.iloc[0]['date'].to_pydatetime(), df.iloc[-1]['date'].to_pydatetime(), len(df)
return df.iloc[0]["date"].to_pydatetime(), df.iloc[-1]["date"].to_pydatetime(), len(df)
@abstractmethod
def _ohlcv_load(self, pair: str, timeframe: str, timerange: Optional[TimeRange],
candle_type: CandleType
) -> DataFrame:
def _ohlcv_load(
self, pair: str, timeframe: str, timerange: Optional[TimeRange], candle_type: CandleType
) -> DataFrame:
"""
Internal method used to load data for one pair from disk.
Implements the loading and conversion to a Pandas dataframe.
@@ -144,11 +156,7 @@ class IDataHandler(ABC):
@abstractmethod
def ohlcv_append(
self,
pair: str,
timeframe: str,
data: DataFrame,
candle_type: CandleType
self, pair: str, timeframe: str, data: DataFrame, candle_type: CandleType
) -> None:
"""
Append data to existing data structures
@@ -166,8 +174,10 @@ class IDataHandler(ABC):
:return: List of Pairs
"""
_ext = cls._get_file_extension()
_tmp = [re.search(r'^(\S+)(?=\-trades.' + _ext + ')', p.name)
for p in datadir.glob(f"*trades.{_ext}")]
_tmp = [
re.search(r"^(\S+)(?=\-trades." + _ext + ")", p.name)
for p in datadir.glob(f"*trades.{_ext}")
]
# Check if regex found something and only return these results to avoid exceptions.
return [cls.rebuild_pair_from_filename(match[0]) for match in _tmp if match]
@@ -227,7 +237,7 @@ class IDataHandler(ABC):
return False
def trades_load(
self, pair: str, trading_mode: TradingMode, timerange: Optional[TimeRange] = None
self, pair: str, trading_mode: TradingMode, timerange: Optional[TimeRange] = None
) -> DataFrame:
"""
Load a pair from file, either .json.gz or .json
@@ -260,7 +270,7 @@ class IDataHandler(ABC):
pair: str,
timeframe: str,
candle_type: CandleType,
no_timeframe_modify: bool = False
no_timeframe_modify: bool = False,
) -> Path:
pair_s = misc.pair_to_filename(pair)
candle = ""
@@ -268,10 +278,9 @@ class IDataHandler(ABC):
timeframe = cls.timeframe_to_file(timeframe)
if candle_type != CandleType.SPOT:
datadir = datadir.joinpath('futures')
datadir = datadir.joinpath("futures")
candle = f"-{candle_type}"
filename = datadir.joinpath(
f'{pair_s}-{timeframe}{candle}.{cls._get_file_extension()}')
filename = datadir.joinpath(f"{pair_s}-{timeframe}{candle}.{cls._get_file_extension()}")
return filename
@classmethod
@@ -279,14 +288,14 @@ class IDataHandler(ABC):
pair_s = misc.pair_to_filename(pair)
if trading_mode == TradingMode.FUTURES:
# Futures pair ...
datadir = datadir.joinpath('futures')
datadir = datadir.joinpath("futures")
filename = datadir.joinpath(f'{pair_s}-trades.{cls._get_file_extension()}')
filename = datadir.joinpath(f"{pair_s}-trades.{cls._get_file_extension()}")
return filename
@staticmethod
def timeframe_to_file(timeframe: str):
return timeframe.replace('M', 'Mo')
return timeframe.replace("M", "Mo")
@staticmethod
def rebuild_timeframe_from_filename(timeframe: str) -> str:
@@ -294,7 +303,7 @@ class IDataHandler(ABC):
Converts timeframe from file format back to the original
Replaces mo with M (to avoid problems on case-insensitive filesystems)
"""
return re.sub('1mo', '1M', timeframe, flags=re.IGNORECASE)
return re.sub("1mo", "1M", timeframe, flags=re.IGNORECASE)
@staticmethod
def rebuild_pair_from_filename(pair: str) -> str:
@@ -302,18 +311,22 @@ class IDataHandler(ABC):
Rebuild pair name from filename
Assumes an asset name of max. 7 characters to also support BTC-PERP and BTC-PERP:USD names.
"""
res = re.sub(r'^(([A-Za-z\d]{1,10})|^([A-Za-z\-]{1,6}))(_)', r'\g<1>/', pair, count=1)
res = re.sub('_', ':', res, count=1)
res = re.sub(r"^(([A-Za-z\d]{1,10})|^([A-Za-z\-]{1,6}))(_)", r"\g<1>/", pair, count=1)
res = re.sub("_", ":", res, count=1)
return res
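# Round-trip sketch for the rebuild above (hypothetical futures pair name):
# the first "_" becomes "/", the next becomes ":".
print(IDataHandler.rebuild_pair_from_filename("XRP_USDT_USDT"))  # XRP/USDT:USDT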
def ohlcv_load(self, pair, timeframe: str,
candle_type: CandleType, *,
timerange: Optional[TimeRange] = None,
fill_missing: bool = True,
drop_incomplete: bool = False,
startup_candles: int = 0,
warn_no_data: bool = True,
) -> DataFrame:
def ohlcv_load(
self,
pair,
timeframe: str,
candle_type: CandleType,
*,
timerange: Optional[TimeRange] = None,
fill_missing: bool = True,
drop_incomplete: bool = False,
startup_candles: int = 0,
warn_no_data: bool = True,
) -> DataFrame:
"""
Load cached candle (OHLCV) data for the given pair.
@@ -333,15 +346,12 @@ class IDataHandler(ABC):
timerange_startup.subtract_start(timeframe_to_seconds(timeframe) * startup_candles)
pairdf = self._ohlcv_load(
pair,
timeframe,
timerange=timerange_startup,
candle_type=candle_type
pair, timeframe, timerange=timerange_startup, candle_type=candle_type
)
if self._check_empty_df(pairdf, pair, timeframe, candle_type, warn_no_data):
return pairdf
else:
enddate = pairdf.iloc[-1]['date']
enddate = pairdf.iloc[-1]["date"]
if timerange_startup:
self._validate_pairdata(pair, pairdf, timeframe, candle_type, timerange_startup)
@@ -350,17 +360,25 @@ class IDataHandler(ABC):
return pairdf
# incomplete candles should only be dropped if we didn't trim the end beforehand.
pairdf = clean_ohlcv_dataframe(pairdf, timeframe,
pair=pair,
fill_missing=fill_missing,
drop_incomplete=(drop_incomplete and
enddate == pairdf.iloc[-1]['date']))
pairdf = clean_ohlcv_dataframe(
pairdf,
timeframe,
pair=pair,
fill_missing=fill_missing,
drop_incomplete=(drop_incomplete and enddate == pairdf.iloc[-1]["date"]),
)
self._check_empty_df(pairdf, pair, timeframe, candle_type, warn_no_data)
return pairdf
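# Startup-padding sketch: 30 startup candles on a 5m timeframe move the
# requested start back by 30 * 300 seconds:
from freqtrade.exchange import timeframe_to_seconds

pad_seconds = timeframe_to_seconds("5m") * 30  # 9000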
def _check_empty_df(
self, pairdf: DataFrame, pair: str, timeframe: str, candle_type: CandleType,
warn_no_data: bool, warn_price: bool = False) -> bool:
self,
pairdf: DataFrame,
pair: str,
timeframe: str,
candle_type: CandleType,
warn_no_data: bool,
warn_price: bool = False,
) -> bool:
"""
Warn on empty dataframe
"""
@@ -373,39 +391,55 @@ class IDataHandler(ABC):
return True
elif warn_price:
candle_price_gap = 0
if (candle_type in (CandleType.SPOT, CandleType.FUTURES) and
not pairdf.empty
and 'close' in pairdf.columns and 'open' in pairdf.columns):
if (
candle_type in (CandleType.SPOT, CandleType.FUTURES)
and not pairdf.empty
and "close" in pairdf.columns
and "open" in pairdf.columns
):
# Detect gaps between prior close and open
gaps = ((pairdf['open'] - pairdf['close'].shift(1)) / pairdf['close'].shift(1))
gaps = (pairdf["open"] - pairdf["close"].shift(1)) / pairdf["close"].shift(1)
gaps = gaps.dropna()
if len(gaps):
candle_price_gap = max(abs(gaps))
if candle_price_gap > 0.1:
logger.info(f"Price jump in {pair}, {timeframe}, {candle_type} between two candles "
f"of {candle_price_gap:.2%} detected.")
logger.info(
f"Price jump in {pair}, {timeframe}, {candle_type} between two candles "
f"of {candle_price_gap:.2%} detected."
)
return False
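# Gap sketch: a prior close of 100 followed by an open of 115 is a 15% jump,
# which exceeds the 0.1 warning threshold above (values are hypothetical):
import pandas as pd

df = pd.DataFrame({"open": [100.0, 115.0], "close": [100.0, 116.0]})
gaps = ((df["open"] - df["close"].shift(1)) / df["close"].shift(1)).dropna()
print(max(abs(gaps)))  # 0.15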
def _validate_pairdata(self, pair, pairdata: DataFrame, timeframe: str,
candle_type: CandleType, timerange: TimeRange):
def _validate_pairdata(
self,
pair,
pairdata: DataFrame,
timeframe: str,
candle_type: CandleType,
timerange: TimeRange,
):
"""
Validates pairdata for missing data at start and end, and logs warnings.
:param pairdata: Dataframe to validate
:param timerange: Timerange specified for start and end dates
"""
if timerange.starttype == 'date':
if pairdata.iloc[0]['date'] > timerange.startdt:
logger.warning(f"{pair}, {candle_type}, {timeframe}, "
f"data starts at {pairdata.iloc[0]['date']:%Y-%m-%d %H:%M:%S}")
if timerange.stoptype == 'date':
if pairdata.iloc[-1]['date'] < timerange.stopdt:
logger.warning(f"{pair}, {candle_type}, {timeframe}, "
f"data ends at {pairdata.iloc[-1]['date']:%Y-%m-%d %H:%M:%S}")
if timerange.starttype == "date":
if pairdata.iloc[0]["date"] > timerange.startdt:
logger.warning(
f"{pair}, {candle_type}, {timeframe}, "
f"data starts at {pairdata.iloc[0]['date']:%Y-%m-%d %H:%M:%S}"
)
if timerange.stoptype == "date":
if pairdata.iloc[-1]["date"] < timerange.stopdt:
logger.warning(
f"{pair}, {candle_type}, {timeframe}, "
f"data ends at {pairdata.iloc[-1]['date']:%Y-%m-%d %H:%M:%S}"
)
def rename_futures_data(
self, pair: str, new_pair: str, timeframe: str, candle_type: CandleType):
self, pair: str, new_pair: str, timeframe: str, candle_type: CandleType
):
"""
Temporary method to migrate data from old naming to new naming (BTC/USDT -> BTC/USDT:USDT)
Only used for binance to support the binance futures naming unification.
@@ -431,18 +465,19 @@ class IDataHandler(ABC):
if funding_rate_combs:
logger.warning(
f'Migrating {len(funding_rate_combs)} funding fees to correct timeframe.')
f"Migrating {len(funding_rate_combs)} funding fees to correct timeframe."
)
for pair, timeframe, candletype in funding_rate_combs:
old_name = self._pair_data_filename(self._datadir, pair, timeframe, candletype)
new_name = self._pair_data_filename(self._datadir, pair, ff_timeframe, candletype)
if not Path(old_name).exists():
logger.warning(f'{old_name} does not exist, skipping.')
logger.warning(f"{old_name} does not exist, skipping.")
continue
if Path(new_name).exists():
logger.warning(f'{new_name} already exists, Removing.')
logger.warning(f"{new_name} already exists, Removing.")
Path(new_name).unlink()
Path(old_name).rename(new_name)
@@ -457,27 +492,33 @@ def get_datahandlerclass(datatype: str) -> Type[IDataHandler]:
:return: Datahandler class
"""
if datatype == 'json':
if datatype == "json":
from .jsondatahandler import JsonDataHandler
return JsonDataHandler
elif datatype == 'jsongz':
elif datatype == "jsongz":
from .jsondatahandler import JsonGzDataHandler
return JsonGzDataHandler
elif datatype == 'hdf5':
elif datatype == "hdf5":
from .hdf5datahandler import HDF5DataHandler
return HDF5DataHandler
elif datatype == 'feather':
elif datatype == "feather":
from .featherdatahandler import FeatherDataHandler
return FeatherDataHandler
elif datatype == 'parquet':
elif datatype == "parquet":
from .parquetdatahandler import ParquetDataHandler
return ParquetDataHandler
else:
raise ValueError(f"No datahandler for datatype {datatype} available.")
def get_datahandler(datadir: Path, data_format: Optional[str] = None,
data_handler: Optional[IDataHandler] = None) -> IDataHandler:
def get_datahandler(
datadir: Path, data_format: Optional[str] = None, data_handler: Optional[IDataHandler] = None
) -> IDataHandler:
"""
:param datadir: Folder to save data
:param data_format: dataformat to use
@@ -485,6 +526,6 @@ def get_datahandler(datadir: Path, data_format: Optional[str] = None,
"""
if not data_handler:
HandlerClass = get_datahandlerclass(data_format or 'feather')
HandlerClass = get_datahandlerclass(data_format or "feather")
data_handler = HandlerClass(datadir)
return data_handler

View File

@@ -17,12 +17,12 @@ logger = logging.getLogger(__name__)
class JsonDataHandler(IDataHandler):
_use_zip = False
_columns = DEFAULT_DATAFRAME_COLUMNS
def ohlcv_store(
self, pair: str, timeframe: str, data: DataFrame, candle_type: CandleType) -> None:
self, pair: str, timeframe: str, data: DataFrame, candle_type: CandleType
) -> None:
"""
Store data in json format "values".
format looks as follows:
@@ -37,16 +37,16 @@ class JsonDataHandler(IDataHandler):
self.create_dir_if_needed(filename)
_data = data.copy()
# Convert date to int
_data['date'] = _data['date'].astype(np.int64) // 1000 // 1000
_data["date"] = _data["date"].astype(np.int64) // 1000 // 1000
# Reset index, select only appropriate columns and save as json
_data.reset_index(drop=True).loc[:, self._columns].to_json(
filename, orient="values",
compression='gzip' if self._use_zip else None)
filename, orient="values", compression="gzip" if self._use_zip else None
)
def _ohlcv_load(self, pair: str, timeframe: str,
timerange: Optional[TimeRange], candle_type: CandleType
) -> DataFrame:
def _ohlcv_load(
self, pair: str, timeframe: str, timerange: Optional[TimeRange], candle_type: CandleType
) -> DataFrame:
"""
Internal method used to load data for one pair from disk.
Implements the loading and conversion to a Pandas dataframe.
@@ -59,31 +59,34 @@ class JsonDataHandler(IDataHandler):
:param candle_type: Any of the enum CandleType (must match trading mode!)
:return: DataFrame with ohlcv data, or empty DataFrame
"""
filename = self._pair_data_filename(
self._datadir, pair, timeframe, candle_type=candle_type)
filename = self._pair_data_filename(self._datadir, pair, timeframe, candle_type=candle_type)
if not filename.exists():
# Fallback mode for 1M files
filename = self._pair_data_filename(
self._datadir, pair, timeframe, candle_type=candle_type, no_timeframe_modify=True)
self._datadir, pair, timeframe, candle_type=candle_type, no_timeframe_modify=True
)
if not filename.exists():
return DataFrame(columns=self._columns)
try:
pairdata = read_json(filename, orient='values')
pairdata = read_json(filename, orient="values")
pairdata.columns = self._columns
except ValueError:
logger.error(f"Could not load data for {pair}.")
return DataFrame(columns=self._columns)
pairdata = pairdata.astype(dtype={'open': 'float', 'high': 'float',
'low': 'float', 'close': 'float', 'volume': 'float'})
pairdata['date'] = to_datetime(pairdata['date'], unit='ms', utc=True)
pairdata = pairdata.astype(
dtype={
"open": "float",
"high": "float",
"low": "float",
"close": "float",
"volume": "float",
}
)
pairdata["date"] = to_datetime(pairdata["date"], unit="ms", utc=True)
return pairdata
def ohlcv_append(
self,
pair: str,
timeframe: str,
data: DataFrame,
candle_type: CandleType
self, pair: str, timeframe: str, data: DataFrame, candle_type: CandleType
) -> None:
"""
Append data to existing data structures
@@ -145,5 +148,4 @@ class JsonDataHandler(IDataHandler):
class JsonGzDataHandler(JsonDataHandler):
_use_zip = True

View File

@@ -14,11 +14,11 @@ logger = logging.getLogger(__name__)
class ParquetDataHandler(IDataHandler):
_columns = DEFAULT_DATAFRAME_COLUMNS
def ohlcv_store(
self, pair: str, timeframe: str, data: DataFrame, candle_type: CandleType) -> None:
self, pair: str, timeframe: str, data: DataFrame, candle_type: CandleType
) -> None:
"""
Store data in parquet format.
format looks as follows:
@@ -34,9 +34,9 @@ class ParquetDataHandler(IDataHandler):
data.reset_index(drop=True).loc[:, self._columns].to_parquet(filename)
def _ohlcv_load(self, pair: str, timeframe: str,
timerange: Optional[TimeRange], candle_type: CandleType
) -> DataFrame:
def _ohlcv_load(
self, pair: str, timeframe: str, timerange: Optional[TimeRange], candle_type: CandleType
) -> DataFrame:
"""
Internal method used to load data for one pair from disk.
Implements the loading and conversion to a Pandas dataframe.
@@ -49,28 +49,31 @@ class ParquetDataHandler(IDataHandler):
:param candle_type: Any of the enum CandleType (must match trading mode!)
:return: DataFrame with ohlcv data, or empty DataFrame
"""
filename = self._pair_data_filename(
self._datadir, pair, timeframe, candle_type=candle_type)
filename = self._pair_data_filename(self._datadir, pair, timeframe, candle_type=candle_type)
if not filename.exists():
# Fallback mode for 1M files
filename = self._pair_data_filename(
self._datadir, pair, timeframe, candle_type=candle_type, no_timeframe_modify=True)
self._datadir, pair, timeframe, candle_type=candle_type, no_timeframe_modify=True
)
if not filename.exists():
return DataFrame(columns=self._columns)
pairdata = read_parquet(filename)
pairdata.columns = self._columns
pairdata = pairdata.astype(dtype={'open': 'float', 'high': 'float',
'low': 'float', 'close': 'float', 'volume': 'float'})
pairdata['date'] = to_datetime(pairdata['date'], unit='ms', utc=True)
pairdata = pairdata.astype(
dtype={
"open": "float",
"high": "float",
"low": "float",
"close": "float",
"volume": "float",
}
)
pairdata["date"] = to_datetime(pairdata["date"], unit="ms", utc=True)
return pairdata
def ohlcv_append(
self,
pair: str,
timeframe: str,
data: DataFrame,
candle_type: CandleType
self, pair: str, timeframe: str, data: DataFrame, candle_type: CandleType
) -> None:
"""
Append data to existing data structures

View File

@@ -7,11 +7,20 @@ from typing import Dict, List, Optional, Tuple
from pandas import DataFrame, concat
from freqtrade.configuration import TimeRange
from freqtrade.constants import (DATETIME_PRINT_FORMAT, DEFAULT_DATAFRAME_COLUMNS,
DL_DATA_TIMEFRAMES, DOCS_LINK, Config)
from freqtrade.data.converter import (clean_ohlcv_dataframe, convert_trades_to_ohlcv,
ohlcv_to_dataframe, trades_df_remove_duplicates,
trades_list_to_df)
from freqtrade.constants import (
DATETIME_PRINT_FORMAT,
DEFAULT_DATAFRAME_COLUMNS,
DL_DATA_TIMEFRAMES,
DOCS_LINK,
Config,
)
from freqtrade.data.converter import (
clean_ohlcv_dataframe,
convert_trades_to_ohlcv,
ohlcv_to_dataframe,
trades_df_remove_duplicates,
trades_list_to_df,
)
from freqtrade.data.history.datahandlers import IDataHandler, get_datahandler
from freqtrade.enums import CandleType, TradingMode
from freqtrade.exceptions import OperationalException
@@ -25,17 +34,19 @@ from freqtrade.util.migrations import migrate_data
logger = logging.getLogger(__name__)
def load_pair_history(pair: str,
timeframe: str,
datadir: Path, *,
timerange: Optional[TimeRange] = None,
fill_up_missing: bool = True,
drop_incomplete: bool = False,
startup_candles: int = 0,
data_format: Optional[str] = None,
data_handler: Optional[IDataHandler] = None,
candle_type: CandleType = CandleType.SPOT
) -> DataFrame:
def load_pair_history(
pair: str,
timeframe: str,
datadir: Path,
*,
timerange: Optional[TimeRange] = None,
fill_up_missing: bool = True,
drop_incomplete: bool = False,
startup_candles: int = 0,
data_format: Optional[str] = None,
data_handler: Optional[IDataHandler] = None,
candle_type: CandleType = CandleType.SPOT,
) -> DataFrame:
"""
Load cached ohlcv history for the given pair.
@@ -54,27 +65,30 @@ def load_pair_history(pair: str,
"""
data_handler = get_datahandler(datadir, data_format, data_handler)
return data_handler.ohlcv_load(pair=pair,
timeframe=timeframe,
timerange=timerange,
fill_missing=fill_up_missing,
drop_incomplete=drop_incomplete,
startup_candles=startup_candles,
candle_type=candle_type,
)
return data_handler.ohlcv_load(
pair=pair,
timeframe=timeframe,
timerange=timerange,
fill_missing=fill_up_missing,
drop_incomplete=drop_incomplete,
startup_candles=startup_candles,
candle_type=candle_type,
)
def load_data(datadir: Path,
timeframe: str,
pairs: List[str], *,
timerange: Optional[TimeRange] = None,
fill_up_missing: bool = True,
startup_candles: int = 0,
fail_without_data: bool = False,
data_format: str = 'feather',
candle_type: CandleType = CandleType.SPOT,
user_futures_funding_rate: Optional[int] = None,
) -> Dict[str, DataFrame]:
def load_data(
datadir: Path,
timeframe: str,
pairs: List[str],
*,
timerange: Optional[TimeRange] = None,
fill_up_missing: bool = True,
startup_candles: int = 0,
fail_without_data: bool = False,
data_format: str = "feather",
candle_type: CandleType = CandleType.SPOT,
user_futures_funding_rate: Optional[int] = None,
) -> Dict[str, DataFrame]:
"""
Load ohlcv history data for a list of pairs.
@@ -91,18 +105,21 @@ def load_data(datadir: Path,
"""
result: Dict[str, DataFrame] = {}
if startup_candles > 0 and timerange:
logger.info(f'Using indicator startup period: {startup_candles} ...')
logger.info(f"Using indicator startup period: {startup_candles} ...")
data_handler = get_datahandler(datadir, data_format)
for pair in pairs:
hist = load_pair_history(pair=pair, timeframe=timeframe,
datadir=datadir, timerange=timerange,
fill_up_missing=fill_up_missing,
startup_candles=startup_candles,
data_handler=data_handler,
candle_type=candle_type,
)
hist = load_pair_history(
pair=pair,
timeframe=timeframe,
datadir=datadir,
timerange=timerange,
fill_up_missing=fill_up_missing,
startup_candles=startup_candles,
data_handler=data_handler,
candle_type=candle_type,
)
if not hist.empty:
result[pair] = hist
else:
@@ -116,14 +133,16 @@ def load_data(datadir: Path,
return result
def refresh_data(*, datadir: Path,
timeframe: str,
pairs: List[str],
exchange: Exchange,
data_format: Optional[str] = None,
timerange: Optional[TimeRange] = None,
candle_type: CandleType,
) -> None:
def refresh_data(
*,
datadir: Path,
timeframe: str,
pairs: List[str],
exchange: Exchange,
data_format: Optional[str] = None,
timerange: Optional[TimeRange] = None,
candle_type: CandleType,
) -> None:
"""
Refresh ohlcv history data for a list of pairs.
@@ -137,11 +156,17 @@ def refresh_data(*, datadir: Path,
"""
data_handler = get_datahandler(datadir, data_format)
for idx, pair in enumerate(pairs):
process = f'{idx}/{len(pairs)}'
_download_pair_history(pair=pair, process=process,
timeframe=timeframe, datadir=datadir,
timerange=timerange, exchange=exchange, data_handler=data_handler,
candle_type=candle_type)
process = f"{idx}/{len(pairs)}"
_download_pair_history(
pair=pair,
process=process,
timeframe=timeframe,
datadir=datadir,
timerange=timerange,
exchange=exchange,
data_handler=data_handler,
candle_type=candle_type,
)
def _load_cached_data_for_updating(
@@ -163,42 +188,49 @@ def _load_cached_data_for_updating(
start = None
end = None
if timerange:
if timerange.starttype == 'date':
if timerange.starttype == "date":
start = timerange.startdt
if timerange.stoptype == 'date':
if timerange.stoptype == "date":
end = timerange.stopdt
# Intentionally don't pass timerange in - since we need to load the full dataset.
data = data_handler.ohlcv_load(pair, timeframe=timeframe,
timerange=None, fill_missing=False,
drop_incomplete=True, warn_no_data=False,
candle_type=candle_type)
data = data_handler.ohlcv_load(
pair,
timeframe=timeframe,
timerange=None,
fill_missing=False,
drop_incomplete=True,
warn_no_data=False,
candle_type=candle_type,
)
if not data.empty:
if not prepend and start and start < data.iloc[0]['date']:
if not prepend and start and start < data.iloc[0]["date"]:
# Earlier data than existing data requested, redownload all
data = DataFrame(columns=DEFAULT_DATAFRAME_COLUMNS)
else:
if prepend:
end = data.iloc[0]['date']
end = data.iloc[0]["date"]
else:
start = data.iloc[-1]['date']
start = data.iloc[-1]["date"]
start_ms = int(start.timestamp() * 1000) if start else None
end_ms = int(end.timestamp() * 1000) if end else None
return data, start_ms, end_ms
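# Update-window sketch (hypothetical cache spanning Jan 10 - Jan 20):
# prepend=False resumes from the newest candle; prepend=True stops at the oldest.
from datetime import datetime, timezone

first = datetime(2024, 1, 10, tzinfo=timezone.utc)
last = datetime(2024, 1, 20, tzinfo=timezone.utc)
prepend = False
start, end = (None, first) if prepend else (last, None)
start_ms = int(start.timestamp() * 1000) if start else None
end_ms = int(end.timestamp() * 1000) if end else None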
def _download_pair_history(pair: str, *,
datadir: Path,
exchange: Exchange,
timeframe: str = '5m',
process: str = '',
new_pairs_days: int = 30,
data_handler: Optional[IDataHandler] = None,
timerange: Optional[TimeRange] = None,
candle_type: CandleType,
erase: bool = False,
prepend: bool = False,
) -> bool:
def _download_pair_history(
pair: str,
*,
datadir: Path,
exchange: Exchange,
timeframe: str = "5m",
process: str = "",
new_pairs_days: int = 30,
data_handler: Optional[IDataHandler] = None,
timerange: Optional[TimeRange] = None,
candle_type: CandleType,
erase: bool = False,
prepend: bool = False,
) -> bool:
"""
Download latest candles from the exchange for the pair and timeframe passed in parameters
The data is downloaded starting from the last correct data that
@@ -217,54 +249,71 @@ def _download_pair_history(pair: str, *,
try:
if erase:
if data_handler.ohlcv_purge(pair, timeframe, candle_type=candle_type):
logger.info(f'Deleting existing data for pair {pair}, {timeframe}, {candle_type}.')
logger.info(f"Deleting existing data for pair {pair}, {timeframe}, {candle_type}.")
data, since_ms, until_ms = _load_cached_data_for_updating(
pair, timeframe, timerange,
pair,
timeframe,
timerange,
data_handler=data_handler,
candle_type=candle_type,
prepend=prepend)
prepend=prepend,
)
logger.info(f'({process}) - Download history data for "{pair}", {timeframe}, '
f'{candle_type} and store in {datadir}. '
f'From {format_ms_time(since_ms) if since_ms else "start"} to '
f'{format_ms_time(until_ms) if until_ms else "now"}'
)
logger.info(
f'({process}) - Download history data for "{pair}", {timeframe}, '
f"{candle_type} and store in {datadir}. "
f'From {format_ms_time(since_ms) if since_ms else "start"} to '
f'{format_ms_time(until_ms) if until_ms else "now"}'
)
logger.debug("Current Start: %s",
f"{data.iloc[0]['date']:{DATETIME_PRINT_FORMAT}}"
if not data.empty else 'None')
logger.debug("Current End: %s",
f"{data.iloc[-1]['date']:{DATETIME_PRINT_FORMAT}}"
if not data.empty else 'None')
logger.debug(
"Current Start: %s",
f"{data.iloc[0]['date']:{DATETIME_PRINT_FORMAT}}" if not data.empty else "None",
)
logger.debug(
"Current End: %s",
f"{data.iloc[-1]['date']:{DATETIME_PRINT_FORMAT}}" if not data.empty else "None",
)
# Default since_ms to 30 days if nothing is given
new_data = exchange.get_historic_ohlcv(pair=pair,
timeframe=timeframe,
since_ms=since_ms if since_ms else
int((datetime.now() - timedelta(days=new_pairs_days)
).timestamp()) * 1000,
is_new_pair=data.empty,
candle_type=candle_type,
until_ms=until_ms if until_ms else None
)
new_data = exchange.get_historic_ohlcv(
pair=pair,
timeframe=timeframe,
since_ms=(
since_ms
if since_ms
else int((datetime.now() - timedelta(days=new_pairs_days)).timestamp()) * 1000
),
is_new_pair=data.empty,
candle_type=candle_type,
until_ms=until_ms if until_ms else None,
)
# TODO: Maybe move parsing to exchange class (?)
new_dataframe = ohlcv_to_dataframe(new_data, timeframe, pair,
fill_missing=False, drop_incomplete=True)
new_dataframe = ohlcv_to_dataframe(
new_data, timeframe, pair, fill_missing=False, drop_incomplete=True
)
if data.empty:
data = new_dataframe
else:
# Run cleaning again to ensure there were no duplicate candles
# Especially between existing and new data.
data = clean_ohlcv_dataframe(concat([data, new_dataframe], axis=0), timeframe, pair,
fill_missing=False, drop_incomplete=False)
data = clean_ohlcv_dataframe(
concat([data, new_dataframe], axis=0),
timeframe,
pair,
fill_missing=False,
drop_incomplete=False,
)
logger.debug("New Start: %s",
f"{data.iloc[0]['date']:{DATETIME_PRINT_FORMAT}}"
if not data.empty else 'None')
logger.debug("New End: %s",
f"{data.iloc[-1]['date']:{DATETIME_PRINT_FORMAT}}"
if not data.empty else 'None')
logger.debug(
"New Start: %s",
f"{data.iloc[0]['date']:{DATETIME_PRINT_FORMAT}}" if not data.empty else "None",
)
logger.debug(
"New End: %s",
f"{data.iloc[-1]['date']:{DATETIME_PRINT_FORMAT}}" if not data.empty else "None",
)
data_handler.ohlcv_store(pair, timeframe, data=data, candle_type=candle_type)
return True
@@ -276,13 +325,18 @@ def _download_pair_history(pair: str, *,
return False
def refresh_backtest_ohlcv_data(exchange: Exchange, pairs: List[str], timeframes: List[str],
datadir: Path, trading_mode: str,
timerange: Optional[TimeRange] = None,
new_pairs_days: int = 30, erase: bool = False,
data_format: Optional[str] = None,
prepend: bool = False,
) -> List[str]:
def refresh_backtest_ohlcv_data(
exchange: Exchange,
pairs: List[str],
timeframes: List[str],
datadir: Path,
trading_mode: str,
timerange: Optional[TimeRange] = None,
new_pairs_days: int = 30,
erase: bool = False,
data_format: Optional[str] = None,
prepend: bool = False,
) -> List[str]:
"""
Refresh stored ohlcv data for backtesting and hyperopt operations.
Used by freqtrade download-data subcommand.
@@ -291,63 +345,77 @@ def refresh_backtest_ohlcv_data(exchange: Exchange, pairs: List[str], timeframes
pairs_not_available = []
data_handler = get_datahandler(datadir, data_format)
candle_type = CandleType.get_default(trading_mode)
process = ''
process = ""
for idx, pair in enumerate(pairs, start=1):
if pair not in exchange.markets:
pairs_not_available.append(pair)
logger.info(f"Skipping pair {pair}...")
continue
for timeframe in timeframes:
logger.debug(f'Downloading pair {pair}, {candle_type}, interval {timeframe}.')
process = f'{idx}/{len(pairs)}'
_download_pair_history(pair=pair, process=process,
datadir=datadir, exchange=exchange,
timerange=timerange, data_handler=data_handler,
timeframe=str(timeframe), new_pairs_days=new_pairs_days,
candle_type=candle_type,
erase=erase, prepend=prepend)
if trading_mode == 'futures':
logger.debug(f"Downloading pair {pair}, {candle_type}, interval {timeframe}.")
process = f"{idx}/{len(pairs)}"
_download_pair_history(
pair=pair,
process=process,
datadir=datadir,
exchange=exchange,
timerange=timerange,
data_handler=data_handler,
timeframe=str(timeframe),
new_pairs_days=new_pairs_days,
candle_type=candle_type,
erase=erase,
prepend=prepend,
)
if trading_mode == "futures":
# Predefined candletype (and timeframe) depending on exchange
# Downloads what is necessary to backtest based on futures data.
tf_mark = exchange.get_option('mark_ohlcv_timeframe')
tf_funding_rate = exchange.get_option('funding_fee_timeframe')
tf_mark = exchange.get_option("mark_ohlcv_timeframe")
tf_funding_rate = exchange.get_option("funding_fee_timeframe")
fr_candle_type = CandleType.from_string(exchange.get_option('mark_ohlcv_price'))
fr_candle_type = CandleType.from_string(exchange.get_option("mark_ohlcv_price"))
# All exchanges need FundingRate for futures trading.
# The timeframe is aligned to the mark-price timeframe.
combs = ((CandleType.FUNDING_RATE, tf_funding_rate), (fr_candle_type, tf_mark))
for candle_type_f, tf in combs:
logger.debug(f'Downloading pair {pair}, {candle_type_f}, interval {tf}.')
_download_pair_history(pair=pair, process=process,
datadir=datadir, exchange=exchange,
timerange=timerange, data_handler=data_handler,
timeframe=str(tf), new_pairs_days=new_pairs_days,
candle_type=candle_type_f,
erase=erase, prepend=prepend)
logger.debug(f"Downloading pair {pair}, {candle_type_f}, interval {tf}.")
_download_pair_history(
pair=pair,
process=process,
datadir=datadir,
exchange=exchange,
timerange=timerange,
data_handler=data_handler,
timeframe=str(tf),
new_pairs_days=new_pairs_days,
candle_type=candle_type_f,
erase=erase,
prepend=prepend,
)
return pairs_not_available
def _download_trades_history(exchange: Exchange,
pair: str, *,
new_pairs_days: int = 30,
timerange: Optional[TimeRange] = None,
data_handler: IDataHandler,
trading_mode: TradingMode,
) -> bool:
def _download_trades_history(
exchange: Exchange,
pair: str,
*,
new_pairs_days: int = 30,
timerange: Optional[TimeRange] = None,
data_handler: IDataHandler,
trading_mode: TradingMode,
) -> bool:
"""
Download trade history from the exchange.
Appends to previously downloaded trades data.
"""
try:
until = None
since = 0
if timerange:
if timerange.starttype == 'date':
if timerange.starttype == "date":
since = timerange.startts * 1000
if timerange.stoptype == 'date':
if timerange.stoptype == "date":
until = timerange.stopts * 1000
trades = data_handler.trades_load(pair, trading_mode)
@@ -356,60 +424,76 @@ def _download_trades_history(exchange: Exchange,
# DEFAULT_TRADES_COLUMNS: 0 -> timestamp
# DEFAULT_TRADES_COLUMNS: 1 -> id
if not trades.empty and since > 0 and since < trades.iloc[0]['timestamp']:
if not trades.empty and since > 0 and since < trades.iloc[0]["timestamp"]:
# since is before the first trade
logger.info(f"Start ({trades.iloc[0]['date']:{DATETIME_PRINT_FORMAT}}) earlier than "
f"available data. Redownloading trades for {pair}...")
logger.info(
f"Start ({trades.iloc[0]['date']:{DATETIME_PRINT_FORMAT}}) earlier than "
f"available data. Redownloading trades for {pair}..."
)
trades = trades_list_to_df([])
from_id = trades.iloc[-1]['id'] if not trades.empty else None
if not trades.empty and since < trades.iloc[-1]['timestamp']:
from_id = trades.iloc[-1]["id"] if not trades.empty else None
if not trades.empty and since < trades.iloc[-1]["timestamp"]:
# Reset since to the last available point
# - 5 seconds (to ensure we're getting all trades)
since = trades.iloc[-1]['timestamp'] - (5 * 1000)
logger.info(f"Using last trade date -5s - Downloading trades for {pair} "
f"since: {format_ms_time(since)}.")
since = trades.iloc[-1]["timestamp"] - (5 * 1000)
logger.info(
f"Using last trade date -5s - Downloading trades for {pair} "
f"since: {format_ms_time(since)}."
)
if not since:
since = dt_ts(dt_now() - timedelta(days=new_pairs_days))
logger.debug("Current Start: %s", 'None' if trades.empty else
f"{trades.iloc[0]['date']:{DATETIME_PRINT_FORMAT}}")
logger.debug("Current End: %s", 'None' if trades.empty else
f"{trades.iloc[-1]['date']:{DATETIME_PRINT_FORMAT}}")
logger.debug(
"Current Start: %s",
"None" if trades.empty else f"{trades.iloc[0]['date']:{DATETIME_PRINT_FORMAT}}",
)
logger.debug(
"Current End: %s",
"None" if trades.empty else f"{trades.iloc[-1]['date']:{DATETIME_PRINT_FORMAT}}",
)
logger.info(f"Current Amount of trades: {len(trades)}")
# Default since_ms to 30 days if nothing is given
new_trades = exchange.get_historic_trades(pair=pair,
since=since,
until=until,
from_id=from_id,
)
new_trades = exchange.get_historic_trades(
pair=pair,
since=since,
until=until,
from_id=from_id,
)
new_trades_df = trades_list_to_df(new_trades[1])
trades = concat([trades, new_trades_df], axis=0)
# Remove duplicates to make sure we're not storing data we don't need
trades = trades_df_remove_duplicates(trades)
data_handler.trades_store(pair, trades, trading_mode)
logger.debug("New Start: %s", 'None' if trades.empty else
f"{trades.iloc[0]['date']:{DATETIME_PRINT_FORMAT}}")
logger.debug("New End: %s", 'None' if trades.empty else
f"{trades.iloc[-1]['date']:{DATETIME_PRINT_FORMAT}}")
logger.debug(
"New Start: %s",
"None" if trades.empty else f"{trades.iloc[0]['date']:{DATETIME_PRINT_FORMAT}}",
)
logger.debug(
"New End: %s",
"None" if trades.empty else f"{trades.iloc[-1]['date']:{DATETIME_PRINT_FORMAT}}",
)
logger.info(f"New Amount of trades: {len(trades)}")
return True
except Exception:
logger.exception(
f'Failed to download historic trades for pair: "{pair}". '
)
logger.exception(f'Failed to download historic trades for pair: "{pair}". ')
return False
def refresh_backtest_trades_data(exchange: Exchange, pairs: List[str], datadir: Path,
timerange: TimeRange, trading_mode: TradingMode,
new_pairs_days: int = 30,
erase: bool = False, data_format: str = 'feather',
) -> List[str]:
def refresh_backtest_trades_data(
exchange: Exchange,
pairs: List[str],
datadir: Path,
timerange: TimeRange,
trading_mode: TradingMode,
new_pairs_days: int = 30,
erase: bool = False,
data_format: str = "feather",
) -> List[str]:
"""
Refresh stored trades data for backtesting and hyperopt operations.
Used by freqtrade download-data subcommand.
@@ -425,15 +509,17 @@ def refresh_backtest_trades_data(exchange: Exchange, pairs: List[str], datadir:
if erase:
if data_handler.trades_purge(pair, trading_mode):
logger.info(f'Deleting existing data for pair {pair}.')
logger.info(f"Deleting existing data for pair {pair}.")
logger.info(f'Downloading trades for pair {pair}.')
_download_trades_history(exchange=exchange,
pair=pair,
new_pairs_days=new_pairs_days,
timerange=timerange,
data_handler=data_handler,
trading_mode=trading_mode)
logger.info(f"Downloading trades for pair {pair}.")
_download_trades_history(
exchange=exchange,
pair=pair,
new_pairs_days=new_pairs_days,
timerange=timerange,
data_handler=data_handler,
trading_mode=trading_mode,
)
return pairs_not_available
@@ -445,15 +531,18 @@ def get_timerange(data: Dict[str, DataFrame]) -> Tuple[datetime, datetime]:
:return: tuple containing min_date, max_date
"""
timeranges = [
(frame['date'].min().to_pydatetime(), frame['date'].max().to_pydatetime())
(frame["date"].min().to_pydatetime(), frame["date"].max().to_pydatetime())
for frame in data.values()
]
return (min(timeranges, key=operator.itemgetter(0))[0],
max(timeranges, key=operator.itemgetter(1))[1])
return (
min(timeranges, key=operator.itemgetter(0))[0],
max(timeranges, key=operator.itemgetter(1))[1],
)
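# min/max sketch: two hypothetical frames spanning Jan 1-5 and Jan 3-9 yield
# the overall range (Jan 1, Jan 9):
import pandas as pd

d1 = pd.DataFrame({"date": pd.date_range("2024-01-01", "2024-01-05", tz="UTC")})
d2 = pd.DataFrame({"date": pd.date_range("2024-01-03", "2024-01-09", tz="UTC")})
print(get_timerange({"A/B": d1, "C/D": d2}))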
def validate_backtest_data(data: DataFrame, pair: str, min_date: datetime,
max_date: datetime, timeframe_min: int) -> bool:
def validate_backtest_data(
data: DataFrame, pair: str, min_date: datetime, max_date: datetime, timeframe_min: int
) -> bool:
"""
Validates preprocessed backtesting data for missing values and shows warnings about it.
@@ -469,89 +558,114 @@ def validate_backtest_data(data: DataFrame, pair: str, min_date: datetime,
dflen = len(data)
if dflen < expected_frames:
found_missing = True
logger.warning("%s has missing frames: expected %s, got %s, that's %s missing values",
pair, expected_frames, dflen, expected_frames - dflen)
logger.warning(
"%s has missing frames: expected %s, got %s, that's %s missing values",
pair,
expected_frames,
dflen,
expected_frames - dflen,
)
return found_missing
def download_data_main(config: Config) -> None:
timerange = TimeRange()
if 'days' in config:
time_since = (datetime.now() - timedelta(days=config['days'])).strftime("%Y%m%d")
timerange = TimeRange.parse_timerange(f'{time_since}-')
if "days" in config:
time_since = (datetime.now() - timedelta(days=config["days"])).strftime("%Y%m%d")
timerange = TimeRange.parse_timerange(f"{time_since}-")
if 'timerange' in config:
timerange = timerange.parse_timerange(config['timerange'])
if "timerange" in config:
timerange = timerange.parse_timerange(config["timerange"])
# Remove stake-currency to skip checks which are not relevant for datadownload
config['stake_currency'] = ''
config["stake_currency"] = ""
pairs_not_available: List[str] = []
# Init exchange
from freqtrade.resolvers.exchange_resolver import ExchangeResolver
exchange = ExchangeResolver.load_exchange(config, validate=False)
available_pairs = [
p for p in exchange.get_markets(
tradable_only=True, active_only=not config.get('include_inactive')
).keys()
p
for p in exchange.get_markets(
tradable_only=True, active_only=not config.get("include_inactive")
).keys()
]
expanded_pairs = dynamic_expand_pairlist(config, available_pairs)
if 'timeframes' not in config:
config['timeframes'] = DL_DATA_TIMEFRAMES
if "timeframes" not in config:
config["timeframes"] = DL_DATA_TIMEFRAMES
# Manual validations of relevant settings
if not config['exchange'].get('skip_pair_validation', False):
if not config["exchange"].get("skip_pair_validation", False):
exchange.validate_pairs(expanded_pairs)
logger.info(f"About to download pairs: {expanded_pairs}, "
f"intervals: {config['timeframes']} to {config['datadir']}")
logger.info(
f"About to download pairs: {expanded_pairs}, "
f"intervals: {config['timeframes']} to {config['datadir']}"
)
if len(expanded_pairs) == 0:
logger.warning(
"No pairs available for download. "
"Please make sure you're using the correct Pair naming for your selected trade mode. \n"
f"More info: {DOCS_LINK}/bot-basics/#pair-naming")
f"More info: {DOCS_LINK}/bot-basics/#pair-naming"
)
for timeframe in config['timeframes']:
for timeframe in config["timeframes"]:
exchange.validate_timeframes(timeframe)
# Start downloading
try:
if config.get('download_trades'):
if config.get("download_trades"):
pairs_not_available = refresh_backtest_trades_data(
exchange, pairs=expanded_pairs, datadir=config['datadir'],
timerange=timerange, new_pairs_days=config['new_pairs_days'],
erase=bool(config.get('erase')), data_format=config['dataformat_trades'],
trading_mode=config.get('trading_mode', TradingMode.SPOT),
)
# Convert downloaded trade data to different timeframes
convert_trades_to_ohlcv(
pairs=expanded_pairs, timeframes=config['timeframes'],
datadir=config['datadir'], timerange=timerange, erase=bool(config.get('erase')),
data_format_ohlcv=config['dataformat_ohlcv'],
data_format_trades=config['dataformat_trades'],
candle_type=config.get('candle_type_def', CandleType.SPOT),
exchange,
pairs=expanded_pairs,
datadir=config["datadir"],
timerange=timerange,
new_pairs_days=config["new_pairs_days"],
erase=bool(config.get("erase")),
data_format=config["dataformat_trades"],
trading_mode=config.get("trading_mode", TradingMode.SPOT),
)
if config.get("convert_trades") or not exchange.get_option("ohlcv_has_history", True):
# Convert downloaded trade data to different timeframes
# Only auto-convert for exchanges without historic klines
convert_trades_to_ohlcv(
pairs=expanded_pairs,
timeframes=config["timeframes"],
datadir=config["datadir"],
timerange=timerange,
erase=bool(config.get("erase")),
data_format_ohlcv=config["dataformat_ohlcv"],
data_format_trades=config["dataformat_trades"],
candle_type=config.get("candle_type_def", CandleType.SPOT),
)
else:
if not exchange.get_option('ohlcv_has_history', True):
if not exchange.get_option("ohlcv_has_history", True):
raise OperationalException(
f"Historic klines not available for {exchange.name}. "
"Please use `--dl-trades` instead for this exchange "
"(will unfortunately take a long time)."
)
)
migrate_data(config, exchange)
pairs_not_available = refresh_backtest_ohlcv_data(
exchange, pairs=expanded_pairs, timeframes=config['timeframes'],
datadir=config['datadir'], timerange=timerange,
new_pairs_days=config['new_pairs_days'],
erase=bool(config.get('erase')), data_format=config['dataformat_ohlcv'],
trading_mode=config.get('trading_mode', 'spot'),
prepend=config.get('prepend_data', False)
exchange,
pairs=expanded_pairs,
timeframes=config["timeframes"],
datadir=config["datadir"],
timerange=timerange,
new_pairs_days=config["new_pairs_days"],
erase=bool(config.get("erase")),
data_format=config["dataformat_ohlcv"],
trading_mode=config.get("trading_mode", "spot"),
prepend=config.get("prepend_data", False),
)
finally:
if pairs_not_available:
logger.info(f"Pairs [{','.join(pairs_not_available)}] not available "
f"on exchange {exchange.name}.")
logger.info(
f"Pairs [{','.join(pairs_not_available)}] not available "
f"on exchange {exchange.name}."
)
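Backing up to the timerange handling at the top of download_data_main, a sketch with a pinned "today" instead of datetime.now():

from datetime import datetime, timedelta

time_since = (datetime(2024, 5, 30) - timedelta(days=30)).strftime("%Y%m%d")
assert time_since == "20240430"  # parsed as the open-ended range "20240430-"
# An explicit config["timerange"] would then override this days-based range.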

View File

@@ -1,5 +1,6 @@
import logging
import math
from dataclasses import dataclass
from datetime import datetime
from typing import Dict, Tuple
@@ -31,7 +32,8 @@ def calculate_market_change(data: Dict[str, pd.DataFrame], column: str = "close"
def combine_dataframes_by_column(
data: Dict[str, pd.DataFrame], column: str = "close") -> pd.DataFrame:
data: Dict[str, pd.DataFrame], column: str = "close"
) -> pd.DataFrame:
"""
Combine multiple dataframes "column"
:param data: Dict of Dataframes, dict key should be pair.
@@ -41,14 +43,15 @@ def combine_dataframes_by_column(
"""
if not data:
raise ValueError("No data provided.")
df_comb = pd.concat([data[pair].set_index('date').rename(
{column: pair}, axis=1)[pair] for pair in data], axis=1)
df_comb = pd.concat(
[data[pair].set_index("date").rename({column: pair}, axis=1)[pair] for pair in data], axis=1
)
return df_comb
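A minimal sketch of the wide frame this concat produces (one column per pair, indexed by date; values illustrative):

import pandas as pd

dates = pd.date_range("2024-01-01", periods=2, freq="1D")
data = {
    "BTC/USDT": pd.DataFrame({"date": dates, "close": [100.0, 110.0]}),
    "ETH/USDT": pd.DataFrame({"date": dates, "close": [10.0, 11.0]}),
}
df_comb = combine_dataframes_by_column(data)
# df_comb.columns -> ["BTC/USDT", "ETH/USDT"]; df_comb.index -> the shared dates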
def combined_dataframes_with_rel_mean(
data: Dict[str, pd.DataFrame], fromdt: datetime, todt: datetime,
column: str = "close") -> pd.DataFrame:
data: Dict[str, pd.DataFrame], fromdt: datetime, todt: datetime, column: str = "close"
) -> pd.DataFrame:
"""
Combine multiple dataframes "column"
:param data: Dict of Dataframes, dict key should be pair.
@@ -60,14 +63,15 @@ def combined_dataframes_with_rel_mean(
df_comb = combine_dataframes_by_column(data, column)
# Trim dataframes to the given timeframe
df_comb = df_comb.iloc[(df_comb.index >= fromdt) & (df_comb.index < todt)]
df_comb['count'] = df_comb.count(axis=1)
df_comb['mean'] = df_comb.mean(axis=1)
df_comb['rel_mean'] = df_comb['mean'].pct_change().fillna(0).cumsum()
return df_comb[['mean', 'rel_mean', 'count']]
df_comb["count"] = df_comb.count(axis=1)
df_comb["mean"] = df_comb.mean(axis=1)
df_comb["rel_mean"] = df_comb["mean"].pct_change().fillna(0).cumsum()
return df_comb[["mean", "rel_mean", "count"]]
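Continuing the sketch above, the rel_mean arithmetic works out as follows (note it is a cumulative sum of relative changes, not a compounded return):

# cross-pair mean per candle: (100 + 10) / 2 = 55.0, then (110 + 11) / 2 = 60.5
rel_change = (60.5 - 55.0) / 55.0  # pct_change() -> 0.1 on the second candle
# fillna(0).cumsum() -> rel_mean = [0.0, 0.1]; "count" is 2 on both candles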
def combine_dataframes_with_mean(
data: Dict[str, pd.DataFrame], column: str = "close") -> pd.DataFrame:
data: Dict[str, pd.DataFrame], column: str = "close"
) -> pd.DataFrame:
"""
Combine multiple dataframes "column"
:param data: Dict of Dataframes, dict key should be pair.
@@ -78,13 +82,14 @@ def combine_dataframes_with_mean(
"""
df_comb = combine_dataframes_by_column(data, column)
df_comb['mean'] = df_comb.mean(axis=1)
df_comb["mean"] = df_comb.mean(axis=1)
return df_comb
def create_cum_profit(df: pd.DataFrame, trades: pd.DataFrame, col_name: str,
timeframe: str) -> pd.DataFrame:
def create_cum_profit(
df: pd.DataFrame, trades: pd.DataFrame, col_name: str, timeframe: str
) -> pd.DataFrame:
"""
Adds a column `col_name` with the cumulative profit for the given trades array.
:param df: DataFrame with date index
@@ -97,11 +102,11 @@ def create_cum_profit(df: pd.DataFrame, trades: pd.DataFrame, col_name: str,
if len(trades) == 0:
raise ValueError("Trade dataframe empty.")
from freqtrade.exchange import timeframe_to_resample_freq
timeframe_freq = timeframe_to_resample_freq(timeframe)
# Resample to timeframe to make sure trades match candles
_trades_sum = trades.resample(timeframe_freq, on='close_date'
)[['profit_abs']].sum()
df.loc[:, col_name] = _trades_sum['profit_abs'].cumsum()
_trades_sum = trades.resample(timeframe_freq, on="close_date")[["profit_abs"]].sum()
df.loc[:, col_name] = _trades_sum["profit_abs"].cumsum()
# Set first value to 0
df.loc[df.iloc[0].name, col_name] = 0
# FFill to get continuous
@@ -109,29 +114,34 @@ def create_cum_profit(df: pd.DataFrame, trades: pd.DataFrame, col_name: str,
return df
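A sketch of the resampling step, assuming timeframe_to_resample_freq("5m") maps to the pandas frequency "5min":

import pandas as pd

trades = pd.DataFrame({
    "close_date": pd.to_datetime(["2024-01-01 00:07", "2024-01-01 00:12"]),
    "profit_abs": [1.0, -0.5],
})
_trades_sum = trades.resample("5min", on="close_date")[["profit_abs"]].sum()
# buckets: 00:05 -> 1.0, 00:10 -> -0.5; cumsum() then steps to 1.0 and 0.5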
def _calc_drawdown_series(profit_results: pd.DataFrame, *, date_col: str, value_col: str,
starting_balance: float) -> pd.DataFrame:
def _calc_drawdown_series(
profit_results: pd.DataFrame, *, date_col: str, value_col: str, starting_balance: float
) -> pd.DataFrame:
max_drawdown_df = pd.DataFrame()
max_drawdown_df['cumulative'] = profit_results[value_col].cumsum()
max_drawdown_df['high_value'] = max_drawdown_df['cumulative'].cummax()
max_drawdown_df['drawdown'] = max_drawdown_df['cumulative'] - max_drawdown_df['high_value']
max_drawdown_df['date'] = profit_results.loc[:, date_col]
max_drawdown_df["cumulative"] = profit_results[value_col].cumsum()
max_drawdown_df["high_value"] = max_drawdown_df["cumulative"].cummax()
max_drawdown_df["drawdown"] = max_drawdown_df["cumulative"] - max_drawdown_df["high_value"]
max_drawdown_df["date"] = profit_results.loc[:, date_col]
if starting_balance:
cumulative_balance = starting_balance + max_drawdown_df['cumulative']
max_balance = starting_balance + max_drawdown_df['high_value']
max_drawdown_df['drawdown_relative'] = ((max_balance - cumulative_balance) / max_balance)
cumulative_balance = starting_balance + max_drawdown_df["cumulative"]
max_balance = starting_balance + max_drawdown_df["high_value"]
max_drawdown_df["drawdown_relative"] = (max_balance - cumulative_balance) / max_balance
else:
# NOTE: This is not completely accurate,
# but might be good enough if starting_balance is not available
max_drawdown_df['drawdown_relative'] = (
(max_drawdown_df['high_value'] - max_drawdown_df['cumulative'])
/ max_drawdown_df['high_value'])
max_drawdown_df["drawdown_relative"] = (
max_drawdown_df["high_value"] - max_drawdown_df["cumulative"]
) / max_drawdown_df["high_value"]
return max_drawdown_df
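Worked numbers for the series above (illustrative per-trade profits):

# profit_abs:  [10, -4, -3,  5]
# cumulative:  [10,  6,  3,  8]
# high_value:  [10, 10, 10, 10]   (running maximum)
# drawdown:    [ 0, -4, -7, -2]
# With starting_balance = 100, max_balance stays at 110, so
worst_relative = (110 - 103) / 110  # drawdown_relative peaks at ~0.064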
def calculate_underwater(trades: pd.DataFrame, *, date_col: str = 'close_date',
value_col: str = 'profit_ratio', starting_balance: float = 0.0
):
def calculate_underwater(
trades: pd.DataFrame,
*,
date_col: str = "close_date",
value_col: str = "profit_ratio",
starting_balance: float = 0.0,
):
"""
Calculate max drawdown and the corresponding close dates
:param trades: DataFrame containing trades (requires columns close_date and profit_ratio)
@@ -145,25 +155,37 @@ def calculate_underwater(trades: pd.DataFrame, *, date_col: str = 'close_date',
raise ValueError("Trade dataframe empty.")
profit_results = trades.sort_values(date_col).reset_index(drop=True)
max_drawdown_df = _calc_drawdown_series(
profit_results,
date_col=date_col,
value_col=value_col,
starting_balance=starting_balance)
profit_results, date_col=date_col, value_col=value_col, starting_balance=starting_balance
)
return max_drawdown_df
def calculate_max_drawdown(trades: pd.DataFrame, *, date_col: str = 'close_date',
value_col: str = 'profit_abs', starting_balance: float = 0,
relative: bool = False
) -> Tuple[float, pd.Timestamp, pd.Timestamp, float, float, float]:
@dataclass()
class DrawDownResult:
drawdown_abs: float = 0.0
high_date: pd.Timestamp = None
low_date: pd.Timestamp = None
high_value: float = 0.0
low_value: float = 0.0
relative_account_drawdown: float = 0.0
def calculate_max_drawdown(
trades: pd.DataFrame,
*,
date_col: str = "close_date",
value_col: str = "profit_abs",
starting_balance: float = 0,
relative: bool = False,
) -> DrawDownResult:
"""
Calculate max drawdown and the corresponding close dates
:param trades: DataFrame containing trades (requires columns close_date and profit_ratio)
:param date_col: Column in DataFrame to use for dates (defaults to 'close_date')
:param value_col: Column in DataFrame to use for values (defaults to 'profit_abs')
:param starting_balance: Portfolio starting balance - properly calculate relative drawdown.
:return: Tuple (float, highdate, lowdate, highvalue, lowvalue, relative_drawdown)
:return: DrawDownResult object
with absolute max drawdown, high and low time and high and low value,
and the relative account drawdown
:raise: ValueError if trade-dataframe was found empty.
@@ -172,32 +194,31 @@ def calculate_max_drawdown(trades: pd.DataFrame, *, date_col: str = 'close_date'
raise ValueError("Trade dataframe empty.")
profit_results = trades.sort_values(date_col).reset_index(drop=True)
max_drawdown_df = _calc_drawdown_series(
profit_results,
date_col=date_col,
value_col=value_col,
starting_balance=starting_balance
profit_results, date_col=date_col, value_col=value_col, starting_balance=starting_balance
)
idxmin = (
max_drawdown_df['drawdown_relative'].idxmax()
if relative else max_drawdown_df['drawdown'].idxmin()
max_drawdown_df["drawdown_relative"].idxmax()
if relative
else max_drawdown_df["drawdown"].idxmin()
)
if idxmin == 0:
raise ValueError("No losing trade, therefore no drawdown.")
high_date = profit_results.loc[max_drawdown_df.iloc[:idxmin]['high_value'].idxmax(), date_col]
high_date = profit_results.loc[max_drawdown_df.iloc[:idxmin]["high_value"].idxmax(), date_col]
low_date = profit_results.loc[idxmin, date_col]
high_val = max_drawdown_df.loc[max_drawdown_df.iloc[:idxmin]
['high_value'].idxmax(), 'cumulative']
low_val = max_drawdown_df.loc[idxmin, 'cumulative']
max_drawdown_rel = max_drawdown_df.loc[idxmin, 'drawdown_relative']
high_val = max_drawdown_df.loc[
max_drawdown_df.iloc[:idxmin]["high_value"].idxmax(), "cumulative"
]
low_val = max_drawdown_df.loc[idxmin, "cumulative"]
max_drawdown_rel = max_drawdown_df.loc[idxmin, "drawdown_relative"]
return (
abs(max_drawdown_df.loc[idxmin, 'drawdown']),
high_date,
low_date,
high_val,
low_val,
max_drawdown_rel
return DrawDownResult(
drawdown_abs=abs(max_drawdown_df.loc[idxmin, "drawdown"]),
high_date=high_date,
low_date=low_date,
high_value=high_val,
low_value=low_val,
relative_account_drawdown=max_drawdown_rel,
)
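Call sites migrate from six-element tuple unpacking to attribute access (the calculate_calmar hunk further down shows the same change); a sketch, given an existing trades DataFrame:

# before: abs_dd, high_dt, low_dt, high_val, low_val, rel_dd = calculate_max_drawdown(trades)
drawdown = calculate_max_drawdown(trades, starting_balance=1000)
print(drawdown.drawdown_abs, drawdown.relative_account_drawdown)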
@@ -213,9 +234,9 @@ def calculate_csum(trades: pd.DataFrame, starting_balance: float = 0) -> Tuple[f
raise ValueError("Trade dataframe empty.")
csum_df = pd.DataFrame()
csum_df['sum'] = trades['profit_abs'].cumsum()
csum_min = csum_df['sum'].min() + starting_balance
csum_max = csum_df['sum'].max() + starting_balance
csum_df["sum"] = trades["profit_abs"].cumsum()
csum_min = csum_df["sum"].min() + starting_balance
csum_max = csum_df["sum"].max() + starting_balance
return csum_min, csum_max
@@ -245,28 +266,29 @@ def calculate_expectancy(trades: pd.DataFrame) -> Tuple[float, float]:
expectancy_ratio = 100
if len(trades) > 0:
winning_trades = trades.loc[trades['profit_abs'] > 0]
losing_trades = trades.loc[trades['profit_abs'] < 0]
profit_sum = winning_trades['profit_abs'].sum()
loss_sum = abs(losing_trades['profit_abs'].sum())
winning_trades = trades.loc[trades["profit_abs"] > 0]
losing_trades = trades.loc[trades["profit_abs"] < 0]
profit_sum = winning_trades["profit_abs"].sum()
loss_sum = abs(losing_trades["profit_abs"].sum())
nb_win_trades = len(winning_trades)
nb_loss_trades = len(losing_trades)
average_win = (profit_sum / nb_win_trades) if nb_win_trades > 0 else 0
average_loss = (loss_sum / nb_loss_trades) if nb_loss_trades > 0 else 0
winrate = (nb_win_trades / len(trades))
loserate = (nb_loss_trades / len(trades))
winrate = nb_win_trades / len(trades)
loserate = nb_loss_trades / len(trades)
expectancy = (winrate * average_win) - (loserate * average_loss)
if (average_loss > 0):
if average_loss > 0:
risk_reward_ratio = average_win / average_loss
expectancy_ratio = ((1 + risk_reward_ratio) * winrate) - 1
return expectancy, expectancy_ratio
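Worked numbers for the two formulas (illustrative trade set):

# 10 trades: 6 winners summing to 30, 4 losers summing to -12
average_win = 30 / 6    # 5.0
average_loss = 12 / 4   # 3.0
winrate, loserate = 0.6, 0.4
expectancy = winrate * average_win - loserate * average_loss  # 3.0 - 1.2 = 1.8
risk_reward_ratio = average_win / average_loss                # ~1.667
expectancy_ratio = (1 + risk_reward_ratio) * winrate - 1      # 1.6 - 1 = 0.6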
def calculate_sortino(trades: pd.DataFrame, min_date: datetime, max_date: datetime,
starting_balance: float) -> float:
def calculate_sortino(
trades: pd.DataFrame, min_date: datetime, max_date: datetime, starting_balance: float
) -> float:
"""
Calculate sortino
:param trades: DataFrame containing trades (requires columns profit_abs)
@@ -275,12 +297,12 @@ def calculate_sortino(trades: pd.DataFrame, min_date: datetime, max_date: dateti
if (len(trades) == 0) or (min_date is None) or (max_date is None) or (min_date == max_date):
return 0
total_profit = trades['profit_abs'] / starting_balance
total_profit = trades["profit_abs"] / starting_balance
days_period = max(1, (max_date - min_date).days)
expected_returns_mean = total_profit.sum() / days_period
down_stdev = np.std(trades.loc[trades['profit_abs'] < 0, 'profit_abs'] / starting_balance)
down_stdev = np.std(trades.loc[trades["profit_abs"] < 0, "profit_abs"] / starting_balance)
if down_stdev != 0 and not np.isnan(down_stdev):
sortino_ratio = expected_returns_mean / down_stdev * np.sqrt(365)
@@ -292,8 +314,9 @@ def calculate_sortino(trades: pd.DataFrame, min_date: datetime, max_date: dateti
return sortino_ratio
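Worked numbers, assuming a 1000-unit starting balance:

# 30-day window with profit_abs summing to 60 -> daily mean return 0.06 / 30 = 0.002
# a downside deviation of 0.01 (losing trades only) then annualises to:
sortino = 0.002 / 0.01 * 365 ** 0.5  # ~3.82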
def calculate_sharpe(trades: pd.DataFrame, min_date: datetime, max_date: datetime,
starting_balance: float) -> float:
def calculate_sharpe(
trades: pd.DataFrame, min_date: datetime, max_date: datetime, starting_balance: float
) -> float:
"""
Calculate sharpe
:param trades: DataFrame containing trades (requires column profit_abs)
@@ -302,7 +325,7 @@ def calculate_sharpe(trades: pd.DataFrame, min_date: datetime, max_date: datetim
if (len(trades) == 0) or (min_date is None) or (max_date is None) or (min_date == max_date):
return 0
total_profit = trades['profit_abs'] / starting_balance
total_profit = trades["profit_abs"] / starting_balance
days_period = max(1, (max_date - min_date).days)
expected_returns_mean = total_profit.sum() / days_period
@@ -318,8 +341,9 @@ def calculate_sharpe(trades: pd.DataFrame, min_date: datetime, max_date: datetim
return sharp_ratio
def calculate_calmar(trades: pd.DataFrame, min_date: datetime, max_date: datetime,
starting_balance: float) -> float:
def calculate_calmar(
trades: pd.DataFrame, min_date: datetime, max_date: datetime, starting_balance: float
) -> float:
"""
Calculate calmar
:param trades: DataFrame containing trades (requires columns close_date and profit_abs)
@@ -328,7 +352,7 @@ def calculate_calmar(trades: pd.DataFrame, min_date: datetime, max_date: datetim
if (len(trades) == 0) or (min_date is None) or (max_date is None) or (min_date == max_date):
return 0
total_profit = trades['profit_abs'].sum() / starting_balance
total_profit = trades["profit_abs"].sum() / starting_balance
days_period = max(1, (max_date - min_date).days)
# adding slippage of 0.1% per trade
@@ -337,9 +361,10 @@ def calculate_calmar(trades: pd.DataFrame, min_date: datetime, max_date: datetim
# calculate max drawdown
try:
_, _, _, _, _, max_drawdown = calculate_max_drawdown(
drawdown = calculate_max_drawdown(
trades, value_col="profit_abs", starting_balance=starting_balance
)
max_drawdown = drawdown.relative_account_drawdown
except ValueError:
max_drawdown = 0

View File

@@ -1,5 +1,6 @@
# pragma pylint: disable=W0603
""" Edge positioning package """
"""Edge positioning package"""
import logging
from collections import defaultdict
from copy import deepcopy
@@ -46,48 +47,49 @@ class Edge:
_cached_pairs: Dict[str, Any] = {} # Keeps a list of pairs
def __init__(self, config: Config, exchange, strategy) -> None:
self.config = config
self.exchange = exchange
self.strategy: IStrategy = strategy
self.edge_config = self.config.get('edge', {})
self.edge_config = self.config.get("edge", {})
self._cached_pairs: Dict[str, Any] = {} # Keeps a list of pairs
self._final_pairs: list = []
# checking max_open_trades. It should be -1, as with Edge
# the number of trades is determined by position size
if self.config['max_open_trades'] != float('inf'):
logger.critical('max_open_trades should be -1 in config !')
if self.config["max_open_trades"] != float("inf"):
logger.critical("max_open_trades should be -1 in config !")
if self.config['stake_amount'] != UNLIMITED_STAKE_AMOUNT:
raise OperationalException('Edge works only with unlimited stake amount')
if self.config["stake_amount"] != UNLIMITED_STAKE_AMOUNT:
raise OperationalException("Edge works only with unlimited stake amount")
self._capital_ratio: float = self.config['tradable_balance_ratio']
self._allowed_risk: float = self.edge_config.get('allowed_risk')
self._since_number_of_days: int = self.edge_config.get('calculate_since_number_of_days', 14)
self._capital_ratio: float = self.config["tradable_balance_ratio"]
self._allowed_risk: float = self.edge_config.get("allowed_risk")
self._since_number_of_days: int = self.edge_config.get("calculate_since_number_of_days", 14)
self._last_updated: int = 0 # Timestamp of pairs last updated time
self._refresh_pairs = True
self._stoploss_range_min = float(self.edge_config.get('stoploss_range_min', -0.01))
self._stoploss_range_max = float(self.edge_config.get('stoploss_range_max', -0.05))
self._stoploss_range_step = float(self.edge_config.get('stoploss_range_step', -0.001))
self._stoploss_range_min = float(self.edge_config.get("stoploss_range_min", -0.01))
self._stoploss_range_max = float(self.edge_config.get("stoploss_range_max", -0.05))
self._stoploss_range_step = float(self.edge_config.get("stoploss_range_step", -0.001))
# calculating stoploss range
self._stoploss_range = np.arange(
self._stoploss_range_min,
self._stoploss_range_max,
self._stoploss_range_step
self._stoploss_range_min, self._stoploss_range_max, self._stoploss_range_step
)
self._timerange: TimeRange = TimeRange.parse_timerange(
f"{(dt_now() - timedelta(days=self._since_number_of_days)).strftime('%Y%m%d')}-")
if config.get('fee'):
self.fee = config['fee']
f"{(dt_now() - timedelta(days=self._since_number_of_days)).strftime('%Y%m%d')}-"
)
if config.get("fee"):
self.fee = config["fee"]
else:
try:
self.fee = self.exchange.get_fee(symbol=expand_pairlist(
self.config['exchange']['pair_whitelist'], list(self.exchange.markets))[0])
self.fee = self.exchange.get_fee(
symbol=expand_pairlist(
self.config["exchange"]["pair_whitelist"], list(self.exchange.markets)
)[0]
)
except IndexError:
self.fee = None
@@ -95,28 +97,30 @@ class Edge:
if self.fee is None and pairs:
self.fee = self.exchange.get_fee(pairs[0])
heartbeat = self.edge_config.get('process_throttle_secs')
heartbeat = self.edge_config.get("process_throttle_secs")
if (self._last_updated > 0) and (
self._last_updated + heartbeat > int(dt_now().timestamp())):
self._last_updated + heartbeat > int(dt_now().timestamp())
):
return False
data: Dict[str, Any] = {}
logger.info('Using stake_currency: %s ...', self.config['stake_currency'])
logger.info('Using local backtesting data (using whitelist in given config) ...')
logger.info("Using stake_currency: %s ...", self.config["stake_currency"])
logger.info("Using local backtesting data (using whitelist in given config) ...")
if self._refresh_pairs:
timerange_startup = deepcopy(self._timerange)
timerange_startup.subtract_start(timeframe_to_seconds(
self.strategy.timeframe) * self.strategy.startup_candle_count)
timerange_startup.subtract_start(
timeframe_to_seconds(self.strategy.timeframe) * self.strategy.startup_candle_count
)
refresh_data(
datadir=self.config['datadir'],
datadir=self.config["datadir"],
pairs=pairs,
exchange=self.exchange,
timeframe=self.strategy.timeframe,
timerange=timerange_startup,
data_format=self.config['dataformat_ohlcv'],
candle_type=self.config.get('candle_type_def', CandleType.SPOT),
data_format=self.config["dataformat_ohlcv"],
candle_type=self.config.get("candle_type_def", CandleType.SPOT),
)
# Download informative pairs too
res = defaultdict(list)
@@ -124,26 +128,27 @@ class Edge:
res[timeframe].append(pair)
for timeframe, inf_pairs in res.items():
timerange_startup = deepcopy(self._timerange)
timerange_startup.subtract_start(timeframe_to_seconds(
timeframe) * self.strategy.startup_candle_count)
timerange_startup.subtract_start(
timeframe_to_seconds(timeframe) * self.strategy.startup_candle_count
)
refresh_data(
datadir=self.config['datadir'],
datadir=self.config["datadir"],
pairs=inf_pairs,
exchange=self.exchange,
timeframe=timeframe,
timerange=timerange_startup,
data_format=self.config['dataformat_ohlcv'],
candle_type=self.config.get('candle_type_def', CandleType.SPOT),
data_format=self.config["dataformat_ohlcv"],
candle_type=self.config.get("candle_type_def", CandleType.SPOT),
)
data = load_data(
datadir=self.config['datadir'],
datadir=self.config["datadir"],
pairs=pairs,
timeframe=self.strategy.timeframe,
timerange=self._timerange,
startup_candles=self.strategy.startup_candle_count,
data_format=self.config['dataformat_ohlcv'],
candle_type=self.config.get('candle_type_def', CandleType.SPOT),
data_format=self.config["dataformat_ohlcv"],
candle_type=self.config.get("candle_type_def", CandleType.SPOT),
)
if not data:
@@ -152,27 +157,29 @@ class Edge:
logger.critical("No data found. Edge is stopped ...")
return False
# Fake run-mode to Edge
prior_rm = self.config['runmode']
self.config['runmode'] = RunMode.EDGE
prior_rm = self.config["runmode"]
self.config["runmode"] = RunMode.EDGE
preprocessed = self.strategy.advise_all_indicators(data)
self.config['runmode'] = prior_rm
self.config["runmode"] = prior_rm
# Print timeframe
min_date, max_date = get_timerange(preprocessed)
logger.info(f'Measuring data from {min_date.strftime(DATETIME_PRINT_FORMAT)} '
f'up to {max_date.strftime(DATETIME_PRINT_FORMAT)} '
f'({(max_date - min_date).days} days)..')
logger.info(
f"Measuring data from {min_date.strftime(DATETIME_PRINT_FORMAT)} "
f"up to {max_date.strftime(DATETIME_PRINT_FORMAT)} "
f"({(max_date - min_date).days} days).."
)
# TODO: Should edge support shorts? needs to be investigated further
# * (add enter_short exit_short)
headers = ['date', 'open', 'high', 'low', 'close', 'enter_long', 'exit_long']
headers = ["date", "open", "high", "low", "close", "enter_long", "exit_long"]
trades: list = []
for pair, pair_data in preprocessed.items():
# Sorting dataframe by date and reset index
pair_data = pair_data.sort_values(by=['date'])
pair_data = pair_data.sort_values(by=["date"])
pair_data = pair_data.reset_index(drop=True)
df_analyzed = self.strategy.ft_advise_signals(pair_data, {'pair': pair})[headers].copy()
df_analyzed = self.strategy.ft_advise_signals(pair_data, {"pair": pair})[headers].copy()
trades += self._find_trades_for_stoploss_range(df_analyzed, pair, self._stoploss_range)
@@ -188,8 +195,9 @@ class Edge:
return True
def stake_amount(self, pair: str, free_capital: float,
total_capital: float, capital_in_trade: float) -> float:
def stake_amount(
self, pair: str, free_capital: float, total_capital: float, capital_in_trade: float
) -> float:
stoploss = self.get_stoploss(pair)
available_capital = (total_capital + capital_in_trade) * self._capital_ratio
allowed_capital_at_risk = available_capital * self._allowed_risk
@@ -198,14 +206,18 @@ class Edge:
position_size = min(min(max_position_size, free_capital), available_capital)
if pair in self._cached_pairs:
logger.info(
'winrate: %s, expectancy: %s, position size: %s, pair: %s,'
' capital in trade: %s, free capital: %s, total capital: %s,'
' stoploss: %s, available capital: %s.',
"winrate: %s, expectancy: %s, position size: %s, pair: %s,"
" capital in trade: %s, free capital: %s, total capital: %s,"
" stoploss: %s, available capital: %s.",
self._cached_pairs[pair].winrate,
self._cached_pairs[pair].expectancy,
position_size, pair,
capital_in_trade, free_capital, total_capital,
stoploss, available_capital
position_size,
pair,
capital_in_trade,
free_capital,
total_capital,
stoploss,
available_capital,
)
return round(position_size, 15)
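Worked numbers for the sizing above (assuming the elided lines compute max_position_size as the allowed capital-at-risk divided by the absolute stoploss; all values illustrative):

available_capital = (1000 + 100) * 0.5              # total + in-trade, capital ratio 0.5 -> 550.0
allowed_capital_at_risk = available_capital * 0.01  # allowed_risk 1% -> 5.5
max_position_size = allowed_capital_at_risk / 0.02  # |stoploss| 2% -> 275.0
position_size = min(min(max_position_size, 400.0), available_capital)  # free capital 400 -> 275.0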
@@ -213,8 +225,10 @@ class Edge:
if pair in self._cached_pairs:
return self._cached_pairs[pair].stoploss
else:
logger.warning(f'Tried to access stoploss of non-existing pair {pair}, '
'strategy stoploss is returned instead.')
logger.warning(
f"Tried to access stoploss of non-existing pair {pair}, "
"strategy stoploss is returned instead."
)
return self.strategy.stoploss
def adjust(self, pairs: List[str]) -> list:
@@ -224,8 +238,8 @@ class Edge:
final = []
for pair, info in self._cached_pairs.items():
if (
info.expectancy > float(self.edge_config.get('minimum_expectancy', 0.2))
and info.winrate > float(self.edge_config.get('minimum_winrate', 0.60))
info.expectancy > float(self.edge_config.get("minimum_expectancy", 0.2))
and info.winrate > float(self.edge_config.get("minimum_winrate", 0.60))
and pair in pairs
):
final.append(pair)
@@ -234,14 +248,14 @@ class Edge:
self._final_pairs = final
if self._final_pairs:
logger.info(
'Minimum expectancy and minimum winrate are met only for %s,'
' so other pairs are filtered out.',
self._final_pairs
"Minimum expectancy and minimum winrate are met only for %s,"
" so other pairs are filtered out.",
self._final_pairs,
)
else:
logger.info(
'Edge removed all pairs as no pair with minimum expectancy '
'and minimum winrate was found !'
"Edge removed all pairs as no pair with minimum expectancy "
"and minimum winrate was found !"
)
return self._final_pairs
@@ -252,14 +266,17 @@ class Edge:
"""
final = []
for pair, info in self._cached_pairs.items():
if (info.expectancy > float(self.edge_config.get('minimum_expectancy', 0.2)) and
info.winrate > float(self.edge_config.get('minimum_winrate', 0.60))):
final.append({
'Pair': pair,
'Winrate': info.winrate,
'Expectancy': info.expectancy,
'Stoploss': info.stoploss,
})
if info.expectancy > float(
self.edge_config.get("minimum_expectancy", 0.2)
) and info.winrate > float(self.edge_config.get("minimum_winrate", 0.60)):
final.append(
{
"Pair": pair,
"Winrate": info.winrate,
"Expectancy": info.expectancy,
"Stoploss": info.stoploss,
}
)
return final
def _fill_calculable_fields(self, result: DataFrame) -> DataFrame:
@@ -279,28 +296,29 @@ class Edge:
# All returned values are relative, they are defined as ratios.
stake = 0.015
result['trade_duration'] = result['close_date'] - result['open_date']
result["trade_duration"] = result["close_date"] - result["open_date"]
result['trade_duration'] = result['trade_duration'].map(
lambda x: int(x.total_seconds() / 60))
result["trade_duration"] = result["trade_duration"].map(
lambda x: int(x.total_seconds() / 60)
)
# Spends, Takes, Profit, Absolute Profit
# Buy Price
result['buy_vol'] = stake / result['open_rate'] # How many target are we buying
result['buy_fee'] = stake * self.fee
result['buy_spend'] = stake + result['buy_fee'] # How much we're spending
result["buy_vol"] = stake / result["open_rate"] # How many target are we buying
result["buy_fee"] = stake * self.fee
result["buy_spend"] = stake + result["buy_fee"] # How much we're spending
# Sell price
result['sell_sum'] = result['buy_vol'] * result['close_rate']
result['sell_fee'] = result['sell_sum'] * self.fee
result['sell_take'] = result['sell_sum'] - result['sell_fee']
result["sell_sum"] = result["buy_vol"] * result["close_rate"]
result["sell_fee"] = result["sell_sum"] * self.fee
result["sell_take"] = result["sell_sum"] - result["sell_fee"]
# profit_ratio
result['profit_ratio'] = (result['sell_take'] - result['buy_spend']) / result['buy_spend']
result["profit_ratio"] = (result["sell_take"] - result["buy_spend"]) / result["buy_spend"]
# Absolute profit
result['profit_abs'] = result['sell_take'] - result['buy_spend']
result["profit_abs"] = result["sell_take"] - result["buy_spend"]
return result
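Worked numbers for a single row (stake 0.015 as above; the 0.1% fee is illustrative):

fee = 0.001
stake = 0.015
open_rate, close_rate = 100.0, 110.0
buy_vol = stake / open_rate            # 0.00015
buy_spend = stake + stake * fee        # 0.015015
sell_sum = buy_vol * close_rate        # 0.0165
sell_take = sell_sum - sell_sum * fee  # 0.0164835
profit_ratio = (sell_take - buy_spend) / buy_spend  # ~0.0978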
@@ -310,8 +328,8 @@ class Edge:
The calculation will be done per pair and per strategy.
"""
# Removing pairs having less than min_trades_number
min_trades_number = self.edge_config.get('min_trade_number', 10)
results = results.groupby(['pair', 'stoploss']).filter(lambda x: len(x) > min_trades_number)
min_trades_number = self.edge_config.get("min_trade_number", 10)
results = results.groupby(["pair", "stoploss"]).filter(lambda x: len(x) > min_trades_number)
###################################
# Removing outliers (Only Pumps) from the dataset
@@ -319,13 +337,15 @@ class Edge:
# Then every value more than (standard deviation + 2*average) is out (pump)
#
# Removing Pumps
if self.edge_config.get('remove_pumps', False):
results = results[results['profit_abs'] < 2 * results['profit_abs'].std()
+ results['profit_abs'].mean()]
if self.edge_config.get("remove_pumps", False):
results = results[
results["profit_abs"]
< 2 * results["profit_abs"].std() + results["profit_abs"].mean()
]
##########################################################################
# Removing trades having a duration more than X minutes (set in config)
max_trade_duration = self.edge_config.get('max_trade_duration_minute', 1440)
max_trade_duration = self.edge_config.get("max_trade_duration_minute", 1440)
results = results[results.trade_duration < max_trade_duration]
#######################################################################
@@ -333,44 +353,54 @@ class Edge:
return {}
groupby_aggregator = {
'profit_abs': [
('nb_trades', 'count'), # number of all trades
('profit_sum', lambda x: x[x > 0].sum()), # cumulative profit of all winning trades
('loss_sum', lambda x: abs(x[x < 0].sum())), # cumulative loss of all losing trades
('nb_win_trades', lambda x: x[x > 0].count()) # number of winning trades
"profit_abs": [
("nb_trades", "count"), # number of all trades
("profit_sum", lambda x: x[x > 0].sum()), # cumulative profit of all winning trades
("loss_sum", lambda x: abs(x[x < 0].sum())), # cumulative loss of all losing trades
("nb_win_trades", lambda x: x[x > 0].count()), # number of winning trades
],
'trade_duration': [('avg_trade_duration', 'mean')]
"trade_duration": [("avg_trade_duration", "mean")],
}
# Group by (pair and stoploss) by applying above aggregator
df = results.groupby(['pair', 'stoploss'])[['profit_abs', 'trade_duration']].agg(
groupby_aggregator).reset_index(col_level=1)
df = (
results.groupby(["pair", "stoploss"])[["profit_abs", "trade_duration"]]
.agg(groupby_aggregator)
.reset_index(col_level=1)
)
# Dropping level 0 as we don't need it
df.columns = df.columns.droplevel(0)
# Calculating number of losing trades, average win and average loss
df['nb_loss_trades'] = df['nb_trades'] - df['nb_win_trades']
df['average_win'] = np.where(df['nb_win_trades'] == 0, 0.0,
df['profit_sum'] / df['nb_win_trades'])
df['average_loss'] = np.where(df['nb_loss_trades'] == 0, 0.0,
df['loss_sum'] / df['nb_loss_trades'])
df["nb_loss_trades"] = df["nb_trades"] - df["nb_win_trades"]
df["average_win"] = np.where(
df["nb_win_trades"] == 0, 0.0, df["profit_sum"] / df["nb_win_trades"]
)
df["average_loss"] = np.where(
df["nb_loss_trades"] == 0, 0.0, df["loss_sum"] / df["nb_loss_trades"]
)
# Win rate = number of profitable trades / number of trades
df['winrate'] = df['nb_win_trades'] / df['nb_trades']
df["winrate"] = df["nb_win_trades"] / df["nb_trades"]
# risk_reward_ratio = average win / average loss
df['risk_reward_ratio'] = df['average_win'] / df['average_loss']
df["risk_reward_ratio"] = df["average_win"] / df["average_loss"]
# required_risk_reward = (1 / winrate) - 1
df['required_risk_reward'] = (1 / df['winrate']) - 1
df["required_risk_reward"] = (1 / df["winrate"]) - 1
# expectancy = (risk_reward_ratio * winrate) - (lossrate)
df['expectancy'] = (df['risk_reward_ratio'] * df['winrate']) - (1 - df['winrate'])
df["expectancy"] = (df["risk_reward_ratio"] * df["winrate"]) - (1 - df["winrate"])
# sort by expectancy and stoploss
df = df.sort_values(by=['expectancy', 'stoploss'], ascending=False).groupby(
'pair').first().sort_values(by=['expectancy'], ascending=False).reset_index()
df = (
df.sort_values(by=["expectancy", "stoploss"], ascending=False)
.groupby("pair")
.first()
.sort_values(by=["expectancy"], ascending=False)
.reset_index()
)
final = {}
for x in df.itertuples():
@@ -381,17 +411,17 @@ class Edge:
x.required_risk_reward,
x.expectancy,
x.nb_trades,
x.avg_trade_duration
x.avg_trade_duration,
)
# Returning a list of pairs in order of "expectancy"
return final
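Worked numbers for the per-pair metrics above (illustrative):

winrate = 0.6
risk_reward_ratio = 1.5
required_risk_reward = 1 / winrate - 1                    # ~0.667
expectancy = risk_reward_ratio * winrate - (1 - winrate)  # 0.9 - 0.4 = 0.5
# 0.5 clears the default minimum_expectancy of 0.2 used in adjust().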
def _find_trades_for_stoploss_range(self, df, pair: str, stoploss_range) -> list:
buy_column = df['enter_long'].values
sell_column = df['exit_long'].values
date_column = df['date'].values
ohlc_columns = df[['open', 'high', 'low', 'close']].values
buy_column = df["enter_long"].values
sell_column = df["exit_long"].values
date_column = df["date"].values
ohlc_columns = df[["open", "high", "low", "close"]].values
result: list = []
for stoploss in stoploss_range:
@@ -401,8 +431,9 @@ class Edge:
return result
def _detect_next_stop_or_sell_point(self, buy_column, sell_column, date_column,
ohlc_columns, stoploss, pair: str):
def _detect_next_stop_or_sell_point(
self, buy_column, sell_column, date_column, ohlc_columns, stoploss, pair: str
):
"""
Iterate through ohlc_columns in order to find the next trade
Next trade opens from the first buy signal noticed to
@@ -429,27 +460,28 @@ class Edge:
open_trade_index += 1
open_price = ohlc_columns[open_trade_index, 0]
stop_price = (open_price * (stoploss + 1))
stop_price = open_price * (stoploss + 1)
# Searching for the index where stoploss is hit
stop_index = utf1st.find_1st(
ohlc_columns[open_trade_index:, 2], stop_price, utf1st.cmp_smaller)
ohlc_columns[open_trade_index:, 2], stop_price, utf1st.cmp_smaller
)
# If we don't find it, we assume stop_index will be far in the future (infinite number)
if stop_index == -1:
stop_index = float('inf')
stop_index = float("inf")
# Searching for the index where sell is hit
sell_index = utf1st.find_1st(sell_column[open_trade_index:], 1, utf1st.cmp_equal)
# If we don't find it, we assume sell_index will be far in the future (infinite number)
if sell_index == -1:
sell_index = float('inf')
sell_index = float("inf")
# Check if we don't find any stop or sell point (in that case trade remains open)
# It is not interesting for Edge to consider it so we simply ignore the trade
# And stop iterating, as there is no more entry
if stop_index == sell_index == float('inf'):
if stop_index == sell_index == float("inf"):
break
if stop_index <= sell_index:
@@ -467,17 +499,18 @@ class Edge:
exit_type = ExitType.EXIT_SIGNAL
exit_price = ohlc_columns[exit_index, 0]
trade = {'pair': pair,
'stoploss': stoploss,
'profit_ratio': '',
'profit_abs': '',
'open_date': date_column[open_trade_index],
'close_date': date_column[exit_index],
'trade_duration': '',
'open_rate': round(open_price, 15),
'close_rate': round(exit_price, 15),
'exit_type': exit_type
}
trade = {
"pair": pair,
"stoploss": stoploss,
"profit_ratio": "",
"profit_abs": "",
"open_date": date_column[open_trade_index],
"close_date": date_column[exit_index],
"trade_duration": "",
"open_rate": round(open_price, 15),
"close_rate": round(exit_price, 15),
"exit_type": exit_type,
}
result.append(trade)
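Worked numbers for the stop search above:

open_price = 100.0
stoploss = -0.03
stop_price = open_price * (stoploss + 1)  # 97.0
# find_1st then scans the low column (index 2 of ohlc_columns) for the first
# candle whose low drops below 97.0; a -1 result means "never", mapped to inf.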

View File

@@ -5,6 +5,7 @@ class BacktestState(Enum):
"""
Bot application states
"""
STARTUP = 1
DATALOAD = 2
ANALYZE = 3

View File

@@ -3,6 +3,7 @@ from enum import Enum
class CandleType(str, Enum):
"""Enum to distinguish candle types"""
SPOT = "spot"
FUTURES = "futures"
MARK = "mark"
@@ -17,14 +18,14 @@ class CandleType(str, Enum):
return f"{self.name.lower()}"
@staticmethod
def from_string(value: str) -> 'CandleType':
def from_string(value: str) -> "CandleType":
if not value:
# Default to spot
return CandleType.SPOT
return CandleType(value)
@staticmethod
def get_default(trading_mode: str) -> 'CandleType':
if trading_mode == 'futures':
def get_default(trading_mode: str) -> "CandleType":
if trading_mode == "futures":
return CandleType.FUTURES
return CandleType.SPOT
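Usage stays string-friendly; a quick sketch:

assert CandleType.from_string("") is CandleType.SPOT  # empty input defaults to spot
assert CandleType.from_string("futures") is CandleType.FUTURES
assert CandleType.get_default("futures") is CandleType.FUTURES
assert CandleType.get_default("spot") is CandleType.SPOT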

View File

@@ -5,10 +5,11 @@ class ExitCheckTuple:
"""
NamedTuple for Exit type + reason
"""
exit_type: ExitType
exit_reason: str = ''
def __init__(self, exit_type: ExitType, exit_reason: str = ''):
exit_type: ExitType
exit_reason: str = ""
def __init__(self, exit_type: ExitType, exit_reason: str = ""):
self.exit_type = exit_type
self.exit_reason = exit_reason or exit_type.value

View File

@@ -5,6 +5,7 @@ class ExitType(Enum):
"""
Enum to distinguish between exit reasons
"""
ROI = "roi"
STOP_LOSS = "stop_loss"
STOPLOSS_ON_EXCHANGE = "stoploss_on_exchange"

View File

@@ -2,7 +2,8 @@ from enum import Enum
class HyperoptState(Enum):
""" Hyperopt states """
"""Hyperopt states"""
STARTUP = 1
DATALOAD = 2
INDICATORS = 3
