Merge branch 'develop' into optuna

Author: Matthias
Date: 2025-05-08 19:25:33 +02:00

156 changed files with 12489 additions and 5844 deletions

View File

@@ -12,7 +12,7 @@ Have you searched for similar issues before posting it?
If you have discovered a bug in the bot, please [search the issue tracker](https://github.com/freqtrade/freqtrade/issues?q=is%3Aissue).
If it hasn't been reported, please create a new issue.
Please do not use bug reports to request new features.
Please do not use the bug report template to request new features.
-->
## Describe your environment

View File

@@ -8,9 +8,12 @@ assignees: ''
---
<!--
Have you searched for similar issues before posting it?
Did you have a VERY good look at the [documentation](https://www.freqtrade.io/en/latest/) and are sure that the question is not explained there
Did you have a VERY good look at the [documentation](https://www.freqtrade.io/) and are sure that the question is not explained there
Please do not use the question template to report bugs or to request new features.
Has your strategy or configuration been generated by an AI model, and is now not working?
Please consult the documentation. We'll close such issues and point to the documentation.
-->
## Describe your environment
@@ -22,4 +25,4 @@ Please do not use the question template to report bugs or to request new feature
## Your question
*Ask the question you have not been able to find an answer in the [Documentation](https://www.freqtrade.io/en/latest/)*
*Ask the question you have not been able to find an answer in the [Documentation](https://www.freqtrade.io/)*

View File

@@ -34,7 +34,7 @@ jobs:
run: python build_helpers/binance_update_lev_tiers.py
- uses: peter-evans/create-pull-request@v7
- uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7.0.8
with:
token: ${{ secrets.REPO_SCOPED_TOKEN }}
add-paths: freqtrade/exchange/binance_leverage_tiers.json

View File

@@ -38,8 +38,9 @@ jobs:
python-version: ${{ matrix.python-version }}
- name: Install uv
uses: astral-sh/setup-uv@v5
uses: astral-sh/setup-uv@6b9c6063abd6010835644d4c2e1bef4cf5cd0fca # v6.0.1
with:
activate-environment: true
enable-cache: true
python-version: ${{ matrix.python-version }}
cache-dependency-glob: "requirements**.txt"
@@ -144,7 +145,7 @@ jobs:
mypy freqtrade scripts tests
- name: Discord notification
uses: rjstone/discord-webhook-notify@v1
uses: rjstone/discord-webhook-notify@1399c1b2d57cc05894d506d2cfdc33c5f012b993 #v1.1.1
if: failure() && ( github.event_name != 'pull_request' || github.event.pull_request.head.repo.fork == false)
with:
severity: error
@@ -170,8 +171,9 @@ jobs:
check-latest: true
- name: Install uv
uses: astral-sh/setup-uv@v5
uses: astral-sh/setup-uv@6b9c6063abd6010835644d4c2e1bef4cf5cd0fca # v6.0.1
with:
activate-environment: true
enable-cache: true
python-version: ${{ matrix.python-version }}
cache-dependency-glob: "requirements**.txt"
@@ -270,7 +272,7 @@ jobs:
mypy freqtrade scripts
- name: Discord notification
uses: rjstone/discord-webhook-notify@v1
uses: rjstone/discord-webhook-notify@1399c1b2d57cc05894d506d2cfdc33c5f012b993 #v1.1.1
if: failure() && ( github.event_name != 'pull_request' || github.event.pull_request.head.repo.fork == false)
with:
severity: info
@@ -296,8 +298,9 @@ jobs:
python-version: ${{ matrix.python-version }}
- name: Install uv
uses: astral-sh/setup-uv@v5
uses: astral-sh/setup-uv@6b9c6063abd6010835644d4c2e1bef4cf5cd0fca # v6.0.1
with:
activate-environment: true
enable-cache: true
python-version: ${{ matrix.python-version }}
cache-dependency-glob: "requirements**.txt"
@@ -363,7 +366,7 @@ jobs:
shell: powershell
- name: Discord notification
uses: rjstone/discord-webhook-notify@v1
uses: rjstone/discord-webhook-notify@1399c1b2d57cc05894d506d2cfdc33c5f012b993 #v1.1.1
if: failure() && ( github.event_name != 'pull_request' || github.event.pull_request.head.repo.fork == false)
with:
severity: error
@@ -397,7 +400,7 @@ jobs:
- uses: actions/setup-python@v5
with:
python-version: "3.12"
- uses: pre-commit/action@v3.0.1
- uses: pre-commit/action@2c7b3805fd2a0fd8c1884dcaebf91fc102a13ecd # v3.0.1
docs-check:
runs-on: ubuntu-22.04
@@ -421,7 +424,7 @@ jobs:
mkdocs build
- name: Discord notification
uses: rjstone/discord-webhook-notify@v1
uses: rjstone/discord-webhook-notify@1399c1b2d57cc05894d506d2cfdc33c5f012b993 #v1.1.1
if: failure() && ( github.event_name != 'pull_request' || github.event.pull_request.head.repo.fork == false)
with:
severity: error
@@ -431,7 +434,7 @@ jobs:
build-linux-online:
# Run pytest with "live" checks
runs-on: ubuntu-22.04
runs-on: ubuntu-24.04
steps:
- uses: actions/checkout@v4
with:
@@ -443,8 +446,9 @@ jobs:
python-version: "3.12"
- name: Install uv
uses: astral-sh/setup-uv@v5
uses: astral-sh/setup-uv@6b9c6063abd6010835644d4c2e1bef4cf5cd0fca # v6.0.1
with:
activate-environment: true
enable-cache: true
python-version: "3.12"
cache-dependency-glob: "requirements**.txt"
@@ -501,14 +505,14 @@ jobs:
- name: Check user permission
id: check
uses: scherermichael-oss/action-has-permission@1.0.6
uses: scherermichael-oss/action-has-permission@136e061bfe093832d87f090dd768e14e27a740d3 # 1.0.6
with:
required-permission: write
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Discord notification
uses: rjstone/discord-webhook-notify@v1
uses: rjstone/discord-webhook-notify@1399c1b2d57cc05894d506d2cfdc33c5f012b993 #v1.1.1
if: always() && steps.check.outputs.has-permission && ( github.event_name != 'pull_request' || github.event.pull_request.head.repo.fork == false)
with:
severity: info
@@ -580,7 +584,7 @@ jobs:
merge-multiple: true
- name: Publish to PyPI (Test)
uses: pypa/gh-action-pypi-publish@v1.12.4
uses: pypa/gh-action-pypi-publish@76f52bc884231f62b9a034ebfe128415bbaabdfc # v1.12.4
with:
repository-url: https://test.pypi.org/legacy/
@@ -609,7 +613,7 @@ jobs:
merge-multiple: true
- name: Publish to PyPI
uses: pypa/gh-action-pypi-publish@v1.12.4
uses: pypa/gh-action-pypi-publish@76f52bc884231f62b9a034ebfe128415bbaabdfc # v1.12.4
deploy-docker:
@@ -650,11 +654,11 @@ jobs:
docker version -f '{{.Server.Experimental}}'
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3.6.0
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v3
uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 #v3.10.0
- name: Available platforms
run: echo ${PLATFORMS}
@@ -703,7 +707,7 @@ jobs:
build_helpers/publish_docker_arm64.sh
- name: Discord notification
uses: rjstone/discord-webhook-notify@v1
uses: rjstone/discord-webhook-notify@1399c1b2d57cc05894d506d2cfdc33c5f012b993 #v1.1.1
if: always() && ( github.event_name != 'pull_request' || github.event.pull_request.head.repo.fork == false) && (github.event_name != 'schedule')
with:
severity: info

View File

@@ -28,13 +28,13 @@ jobs:
with:
persist-credentials: false
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Pre-build dev container image
uses: devcontainers/ci@v0.3
uses: devcontainers/ci@8bf61b26e9c3a98f69cb6ce2f88d24ff59b785c6 # v0.3.19
with:
subFolder: .github
imageName: ghcr.io/${{ github.repository }}-devcontainer

View File

@@ -16,7 +16,7 @@ jobs:
persist-credentials: false
- name: Docker Hub Description
uses: peter-evans/dockerhub-description@v4
uses: peter-evans/dockerhub-description@432a30c9e07499fd01da9f8a49f0faf9e0ca5b77 # v4.0.2
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}

View File

@@ -28,7 +28,7 @@ jobs:
- name: Run auto-update
run: pre-commit autoupdate
- uses: peter-evans/create-pull-request@v7
- uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7.0.8
with:
token: ${{ secrets.REPO_SCOPED_TOKEN }}
add-paths: .pre-commit-config.yaml

View File

@@ -1,8 +1,20 @@
# See https://pre-commit.com for more information
# See https://pre-commit.com/hooks.html for more hooks
repos:
- repo: local
# Keep json schema in sync with the config schema
# This will write the files - and fail pre-commit if a file has been changed.
hooks:
- id: Extract config json schema
name: extract-config-json-schema
entry: "python build_helpers/extract_config_json_schema.py"
language: python
pass_filenames: false
additional_dependencies: ["python-rapidjson", "jsonschema"]
- repo: https://github.com/pycqa/flake8
rev: "7.1.2"
rev: "7.2.0"
hooks:
- id: flake8
additional_dependencies: [Flake8-pyproject]
@@ -16,10 +28,10 @@ repos:
additional_dependencies:
- types-cachetools==5.5.0.20240820
- types-filelock==3.2.7
- types-requests==2.32.0.20250306
- types-requests==2.32.0.20250328
- types-tabulate==0.9.0.20241207
- types-python-dateutil==2.9.0.20241206
- SQLAlchemy==2.0.39
- SQLAlchemy==2.0.40
# stages: [push]
- repo: https://github.com/pycqa/isort
@@ -31,7 +43,7 @@ repos:
- repo: https://github.com/charliermarsh/ruff-pre-commit
# Ruff version.
rev: 'v0.11.2'
rev: 'v0.11.8'
hooks:
- id: ruff
- id: ruff-format
@@ -70,6 +82,6 @@ repos:
# Ensure github actions remain safe
- repo: https://github.com/woodruffw/zizmor-pre-commit
rev: v1.5.2
rev: v1.6.0
hooks:
- id: zizmor

View File

@@ -1,4 +1,4 @@
FROM python:3.12.9-slim-bookworm as base
FROM python:3.12.10-slim-bookworm as base
# Setup env
ENV LANG C.UTF-8

View File

@@ -12,7 +12,12 @@ secret = os.environ.get("FREQTRADE__EXCHANGE__SECRET")
proxy = os.environ.get("CI_WEB_PROXY")
exchange = ccxt.binance(
{"apiKey": key, "secret": secret, "httpsProxy": proxy, "options": {"defaultType": "swap"}}
    {
        "apiKey": key,
        "secret": secret,
        "httpsProxy": proxy,
        "options": {"defaultType": "swap"},
    }
)
_ = exchange.load_markets()

View File

@@ -1,4 +1,4 @@
import subprocess
import subprocess # noqa: S404, RUF100
from pathlib import Path

View File

@@ -4,10 +4,23 @@ from pathlib import Path
import rapidjson
from freqtrade.configuration.config_schema import CONF_SCHEMA
def extract_config_json_schema():
    try:
        # Try to import from the installed package
        from freqtrade.config_schema import CONF_SCHEMA
    except ImportError:
        # If freqtrade is not installed, add the parent directory to sys.path
        # to import directly from the source
        import sys
        script_dir = Path(__file__).parent
        freqtrade_dir = script_dir.parent
        sys.path.insert(0, str(freqtrade_dir))
        # Now try to import from the source
        from freqtrade.config_schema import CONF_SCHEMA
    schema_filename = Path(__file__).parent / "schema.json"
    with schema_filename.open("w") as f:
        rapidjson.dump(CONF_SCHEMA, f, indent=2)

View File

@@ -1032,6 +1032,7 @@
"type": "string",
"enum": [
"running",
"paused",
"stopped"
]
},
@@ -1515,6 +1516,14 @@
"type": "boolean",
"default": false
},
"indicator_periods_candles": {
"description": "Time periods to calculate indicators for. The indicators are added to the base indicator dataset.",
"type": "array",
"items": {
"type": "number",
"minimum": 1
}
},
"use_SVM_to_remove_outliers": {
"description": "Use SVM to remove outliers from the features.",
"type": "boolean",

View File

@@ -1,4 +1,4 @@
FROM python:3.11.11-slim-bookworm as base
FROM python:3.11.12-slim-bookworm as base
# Setup env
ENV LANG C.UTF-8

View File

@@ -177,7 +177,7 @@ sudo loginctl enable-linger "$USER"
If you run the bot as a service, you can use systemd service manager as a software watchdog monitoring freqtrade bot
state and restarting it in the case of failures. If the `internals.sd_notify` parameter is set to true in the
configuration or the `--sd-notify` command line option is used, the bot will send keep-alive ping messages to systemd
using the sd_notify (systemd notifications) protocol and will also tell systemd its current state (Running or Stopped)
using the sd_notify (systemd notifications) protocol and will also tell systemd its current state (Running, Paused or Stopped)
when it changes.
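For background, the sd_notify protocol itself is just short `KEY=VALUE` messages sent to the socket systemd exposes via `NOTIFY_SOCKET`. A minimal sketch using the third-party `sdnotify` package (whether freqtrade uses this exact package internally is an assumption, not something stated in this excerpt):

``` python
import sdnotify  # small third-party helper implementing the sd_notify protocol

notifier = sdnotify.SystemdNotifier()
notifier.notify("READY=1")         # service finished starting up
notifier.notify("STATUS=Running")  # human-readable state shown by `systemctl status`
notifier.notify("WATCHDOG=1")      # keep-alive ping when running under a systemd watchdog
```

Outside of a systemd unit (no `NOTIFY_SOCKET` in the environment), these calls should be harmless no-ops.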
The `freqtrade.service.watchdog` file contains an example of the service unit configuration file which uses systemd
@@ -189,13 +189,15 @@ as the watchdog.
## Advanced Logging
Freqtrade uses the default logging module provided by python.
Python allows for extensive [logging configuration](https://docs.python.org/3/library/logging.config.html#logging.config.dictConfig) in this regards - way more than what can be covered here.
Python allows for extensive [logging configuration](https://docs.python.org/3/library/logging.config.html#logging.config.dictConfig) in this regard - way more than what can be covered here.
Default logging (Colored terminal output) is setup by default if no `log_config` is provided.
Default logging format (coloured terminal output) is set up by default if no `log_config` is provided in your freqtrade configuration.
Using `--logfile logfile.log` will enable the RotatingFileHandler.
If you're not content with the log format - or with the default settings provided for the RotatingFileHandler, you can customize logging to your liking.
The default configuration looks roughly like the below - with the file handler being provided - but not enabled.
If you're not content with the log format, or with the default settings provided for the RotatingFileHandler, you can customize logging to your liking by adding the `log_config` configuration to your freqtrade configuration file(s).
The default configuration looks roughly like the below, with the file handler being provided but not enabled as the `filename` is commented out.
Uncomment this line and supply a valid path/filename to enable it.
``` json hl_lines="5-7 13-16 27"
{
@@ -237,12 +239,12 @@ The default configuration looks roughly like the below - with the file handler b
Highlighted lines in the above code-block define the Rich handler and belong together.
The formatter "standard" and "file" will belong to the FileHandler.
Each handler must use one of the defined formatters (by name) - and it's class must be available and a valid logging class.
To actually use a handler - it must be in the "handlers" section inside the "root" segment.
Each handler must use one of the defined formatters (by name), its class must be available, and must be a valid logging class.
To actually use a handler, it must be in the "handlers" section inside the "root" segment.
If this section is left out, freqtrade will provide no output (in the non-configured handler, anyway).
!!! Tip "Explicit log configuration"
We recommend to extract the logging configuration from your main configuration, and provide it to your bot via [multiple configuration files](configuration.md#multiple-configuration-files) functionality. This will avoid unnecessary code duplication.
We recommend to extract the logging configuration from your main freqtrade configuration file, and provide it to your bot via [multiple configuration files](configuration.md#multiple-configuration-files) functionality. This will avoid unnecessary code duplication.
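To illustrate the formatter/handler/root wiring described above, here is a minimal, generic `dictConfig` sketch built only from stdlib logging classes. It mirrors the shape of a `log_config` block but is not freqtrade's default configuration; the names and values are illustrative assumptions.

``` python
import logging
import logging.config

log_config = {
    "version": 1,
    "formatters": {
        "file": {"format": "%(asctime)s - %(name)s - %(levelname)s - %(message)s"},
    },
    "handlers": {
        "console": {"class": "logging.StreamHandler", "level": "INFO"},
        "file": {
            "class": "logging.handlers.RotatingFileHandler",
            "formatter": "file",
            # Analogous to uncommenting `filename` in the default config - supply a valid path.
            "filename": "freqtrade.log",
            "maxBytes": 1024 * 1024,
            "backupCount": 10,
        },
    },
    # A handler only takes effect once it is listed under "root" -> "handlers".
    "root": {"handlers": ["console", "file"], "level": "INFO"},
}

logging.config.dictConfig(log_config)
logging.getLogger(__name__).info("logging configured")
```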
---

Binary file not shown (new image, 111 KiB)

Binary file not shown (new image, 109 KiB)

View File

@@ -59,7 +59,9 @@ Common arguments:
exists). Multiple --config options may be used. Can be
set to `-` to read config from stdin.
-d PATH, --datadir PATH, --data-dir PATH
Path to directory with historical backtesting data.
Path to the base directory of the exchange with
historical backtesting data. To see futures data, use
trading-mode additionally.
--userdir PATH, --user-data-dir PATH
Path to userdata directory.

View File

@@ -30,7 +30,9 @@ Common arguments:
exists). Multiple --config options may be used. Can be
set to `-` to read config from stdin.
-d PATH, --datadir PATH, --data-dir PATH
Path to directory with historical backtesting data.
Path to the base directory of the exchange with
historical backtesting data. To see futures data, use
trading-mode additionally.
--userdir PATH, --user-data-dir PATH
Path to userdata directory.

View File

@@ -89,7 +89,9 @@ Common arguments:
exists). Multiple --config options may be used. Can be
set to `-` to read config from stdin.
-d PATH, --datadir PATH, --data-dir PATH
Path to directory with historical backtesting data.
Path to the base directory of the exchange with
historical backtesting data. To see futures data, use
trading-mode additionally.
--userdir PATH, --user-data-dir PATH
Path to userdata directory.

View File

@@ -45,7 +45,9 @@ Common arguments:
exists). Multiple --config options may be used. Can be
set to `-` to read config from stdin.
-d PATH, --datadir PATH, --data-dir PATH
Path to directory with historical backtesting data.
Path to the base directory of the exchange with
historical backtesting data. To see futures data, use
trading-mode additionally.
--userdir PATH, --user-data-dir PATH
Path to userdata directory.

View File

@@ -34,7 +34,9 @@ Common arguments:
exists). Multiple --config options may be used. Can be
set to `-` to read config from stdin.
-d PATH, --datadir PATH, --data-dir PATH
Path to directory with historical backtesting data.
Path to the base directory of the exchange with
historical backtesting data. To see futures data, use
trading-mode additionally.
--userdir PATH, --user-data-dir PATH
Path to userdata directory.

View File

@@ -63,7 +63,9 @@ Common arguments:
exists). Multiple --config options may be used. Can be
set to `-` to read config from stdin.
-d PATH, --datadir PATH, --data-dir PATH
Path to directory with historical backtesting data.
Path to the base directory of the exchange with
historical backtesting data. To see futures data, use
trading-mode additionally.
--userdir PATH, --user-data-dir PATH
Path to userdata directory.

View File

@@ -50,7 +50,9 @@ Common arguments:
exists). Multiple --config options may be used. Can be
set to `-` to read config from stdin.
-d PATH, --datadir PATH, --data-dir PATH
Path to directory with historical backtesting data.
Path to the base directory of the exchange with
historical backtesting data. To see futures data, use
trading-mode additionally.
--userdir PATH, --user-data-dir PATH
Path to userdata directory.

View File

@@ -55,7 +55,9 @@ Common arguments:
exists). Multiple --config options may be used. Can be
set to `-` to read config from stdin.
-d PATH, --datadir PATH, --data-dir PATH
Path to directory with historical backtesting data.
Path to the base directory of the exchange with
historical backtesting data. To see futures data, use
trading-mode additionally.
--userdir PATH, --user-data-dir PATH
Path to userdata directory.

View File

@@ -37,7 +37,9 @@ Common arguments:
exists). Multiple --config options may be used. Can be
set to `-` to read config from stdin.
-d PATH, --datadir PATH, --data-dir PATH
Path to directory with historical backtesting data.
Path to the base directory of the exchange with
historical backtesting data. To see futures data, use
trading-mode additionally.
--userdir PATH, --user-data-dir PATH
Path to userdata directory.

View File

@@ -79,6 +79,7 @@ options:
SortinoHyperOptLoss, SortinoHyperOptLossDaily,
CalmarHyperOptLoss, MaxDrawDownHyperOptLoss,
MaxDrawDownRelativeHyperOptLoss,
MaxDrawDownPerPairHyperOptLoss,
ProfitDrawDownHyperOptLoss, MultiMetricHyperOptLoss
--disable-param-export
Disable automatic hyperopt parameter export.
@@ -102,7 +103,9 @@ Common arguments:
exists). Multiple --config options may be used. Can be
set to `-` to read config from stdin.
-d PATH, --datadir PATH, --data-dir PATH
Path to directory with historical backtesting data.
Path to the base directory of the exchange with
historical backtesting data. To see futures data, use
trading-mode additionally.
--userdir PATH, --user-data-dir PATH
Path to userdata directory.

View File

@@ -1,9 +1,12 @@
```
usage: freqtrade install-ui [-h] [--erase] [--ui-version UI_VERSION]
usage: freqtrade install-ui [-h] [--erase] [--prerelease]
[--ui-version UI_VERSION]
options:
-h, --help show this help message and exit
--erase Clean UI folder, don't download new version.
--prerelease Install the latest pre-release version of FreqUI. This
is not recommended for production use.
--ui-version UI_VERSION
Specify a specific version of FreqUI to install. Not
specifying this installs the latest version.

View File

@@ -41,7 +41,9 @@ Common arguments:
exists). Multiple --config options may be used. Can be
set to `-` to read config from stdin.
-d PATH, --datadir PATH, --data-dir PATH
Path to directory with historical backtesting data.
Path to the base directory of the exchange with
historical backtesting data. To see futures data, use
trading-mode additionally.
--userdir PATH, --user-data-dir PATH
Path to userdata directory.

View File

@@ -22,7 +22,9 @@ Common arguments:
exists). Multiple --config options may be used. Can be
set to `-` to read config from stdin.
-d PATH, --datadir PATH, --data-dir PATH
Path to directory with historical backtesting data.
Path to the base directory of the exchange with
historical backtesting data. To see futures data, use
trading-mode additionally.
--userdir PATH, --user-data-dir PATH
Path to userdata directory.

View File

@@ -24,7 +24,9 @@ Common arguments:
exists). Multiple --config options may be used. Can be
set to `-` to read config from stdin.
-d PATH, --datadir PATH, --data-dir PATH
Path to directory with historical backtesting data.
Path to the base directory of the exchange with
historical backtesting data. To see futures data, use
trading-mode additionally.
--userdir PATH, --user-data-dir PATH
Path to userdata directory.

View File

@@ -24,7 +24,9 @@ Common arguments:
exists). Multiple --config options may be used. Can be
set to `-` to read config from stdin.
-d PATH, --datadir PATH, --data-dir PATH
Path to directory with historical backtesting data.
Path to the base directory of the exchange with
historical backtesting data. To see futures data, use
trading-mode additionally.
--userdir PATH, --user-data-dir PATH
Path to userdata directory.

View File

@@ -39,7 +39,9 @@ Common arguments:
exists). Multiple --config options may be used. Can be
set to `-` to read config from stdin.
-d PATH, --datadir PATH, --data-dir PATH
Path to directory with historical backtesting data.
Path to the base directory of the exchange with
historical backtesting data. To see futures data, use
trading-mode additionally.
--userdir PATH, --user-data-dir PATH
Path to userdata directory.

View File

@@ -39,7 +39,9 @@ Common arguments:
exists). Multiple --config options may be used. Can be
set to `-` to read config from stdin.
-d PATH, --datadir PATH, --data-dir PATH
Path to directory with historical backtesting data.
Path to the base directory of the exchange with
historical backtesting data. To see futures data, use
trading-mode additionally.
--userdir PATH, --user-data-dir PATH
Path to userdata directory.

View File

@@ -27,7 +27,9 @@ Common arguments:
exists). Multiple --config options may be used. Can be
set to `-` to read config from stdin.
-d PATH, --datadir PATH, --data-dir PATH
Path to directory with historical backtesting data.
Path to the base directory of the exchange with
historical backtesting data. To see futures data, use
trading-mode additionally.
--userdir PATH, --user-data-dir PATH
Path to userdata directory.

View File

@@ -23,7 +23,9 @@ Common arguments:
exists). Multiple --config options may be used. Can be
set to `-` to read config from stdin.
-d PATH, --datadir PATH, --data-dir PATH
Path to directory with historical backtesting data.
Path to the base directory of the exchange with
historical backtesting data. To see futures data, use
trading-mode additionally.
--userdir PATH, --user-data-dir PATH
Path to userdata directory.

View File

@@ -89,7 +89,9 @@ Common arguments:
exists). Multiple --config options may be used. Can be
set to `-` to read config from stdin.
-d PATH, --datadir PATH, --data-dir PATH
Path to directory with historical backtesting data.
Path to the base directory of the exchange with
historical backtesting data. To see futures data, use
trading-mode additionally.
--userdir PATH, --user-data-dir PATH
Path to userdata directory.

View File

@@ -63,7 +63,9 @@ Common arguments:
exists). Multiple --config options may be used. Can be
set to `-` to read config from stdin.
-d PATH, --datadir PATH, --data-dir PATH
Path to directory with historical backtesting data.
Path to the base directory of the exchange with
historical backtesting data. To see futures data, use
trading-mode additionally.
--userdir PATH, --user-data-dir PATH
Path to userdata directory.

View File

@@ -49,7 +49,9 @@ Common arguments:
exists). Multiple --config options may be used. Can be
set to `-` to read config from stdin.
-d PATH, --datadir PATH, --data-dir PATH
Path to directory with historical backtesting data.
Path to the base directory of the exchange with
historical backtesting data. To see futures data, use
trading-mode additionally.
--userdir PATH, --user-data-dir PATH
Path to userdata directory.

View File

@@ -41,7 +41,9 @@ Common arguments:
exists). Multiple --config options may be used. Can be
set to `-` to read config from stdin.
-d PATH, --datadir PATH, --data-dir PATH
Path to directory with historical backtesting data.
Path to the base directory of the exchange with
historical backtesting data. To see futures data, use
trading-mode additionally.
--userdir PATH, --user-data-dir PATH
Path to userdata directory.

View File

@@ -30,7 +30,9 @@ Common arguments:
exists). Multiple --config options may be used. Can be
set to `-` to read config from stdin.
-d PATH, --datadir PATH, --data-dir PATH
Path to directory with historical backtesting data.
Path to the base directory of the exchange with
historical backtesting data. To see futures data, use
trading-mode additionally.
--userdir PATH, --user-data-dir PATH
Path to userdata directory.

View File

@@ -34,7 +34,9 @@ Common arguments:
exists). Multiple --config options may be used. Can be
set to `-` to read config from stdin.
-d PATH, --datadir PATH, --data-dir PATH
Path to directory with historical backtesting data.
Path to the base directory of the exchange with
historical backtesting data. To see futures data, use
trading-mode additionally.
--userdir PATH, --user-data-dir PATH
Path to userdata directory.

View File

@@ -36,7 +36,9 @@ Common arguments:
exists). Multiple --config options may be used. Can be
set to `-` to read config from stdin.
-d PATH, --datadir PATH, --data-dir PATH
Path to directory with historical backtesting data.
Path to the base directory of the exchange with
historical backtesting data. To see futures data, use
trading-mode additionally.
--userdir PATH, --user-data-dir PATH
Path to userdata directory.

View File

@@ -41,7 +41,9 @@ Common arguments:
exists). Multiple --config options may be used. Can be
set to `-` to read config from stdin.
-d PATH, --datadir PATH, --data-dir PATH
Path to directory with historical backtesting data.
Path to the base directory of the exchange with
historical backtesting data. To see futures data, use
trading-mode additionally.
--userdir PATH, --user-data-dir PATH
Path to userdata directory.

View File

@@ -20,7 +20,9 @@ Common arguments:
exists). Multiple --config options may be used. Can be
set to `-` to read config from stdin.
-d PATH, --datadir PATH, --data-dir PATH
Path to directory with historical backtesting data.
Path to the base directory of the exchange with
historical backtesting data. To see futures data, use
trading-mode additionally.
--userdir PATH, --user-data-dir PATH
Path to userdata directory.

View File

@@ -205,7 +205,7 @@ Mandatory parameters are marked as **Required**, which means that they are requi
| `exit_pricing.use_order_book` | Enable exiting of open trades using [Order Book Exit](#exit-price-with-orderbook-enabled). <br> *Defaults to `true`.*<br> **Datatype:** Boolean
| `exit_pricing.order_book_top` | Bot will use the top N rate in Order Book "price_side" to exit. I.e. a value of 2 will allow the bot to pick the 2nd ask rate in [Order Book Exit](#exit-price-with-orderbook-enabled)<br>*Defaults to `1`.* <br> **Datatype:** Positive Integer
| `custom_price_max_distance_ratio` | Configure maximum distance ratio between current and custom entry or exit price. <br>*Defaults to `0.02` (2%).*<br> **Datatype:** Positive float
| | **TODO**
| | **Order/Signal handling**
| `use_exit_signal` | Use exit signals produced by the strategy in addition to the `minimal_roi`. <br>Setting this to false disables the usage of `"exit_long"` and `"exit_short"` columns. Has no influence on other exit methods (Stoploss, ROI, callbacks). [Strategy Override](#parameters-in-the-strategy). <br>*Defaults to `true`.* <br> **Datatype:** Boolean
| `exit_profit_only` | Wait until the bot reaches `exit_profit_offset` before taking an exit decision. [Strategy Override](#parameters-in-the-strategy). <br>*Defaults to `false`.* <br> **Datatype:** Boolean
| `exit_profit_offset` | Exit-signal is only active above this value. Only active in combination with `exit_profit_only=True`. [Strategy Override](#parameters-in-the-strategy). <br>*Defaults to `0.0`.* <br> **Datatype:** Float (as ratio)
@@ -266,7 +266,7 @@ Mandatory parameters are marked as **Required**, which means that they are requi
| `bot_name` | Name of the bot. Passed via API to a client - can be shown to distinguish / name bots.<br> *Defaults to `freqtrade`*<br> **Datatype:** String
| `external_message_consumer` | Enable [Producer/Consumer mode](producer-consumer.md) for more details. <br> **Datatype:** Dict
| | **Other**
| `initial_state` | Defines the initial application state. If set to stopped, then the bot has to be explicitly started via `/start` RPC command. <br>*Defaults to `stopped`.* <br> **Datatype:** Enum, either `stopped` or `running`
| `initial_state` | Defines the initial application state. If set to stopped, then the bot has to be explicitly started via `/start` RPC command. <br>*Defaults to `stopped`.* <br> **Datatype:** Enum, either `running`, `paused` or `stopped`
| `force_entry_enable` | Enables the RPC Commands to force a Trade entry. More information below. <br> **Datatype:** Boolean
| `disable_dataframe_checks` | Disable checking the OHLCV dataframe returned from the strategy methods for correctness. Only use when intentionally changing the dataframe and understand what you are doing. [Strategy Override](#parameters-in-the-strategy).<br> *Defaults to `False`*. <br> **Datatype:** Boolean
| `internals.process_throttle_secs` | Set the process throttle, or minimum loop duration for one bot iteration loop. Value in second. <br>*Defaults to `5` seconds.* <br> **Datatype:** Positive Integer
@@ -281,7 +281,7 @@ Mandatory parameters are marked as **Required**, which means that they are requi
| `add_config_files` | Additional config files. These files will be loaded and merged with the current config file. The files are resolved relative to the initial file.<br> *Defaults to `[]`*. <br> **Datatype:** List of strings
| `dataformat_ohlcv` | Data format to use to store historical candle (OHLCV) data. <br> *Defaults to `feather`*. <br> **Datatype:** String
| `dataformat_trades` | Data format to use to store historical trades data. <br> *Defaults to `feather`*. <br> **Datatype:** String
| `reduce_df_footprint` | Recast all numeric columns to float32/int32, with the objective of reducing ram/disk usage (and decreasing train/inference timing in FreqAI). (Currently only affects FreqAI use-cases) <br> **Datatype:** Boolean. <br> Default: `False`.
| `reduce_df_footprint` | Recast all numeric columns to float32/int32, with the objective of reducing ram/disk usage (and decreasing train/inference timing in backtesting/hyperopt and in FreqAI). <br> **Datatype:** Boolean. <br> Default: `False`.
| `log_config` | Dictionary containing the log config for python logging. [more info](advanced-setup.md#advanced-logging) <br> **Datatype:** dict. <br> Default: `FtRichHandler`
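As a rough illustration of what `reduce_df_footprint` does conceptually (a sketch of the recast-to-32-bit idea, not freqtrade's actual implementation):

``` python
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
df = pd.DataFrame({
    "close": rng.random(100_000),                # float64 by default
    "volume": rng.integers(0, 10_000, 100_000),  # int64 by default
})
before = df.memory_usage(deep=True).sum()

# Recast numeric columns to 32-bit types, which is the idea behind reduce_df_footprint.
df["close"] = df["close"].astype("float32")
df["volume"] = df["volume"].astype("int32")
after = df.memory_usage(deep=True).sum()

print(f"{before:,} bytes -> {after:,} bytes")  # roughly halves the numeric columns' footprint
```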
### Parameters in the strategy

View File

@@ -363,6 +363,10 @@ Hyperliquid handles deposits and withdrawals on the Arbitrum One chain, a Layer
* Create a different software wallet, only transfer the funds you want to trade with to that wallet, and use that wallet to trade on Hyperliquid.
* If you have funds you don't want to use for trading (after making a profit for example), transfer them back to your hardware wallet.
### Historic Hyperliquid data
The Hyperliquid API does not provide historic data beyond the single call to fetch current data, so downloading data is not possible, as the downloaded data would not constitute proper historic data.
## All exchanges
Should you experience constant errors with Nonce (like `InvalidNonce`), it is best to regenerate the API keys. Resetting Nonce is difficult and it's usually easier to regenerate the API keys.

View File

@@ -258,6 +258,8 @@ freqtrade trade --config config_examples/config_freqai.example.json --strategy F
We do provide an explicit docker-compose file for this in `docker/docker-compose-freqai.yml` - which can be used via `docker compose -f docker/docker-compose-freqai.yml run ...` - or can be copied to replace the original docker file.
This docker-compose file also contains a (disabled) section to enable GPU resources within docker containers. This obviously assumes the system has GPU resources available.
PyTorch dropped support for macOS x64 (intel based Apple devices) in version 2.3. Subsequently, freqtrade also dropped support for PyTorch on this platform.
### Structure
#### Model

View File

@@ -181,7 +181,7 @@ You can ask for each of the defined features to be included also for informative
In total, the number of features the user of the presented example strategy has created is: length of `include_timeframes` * no. features in `feature_engineering_expand_*()` * length of `include_corr_pairlist` * no. `include_shifted_candles` * length of `indicator_periods_candles`
$= 3 * 3 * 3 * 2 * 2 = 108$.
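A quick sanity check of that arithmetic, using illustrative config values (the timeframes and pairs below are placeholders, not taken from this excerpt):

``` python
include_timeframes = ["5m", "15m", "4h"]                       # 3
features_per_expand = 3                                        # indicators created in feature_engineering_expand_*()
include_corr_pairlist = ["BTC/USDT", "ETH/USDT", "LINK/USDT"]  # 3
include_shifted_candles = 2
indicator_periods_candles = [10, 20]                           # 2

n_features = (
    len(include_timeframes)
    * features_per_expand
    * len(include_corr_pairlist)
    * include_shifted_candles
    * len(indicator_periods_candles)
)
print(n_features)  # 3 * 3 * 3 * 2 * 2 = 108
```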
!!! note "Learn more about creative feature engineering"
!!! note "Learn more about creative feature engineering"
Check out our [medium article](https://emergentmethods.medium.com/freqai-from-price-to-prediction-6fadac18b665) geared toward helping users learn how to creatively engineer features.
### Gain finer control over `feature_engineering_*` functions with `metadata`

View File

@@ -471,6 +471,7 @@ Currently, the following loss functions are builtin:
* `SortinoHyperOptLossDaily` - optimizes Sortino Ratio calculated on **daily** trade returns relative to **downside** standard deviation.
* `MaxDrawDownHyperOptLoss` - Optimizes Maximum absolute drawdown.
* `MaxDrawDownRelativeHyperOptLoss` - Optimizes both maximum absolute drawdown while also adjusting for maximum relative drawdown.
* `MaxDrawDownPerPairHyperOptLoss` - Calculates the profit/drawdown ratio per pair and returns the worst result as objective, forcing hyperopt to optimize the parameters for all pairs in the pairlist. This way, we prevent one or more pairs with good results from inflating the metrics, while the pairs with poor results are not represented and therefore not optimized.
* `CalmarHyperOptLoss` - Optimizes Calmar Ratio calculated on trade returns relative to max drawdown.
* `ProfitDrawDownHyperOptLoss` - Optimizes by max Profit & min Drawdown objective. `DRAWDOWN_MULT` variable within the hyperoptloss file can be adjusted to be stricter or more flexible on drawdown purposes.
* `MultiMetricHyperOptLoss` - Optimizes by several key metrics to achieve balanced performance. The primary focus is on maximizing Profit and minimizing Drawdown, while also considering additional metrics such as Profit Factor, Expectancy Ratio and Winrate. Moreover, it applies a penalty for epochs with a low number of trades, encouraging strategies with adequate trade frequency.
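To make the per-pair idea behind `MaxDrawDownPerPairHyperOptLoss` (described above) concrete, here is a simplified custom-loss sketch. It is not the builtin's actual code: the drawdown calculation is deliberately naive and the callback signature is trimmed to the arguments used.

``` python
from pandas import DataFrame

from freqtrade.optimize.hyperopt import IHyperOptLoss


class WorstPairProfitDrawdownLoss(IHyperOptLoss):
    """Hypothetical loss: optimize the worst pair's profit/drawdown ratio."""

    @staticmethod
    def hyperopt_loss_function(results: DataFrame, trade_count: int, *args, **kwargs) -> float:
        if results.empty:
            return float("inf")
        worst = float("inf")
        for _, pair_results in results.groupby("pair"):
            cum_profit = pair_results["profit_abs"].cumsum()
            drawdown = (cum_profit.cummax() - cum_profit).max()
            ratio = pair_results["profit_abs"].sum() / max(drawdown, 0.0001)
            worst = min(worst, ratio)
        # Hyperopt minimizes the loss, so return the negated worst ratio.
        return -worst
```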

View File

@@ -3,7 +3,7 @@ This section will highlight a few projects from members of the community.
The projects below are for the most part not maintained by freqtrade, so use caution before using them.
- [Example freqtrade strategies](https://github.com/freqtrade/freqtrade-strategies/)
- [FrequentHippo - Grafana dashboard with dry/live runs and backtests](http://frequenthippo.ddns.net:3000/) (by hippocritical).
- [FrequentHippo - Statistics of dry/live runs and backtests](http://frequenthippo.ddns.net) (by hippocritical).
- [Online pairlist generator](https://remotepairlist.com/) (by Blood4rc).
- [Freqtrade Backtesting Project](https://strat.ninja/) (by Blood4rc).
- [Freqtrade analysis notebook](https://github.com/froggleston/freqtrade_analysis_notebook) (by Froggleston).

View File

@@ -1,23 +1,21 @@
# Lookahead analysis
This page explains how to validate your strategy in terms of look ahead bias.
This page explains how to validate your strategy in terms of lookahead bias.
Checking look ahead bias is the bane of any strategy since it is sometimes very easy to introduce backtest bias -
but very hard to detect.
Lookahead bias is the bane of any strategy since it is sometimes very easy to introduce this bias, but it can be very hard to detect.
Backtesting initializes all timestamps at once and calculates all indicators in the beginning.
This means that if your indicators or entry/exit signals could look into future candles and falsify your backtest.
Backtesting initializes all timestamps (loads the whole dataframe into memory) and calculates all indicators at once.
This means that if your indicators or entry/exit signals look into future candles, this will falsify your backtest.
Lookahead-analysis requires historic data to be available.
The `lookahead-analysis` command requires historic data to be available.
To learn how to get data for the pairs and exchange you're interested in,
head over to the [Data Downloading](data-download.md) section of the documentation.
`lookahead-analysis` also supports freqai strategies.
This command is built upon backtesting since it internally chains backtests and pokes at the strategy to provoke it to show look ahead bias.
This is done by not looking at the strategy itself - but at the results it returned.
The results are things like changed indicator-values and moved entries/exits compared to the full backtest.
This command internally chains backtests and pokes at the strategy to provoke it to show lookahead bias.
This is done by not looking at the strategy code itself, but at changed indicator values and moved entries/exits compared to the full backtest.
You can use commands of [Backtesting](backtesting.md).
It also supports the lookahead-analysis of freqai strategies.
`lookahead-analysis` can use the typical options of [Backtesting](backtesting.md), but forces the following options:
- `--cache` is forced to "none".
- `--max-open-trades` is forced to be at least equal to the number of pairs.
@@ -25,48 +23,83 @@ It also supports the lookahead-analysis of freqai strategies.
- `--stake-amount` is forced to be a static 10000 (10k).
- `--enable-protections` is forced to be off.
Those are set to avoid users accidentally generating false positives.
These are set to avoid users accidentally generating false positives.
## Lookahead-analysis command reference
--8<-- "commands/lookahead-analysis.md"
!!! Note ""
The above Output was reduced to options `lookahead-analysis` adds on top of regular backtesting commands.
### Summary
Checks a given strategy for look ahead bias via lookahead-analysis
Look ahead bias means that the backtest uses data from future candles thereby not making it viable beyond backtesting
and producing false hopes for the one backtesting.
!!! Note
The above output was reduced to options that `lookahead-analysis` adds on top of regular backtesting commands.
### Introduction
Many strategies - without the programmer knowing - have fallen prey to look ahead bias.
Many strategies, without the programmer knowing, have fallen prey to lookahead bias.
This typically makes the strategy backtest look profitable, sometimes to extremes, but this is not realistic as the strategy is "cheating" by looking at data it would not have in dry or live modes.
Any backtest will populate the full dataframe including all time stamps at the beginning.
If the programmer is not careful or oblivious how things work internally
(which sometimes can be really hard to find out) then it will just look into the future making the strategy amazing
but not realistic.
The reason why strategies can "cheat" is because the freqtrade backtesting process populates the full dataframe including all candle timestamps at the outset.
If the programmer is not careful, or is oblivious to how things work internally
(which can sometimes be really hard to find out), then the strategy will look into the future.
This command is made to try to verify the validity in the form of the aforementioned look ahead bias.
This command is made to try to verify the validity in the form of the aforementioned lookahead bias.
### How does the command work?
It will start with a backtest of all pairs to generate a baseline for indicators and entries/exits.
After the backtest ran, it will look if the `minimum-trade-amount` is met
and if not cancel the lookahead-analysis for this strategy.
After this initial backtest runs, it will check whether the `minimum-trade-amount` is met and, if not, cancel the lookahead-analysis for this strategy.
If this happens, use a wider timerange to get more trades for the analysis, or use a timerange where more trades occur.
After setting the baseline it will then do additional runs for every entry and exit separately.
When a verification-backtest is done, it will compare the indicators as the signal (either entry or exit) and report the bias.
After all signals have been verified or falsified a result-table will be generated for the user to see.
After setting the baseline it will then do additional backtest runs for every entry and exit separately.
When these verification backtests complete, it will compare the indicators at the signal candles (for both entries and exits)
and report the bias.
After all signals have been verified or falsified a result table will be generated for the user to see.
### How to find and remove bias? How can I salvage a biased strategy?
If you found a biased strategy online and want to have the same results, just without bias,
then you will be out of luck most of the time.
Usually the bias in the strategy is THE driving factor for "too good to be true" profits.
Removing conditions or indicators that push the profits up from bias will usually make the strategy significantly worse.
You might be able to salvage it partially if the biased indicators or conditions are not the core of the strategy, or there
are other entry and exit signals that are not biased.
### Examples of lookahead-bias
- `shift(-10)` looks 10 candles into the future.
- Using `iloc[]` in populate_* functions to access a specific row in the dataframe.
- For-loops are prone to introduce lookahead bias if you don't tightly control which numbers are looped through.
- Aggregation functions like `.mean()`, `.min()` and `.max()`, without a rolling window,
will calculate the value over the **whole** dataframe, so the signal candle will "see" a value that includes future candles.
A non-biased alternative is to only look back over a fixed window of candles using `rolling()` instead,
e.g. `dataframe['volume_mean_12'] = dataframe['volume'].rolling(12).mean()` (see the short sketch after this list).
- `ta.MACD(dataframe, 12, 26, 1)` will introduce bias with a signalperiod of 1.
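A short sketch contrasting the biased whole-dataframe aggregation with the non-biased rolling variant; the dataframe below is a stand-in for the one passed to `populate_indicators()`:

``` python
import numpy as np
import pandas as pd

# Stand-in dataframe; in a strategy this is the dataframe argument of populate_indicators().
dataframe = pd.DataFrame({"volume": np.random.default_rng(1).uniform(100, 200, 50)})

# Biased: .mean() without a rolling window aggregates over the WHOLE column,
# so every candle "sees" a value that already includes future candles.
dataframe["volume_mean_biased"] = dataframe["volume"].mean()

# Not biased: each candle only uses itself and the 11 candles before it.
dataframe["volume_mean_12"] = dataframe["volume"].rolling(12).mean()
```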
### What do the columns in the results table mean?
- `filename`: name of the checked strategy file
- `strategy`: checked strategy class name
- `has_bias`: result of the lookahead-analysis. `No` would be good, `Yes` would be bad.
- `total_signals`: number of checked signals (default is 20)
- `biased_entry_signals`: found bias in that many entries
- `biased_exit_signals`: found bias in that many exits
- `biased_indicators`: shows you the indicators themselves that are defined in populate_indicators
You might get false positives in the `biased_exit_signals` if you have biased entry signals paired with those exits.
However, a biased entry will usually result in a biased exit too,
even if the exit itself does not produce the bias -
especially if your entry and exit conditions use the same biased indicator.
**Address the bias in the entries first, then address the exits.**
### Caveats
- `lookahead-analysis` can only verify / falsify the trades it calculated and verified.
If the strategy has many different signals / signal types, it's up to you to select appropriate parameters to ensure that all signals have triggered at least once. Not triggered signals will not have been verified.
This could lead to a false-negative (the strategy will then be reported as non-biased).
- `lookahead-analysis` has access to everything that backtesting has too.
Please don't provoke any configs like enabling position stacking.
If you decide to do so, then make doubly sure that you won't ever run out of `max_open_trades` amount and neither leftover money in your wallet.
- In the results table, the `biased_indicators` column will falsely flag FreqAI target indicators defined in `set_freqai_targets()` as biased. These are not biased and can safely be ignored.
If the strategy has many different signals / signal types, it's up to you to select appropriate parameters to ensure that all signals have triggered at least once. Signals that are not triggered will not have been verified.
This would lead to a false-negative, i.e. the strategy will be reported as non-biased.
- `lookahead-analysis` has access to the same backtesting options and this can introduce problems.
Please don't use any options like enabling position stacking as this will distort the number of checked signals.
If you decide to do so, then make doubly sure that you won't ever run out of `max_open_trades` slots,
and that you have enough capital in the backtest wallet configuration.
- In the results table, the `biased_indicators` column
will falsely flag FreqAI target indicators defined in `set_freqai_targets()` as biased.
**These are not biased and can safely be ignored.**

View File

@@ -50,6 +50,7 @@ Enable subscribing to an instance by adding the `external_message_consumer` sect
| `ping_timeout` | Ping timeout <br>*Defaults to `10`.*<br> **Datatype:** Integer - in seconds.
| `sleep_time` | Sleep time before retrying to connect.<br>*Defaults to `10`.*<br> **Datatype:** Integer - in seconds.
| `remove_entry_exit_signals` | Remove signal columns from the dataframe (set them to 0) on dataframe receipt.<br>*Defaults to `false`.*<br> **Datatype:** Boolean.
| `initial_candle_limit` | Initial candles to expect from the Producer.<br>*Defaults to `1500`.*<br> **Datatype:** Integer - Number of candles.
| `message_size_limit` | Size limit per message<br>*Defaults to `8`.*<br> **Datatype:** Integer - Megabytes.
Instead of (or as well as) calculating indicators in `populate_indicators()` the follower instance listens on the connection to a producer instance's messages (or multiple producer instances in advanced configurations) and requests the producer's most recently analyzed dataframes for each pair in the active whitelist.
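A minimal sketch of such a consumer configuration, written here as a Python dict mirroring the JSON config. The producer connection keys (`name`, `host`, `port`, `ws_token`) are the usual ones for this feature but are not shown in this excerpt, so treat them as assumptions to verify against the full documentation:

``` python
external_message_consumer = {
    "enabled": True,
    "producers": [
        {
            # Connection details of the producer instance's API server (placeholder values).
            "name": "default",
            "host": "127.0.0.1",
            "port": 8080,
            "ws_token": "replace-with-the-producer-ws-token",
        }
    ],
    # Options from the table above, shown with their documented defaults.
    "ping_timeout": 10,
    "sleep_time": 10,
    "remove_entry_exit_signals": False,
    "initial_candle_limit": 1500,
    "message_size_limit": 8,
}
```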

View File

@@ -1,7 +1,7 @@
markdown==3.7
markdown==3.8
mkdocs==1.6.1
mkdocs-material==9.6.9
mkdocs-material==9.6.12
mdx_truly_sane_lists==1.3
pymdown-extensions==10.14.3
pymdown-extensions==10.15
jinja2==3.1.6
mike==2.1.3

View File

@@ -268,6 +268,9 @@ show_config
start
Start the bot if it's in the stopped state.
pause
Pause the bot if it's in the running state. If triggered while in the stopped state, open positions will still be handled.
stats
Return the stats report (durations, sell-reasons).
@@ -333,6 +336,7 @@ All endpoints in the below table need to be prefixed with the base URL of the AP
|-----------|--------|--------------------------|
| `/ping` | GET | Simple command testing the API Readiness - requires no authentication.
| `/start` | POST | Starts the trader.
| `/pause` | POST | Pause the trader. Gracefully handle open trades according to their rules. Do not enter new positions.
| `/stop` | POST | Stops the trader.
| `/stopbuy` | POST | Stops the trader from opening new trades. Gracefully closes open trades according to their rules.
| `/reload_config` | POST | Reloads the configuration file.
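A minimal sketch of calling the new `/pause` endpoint over HTTP with basic auth. The host, port, credentials and the `/api/v1` prefix are assumptions based on a typical `api_server` setup, not values from this excerpt:

``` python
import requests

BASE_URL = "http://127.0.0.1:8080/api/v1"      # assumed local api_server
AUTH = ("freqtrader", "SuperSecretPassword")   # api_server username / password

resp = requests.post(f"{BASE_URL}/pause", auth=AUTH, timeout=10)
resp.raise_for_status()
print(resp.json())  # status message confirming the bot is paused
```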

View File

@@ -165,18 +165,23 @@ If there is any significant difference, verify that your entry and exit signals
## Controlling or monitoring a running bot
Once your bot is running in dry or live mode, Freqtrade has five mechanisms to control or monitor a running bot:
Once your bot is running in dry or live mode, Freqtrade has six mechanisms to control or monitor a running bot:
- **[FreqUI](freq-ui.md)**: The easiest to get started with, FreqUI is a web interface to see and control current activity of your bot.
- **[Telegram](telegram-usage.md)**: On mobile devices, Telegram integration is available to get alerts about your bot activity and to control certain aspects.
- **[FTUI](https://github.com/freqtrade/ftui)**: FTUI is a terminal (command line) interface to Freqtrade, and allows monitoring of a running bot only.
- **[REST API](rest-api.md)**: The REST API allows programmers to develop their own tools to interact with a Freqtrade bot.
- **[freqtrade-client](rest-api.md#consuming-the-api)**: A python implementation of the REST API, making it easy to make requests and consume bot responses from your python apps or the command line.
- **[REST API endpoints](rest-api.md#available-endpoints)**: The REST API allows programmers to develop their own tools to interact with a Freqtrade bot.
- **[Webhooks](webhook-config.md)**: Freqtrade can send information to other services, e.g. discord, by webhooks.
### Logs
Freqtrade generates extensive debugging logs to help you understand what's happening. Please familiarise yourself with the information and error messages you might see in your bot logs.
Logging by default occurs on standard out (the command line). If you want to write out to a file instead, many freqtrade commands, including the `trade` command, accept the `--logfile` option to write to a file.
Check the [FAQ](faq.md#how-do-i-search-the-bot-logs-for-something) for examples.
## Final Thoughts
Algo trading is difficult, and most public strategies are not good performers due to the time and effort required to make a strategy work profitably in multiple scenarios.

View File

@@ -1107,3 +1107,119 @@ class AwesomeStrategy(IStrategy):
return None
```
## Plot annotations callback
The plot annotations callback is called whenever freqUI requests data to display a chart.
This callback has no meaning in the trade cycle context and is only used for charting purposes.
The strategy can then return a list of `AnnotationType` objects to be displayed on the chart.
Depending on the content returned - the chart can display horizontal areas, vertical areas, or boxes.
The full object looks like this:
``` json
{
"type": "area", // Type of the annotation, currently only "area" is supported
"start": "2024-01-01 15:00:00", // Start date of the area
"end": "2024-01-01 16:00:00", // End date of the area
"y_start": 94000.2, // Price / y axis value
"y_end": 98000, // Price / y axis value
"color": "",
"label": "some label"
}
```
The below example will mark the chart with areas for the hours 8 and 15, with a grey color, highlighting the market open and close hours.
This is obviously a very basic example.
``` python
# Default imports
class AwesomeStrategy(IStrategy):
    def plot_annotations(
        self, pair: str, start_date: datetime, end_date: datetime, dataframe: DataFrame, **kwargs
    ) -> list[AnnotationType]:
        """
        Retrieve area annotations for a chart.
        Must be returned as array, with type, label, color, start, end, y_start, y_end.
        All settings except for type are optional - though it usually makes sense to include either
        "start and end" or "y_start and y_end" for either horizontal or vertical plots
        (or all 4 for boxes).
        :param pair: Pair that's currently analyzed
        :param start_date: Start date of the chart data being requested
        :param end_date: End date of the chart data being requested
        :param dataframe: DataFrame with the analyzed data for the chart
        :param **kwargs: Ensure to keep this here so updates to this won't break your strategy.
        :return: List of AnnotationType objects
        """
        annotations = []
        start_dt = start_date
        while start_dt < end_date:
            start_dt += timedelta(hours=1)
            if start_dt.hour in (8, 15):
                annotations.append(
                    {
                        "type": "area",
                        "label": "Trade open and close hours",
                        "start": start_dt,
                        "end": start_dt + timedelta(hours=1),
                        # Omitting y_start and y_end will result in a vertical area spanning the whole height of the main Chart
                        "color": "rgba(133, 133, 133, 0.4)",
                    }
                )
        return annotations
```
Entries will be validated; entries that don't match the expected schema won't be passed to the UI, and an error will be logged for them.
!!! Warning "Many annotations"
Using too many annotations can cause the UI to hang, especially when plotting large amounts of historic data.
Use the annotation feature with care.
### Plot annotations example
![FreqUI - plot Annotations](assets/freqUI-chart-annotations-dark.png#only-dark)
![FreqUI - plot Annotations](assets/freqUI-chart-annotations-light.png#only-light)
??? Info "Code used for the plot above"
This is an example code and should be treated as such.
``` python
# Default imports
class AwesomeStrategy(IStrategy):
    def plot_annotations(
        self, pair: str, start_date: datetime, end_date: datetime, dataframe: DataFrame, **kwargs
    ) -> list[AnnotationType]:
        annotations = []
        start_dt = start_date
        while start_dt < end_date:
            start_dt += timedelta(hours=1)
            if (start_dt.hour % 4) == 0:
                annotations.append(
                    {
                        "type": "area",
                        "label": "4h",
                        "start": start_dt,
                        "end": start_dt + timedelta(hours=1),
                        "color": "rgba(133, 133, 133, 0.4)",
                    }
                )
            elif (start_dt.hour % 2) == 0:
                price = dataframe.loc[dataframe["date"] == start_dt, ["close"]].mean()
                annotations.append(
                    {
                        "type": "area",
                        "label": "2h",
                        "start": start_dt,
                        "end": start_dt + timedelta(hours=1),
                        "y_end": price * 1.01,
                        "y_start": price * 0.99,
                        "color": "rgba(0, 255, 0, 0.4)",
                    }
                )
        return annotations
```

View File

@@ -188,7 +188,7 @@ You can create your own keyboard in `config.json`:
!!! Note "Supported Commands"
Only the following commands are allowed. Command arguments are not supported!
`/start`, `/stop`, `/status`, `/status table`, `/trades`, `/profit`, `/performance`, `/daily`, `/stats`, `/count`, `/locks`, `/balance`, `/stopentry`, `/reload_config`, `/show_config`, `/logs`, `/whitelist`, `/blacklist`, `/edge`, `/help`, `/version`, `/marketdir`
`/start`, `/pause`, `/stop`, `/status`, `/status table`, `/trades`, `/profit`, `/performance`, `/daily`, `/stats`, `/count`, `/locks`, `/balance`, `/stopentry`, `/reload_config`, `/show_config`, `/logs`, `/whitelist`, `/blacklist`, `/edge`, `/help`, `/version`, `/marketdir`
## Telegram commands
@@ -200,8 +200,8 @@ official commands. You can ask at any moment for help with `/help`.
|----------|-------------|
| **System commands**
| `/start` | Starts the trader
| `/pause | /stopentry | /stopbuy` | Pause the trader. Gracefully handle open trades according to their rules. Do not enter new positions.
| `/stop` | Stops the trader
| `/stopbuy | /stopentry` | Stops the trader from opening new trades. Gracefully closes open trades according to their rules.
| `/reload_config` | Reloads the configuration file
| `/show_config` | Shows part of the current configuration with relevant settings to operation
| `/logs [limit]` | Show last log messages.
@@ -250,25 +250,27 @@ Below, example of Telegram message you will receive for each command.
> **Status:** `running`
### /pause | /stopentry | /stopbuy
> **Status:** `paused, no more entries will occur from now. Run /start to enable entries.`
Prevents the bot from opening new trades by changing the state to `paused`.
Open trades will continue to be managed according to their regular rules (ROI/exit signals, stop-loss, etc.).
Note that position adjustment remains active, but only on the exit side — meaning that when the bot is `paused`, it can only reduce the position size of open trades.
After this, give the bot time to close off open trades (can be checked via `/status table`).
Once all positions are closed, run `/stop` to completely stop the bot.
Use `/start` to resume the bot to the `running` state, allowing it to open new positions.
!!! Warning
The pause/stopentry signal is ONLY active while the bot is running, and is not persisted anyway, so restarting the bot will cause this to reset.
### /stop
> `Stopping trader ...`
> **Status:** `stopped`
### /stopbuy
> **status:** `Setting max_open_trades to 0. Run /reload_config to reset.`
Prevents the bot from opening new trades by temporarily setting "max_open_trades" to 0. Open trades will be handled via their regular rules (ROI / Sell-signal, stoploss, ...).
After this, give the bot time to close off open trades (can be checked via `/status table`).
Once all positions are sold, run `/stop` to completely stop the bot.
`/reload_config` resets "max_open_trades" to the value set in the configuration and resets this command.
!!! Warning
The stop-buy signal is ONLY active while the bot is running, and is not persisted anyway, so restarting the bot will cause this to reset.
### /status
For each open trade, the bot will send you the following message.

View File

@@ -25,6 +25,7 @@ The following attributes / properties are available for each individual trade -
| `close_date_utc` | datetime | Timestamp when trade was closed - in UTC. |
| `close_profit` | float | Relative profit at the time of trade closure. `0.01` == 1% |
| `close_profit_abs` | float | Absolute profit (in stake currency) at the time of trade closure. |
| `realized_profit` | float | Absolute already realized profit (in stake currency) while the trade is still open. |
| `leverage` | float | Leverage used for this trade - defaults to 1.0 in spot markets. |
| `enter_tag` | string | Tag provided on entry via the `enter_tag` column in the dataframe. |
| `is_short` | boolean | True for short trades, False otherwise. |
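As a rough, unofficial illustration, these attributes can be read from the `trade` object passed to strategy callbacks, for example in `custom_exit()`:

```python
def custom_exit(self, pair: str, trade: Trade, current_time: datetime,
                current_rate: float, current_profit: float, **kwargs):
    # Illustrative logic only: exit short trades once partial exits
    # have already locked in some realized profit.
    if trade.is_short and trade.realized_profit > 0:
        return "locked_in_profit_short_exit"
    return None
```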

View File

@@ -1,12 +1,12 @@
"""Freqtrade bot"""
__version__ = "2025.4-dev"
__version__ = "2025.5-dev"
if "dev" in __version__:
from pathlib import Path
try:
import subprocess # noqa: S404
import subprocess # noqa: S404, RUF100
freqtrade_basedir = Path(__file__).parent

View File

@@ -21,6 +21,8 @@ ARGS_COMMON = [
"user_data_dir",
]
ARGS_MAIN = ["version_main"]
ARGS_STRATEGY = [
"strategy",
"strategy_path",
@@ -43,7 +45,8 @@ ARGS_COMMON_OPTIMIZE = [
"pairs",
]
ARGS_BACKTEST = ARGS_COMMON_OPTIMIZE + [
ARGS_BACKTEST = [
*ARGS_COMMON_OPTIMIZE,
"position_stacking",
"enable_protections",
"dry_run_wallet",
@@ -56,7 +59,8 @@ ARGS_BACKTEST = ARGS_COMMON_OPTIMIZE + [
"freqai_backtest_live_models",
]
ARGS_HYPEROPT = ARGS_COMMON_OPTIMIZE + [
ARGS_HYPEROPT = [
*ARGS_COMMON_OPTIMIZE,
"hyperopt",
"hyperopt_path",
"position_stacking",
@@ -76,7 +80,7 @@ ARGS_HYPEROPT = ARGS_COMMON_OPTIMIZE + [
"analyze_per_epoch",
]
ARGS_EDGE = ARGS_COMMON_OPTIMIZE + ["stoploss_range"]
ARGS_EDGE = [*ARGS_COMMON_OPTIMIZE, "stoploss_range"]
ARGS_LIST_STRATEGIES = [
"strategy_path",
@@ -125,7 +129,7 @@ ARGS_BUILD_STRATEGY = ["user_data_dir", "strategy", "strategy_path", "template"]
ARGS_CONVERT_DATA_TRADES = ["pairs", "format_from_trades", "format_to", "erase", "exchange"]
ARGS_CONVERT_DATA = ["pairs", "format_from", "format_to", "erase", "exchange"]
ARGS_CONVERT_DATA_OHLCV = ARGS_CONVERT_DATA + ["timeframes", "trading_mode", "candle_types"]
ARGS_CONVERT_DATA_OHLCV = [*ARGS_CONVERT_DATA, "timeframes", "trading_mode", "candle_types"]
ARGS_CONVERT_TRADES = [
"pairs",
@@ -191,7 +195,7 @@ ARGS_PLOT_PROFIT = [
ARGS_CONVERT_DB = ["db_url", "db_url_from"]
ARGS_INSTALL_UI = ["erase_ui_only", "ui_version"]
ARGS_INSTALL_UI = ["erase_ui_only", "ui_prerelease", "ui_version"]
ARGS_SHOW_TRADES = ["db_url", "trade_ids", "print_json"]
@@ -347,7 +351,7 @@ class Arguments:
self.parser = ArgumentParser(
prog="freqtrade", description="Free, open source crypto trading bot"
)
self._build_args(optionlist=["version_main"], parser=self.parser)
self._build_args(optionlist=ARGS_MAIN, parser=self.parser)
from freqtrade.commands import (
start_analysis_entries_exits,

View File

@@ -83,7 +83,8 @@ AVAILABLE_CLI_OPTIONS = {
"-d",
"--datadir",
"--data-dir",
help="Path to directory with historical backtesting data.",
help="Path to the base directory of the exchange with historical backtesting data. "
"To see futures data, use trading-mode additionally.",
metavar="PATH",
),
"user_data_dir": Arg(
@@ -463,7 +464,7 @@ AVAILABLE_CLI_OPTIONS = {
"format_from_trades": Arg(
"--format-from",
help="Source format for data conversion.",
choices=constants.AVAILABLE_DATAHANDLERS + ["kraken_csv"],
choices=[*constants.AVAILABLE_DATAHANDLERS, "kraken_csv"],
required=True,
),
"format_from": Arg(
@@ -527,6 +528,15 @@ AVAILABLE_CLI_OPTIONS = {
),
type=str,
),
"ui_prerelease": Arg(
"--prerelease",
help=(
"Install the latest pre-release version of FreqUI. "
"This is not recommended for production use."
),
action="store_true",
default=False,
),
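# Illustrative invocation of the new flag (not part of this diff):
#   freqtrade install-ui --prerelease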
# Templating options
"template": Arg(
"--template",

View File

@@ -23,8 +23,8 @@ def start_create_userdir(args: dict[str, Any]) -> None:
"""
from freqtrade.configuration.directory_operations import copy_sample_files, create_userdata_dir
if "user_data_dir" in args and args["user_data_dir"]:
userdir = create_userdata_dir(args["user_data_dir"], create_dir=True)
if user_data_dir := args.get("user_data_dir"):
userdir = create_userdata_dir(user_data_dir, create_dir=True)
copy_sample_files(userdir, overwrite=args["reset"])
else:
logger.warning("`create-userdir` requires --userdir to be set.")
@@ -85,22 +85,22 @@ def start_new_strategy(args: dict[str, Any]) -> None:
config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)
if "strategy" in args and args["strategy"]:
if "strategy_path" in args and args["strategy_path"]:
strategy_dir = Path(args["strategy_path"])
if strategy := args.get("strategy"):
if strategy_path := args.get("strategy_path"):
strategy_dir = Path(strategy_path)
else:
strategy_dir = config["user_data_dir"] / USERPATH_STRATEGIES
if not strategy_dir.is_dir():
logger.info(f"Creating strategy directory {strategy_dir}")
strategy_dir.mkdir(parents=True)
new_path = strategy_dir / (args["strategy"] + ".py")
new_path = strategy_dir / (strategy + ".py")
if new_path.exists():
raise OperationalException(
f"`{new_path}` already exists. Please choose another Strategy Name."
)
deploy_new_strategy(args["strategy"], new_path, args["template"])
deploy_new_strategy(strategy, new_path, args["template"])
else:
raise ConfigurationError("`new-strategy` requires --strategy to be set.")
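# Illustrative CLI calls hitting this code path (behaviour unchanged by the refactor above):
#   freqtrade new-strategy --strategy MyNewStrategy
#   freqtrade new-strategy --strategy MyNewStrategy --strategy-path user_data/strategies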
@@ -116,7 +116,9 @@ def start_install_ui(args: dict[str, Any]) -> None:
dest_folder = Path(__file__).parents[1] / "rpc/api_server/ui/installed/"
# First make sure the assets are removed.
dl_url, latest_version = get_ui_download_url(args.get("ui_version"))
dl_url, latest_version = get_ui_download_url(
args.get("ui_version"), args.get("ui_prerelease", False)
)
curr_version = read_ui_version(dest_folder)
if curr_version == latest_version and not args.get("erase_ui_only"):

View File

@@ -51,7 +51,7 @@ def download_and_install_ui(dest_folder: Path, dl_url: str, version: str):
f.write(version)
def get_ui_download_url(version: str | None = None) -> tuple[str, str]:
def get_ui_download_url(version: str | None, prerelease: bool) -> tuple[str, str]:
base_url = "https://api.github.com/repos/freqtrade/frequi/"
# Get base UI Repo path
@@ -61,14 +61,18 @@ def get_ui_download_url(version: str | None = None) -> tuple[str, str]:
if version:
tmp = [x for x in r if x["name"] == version]
if tmp:
latest_version = tmp[0]["name"]
assets = tmp[0].get("assets", [])
else:
raise ValueError("UI-Version not found.")
else:
latest_version = r[0]["name"]
assets = r[0].get("assets", [])
tmp = [x for x in r if prerelease or not x.get("prerelease")]
if tmp:
# Ensure we have the latest version
if version is None:
tmp.sort(key=lambda x: x["created_at"], reverse=True)
latest_version = tmp[0]["name"]
assets = tmp[0].get("assets", [])
else:
raise ValueError("UI-Version not found.")
dl_url = ""
if assets and len(assets) > 0:
dl_url = assets[0]["browser_download_url"]

View File

@@ -5,7 +5,6 @@ from typing import Any
from freqtrade.enums import RunMode
from freqtrade.exceptions import ConfigurationError, OperationalException
from freqtrade.ft_types import ValidExchangesType
logger = logging.getLogger(__name__)
@@ -21,6 +20,7 @@ def start_list_exchanges(args: dict[str, Any]) -> None:
from rich.text import Text
from freqtrade.exchange import list_available_exchanges
from freqtrade.ft_types import ValidExchangesType
from freqtrade.loggers.rich_console import get_rich_console
available_exchanges: list[ValidExchangesType] = list_available_exchanges(

View File

@@ -0,0 +1,4 @@
from freqtrade.config_schema.config_schema import CONF_SCHEMA
__all__ = ["CONF_SCHEMA"]

View File

@@ -689,7 +689,7 @@ CONF_SCHEMA = {
"initial_state": {
"description": "Initial state of the system.",
"type": "string",
"enum": ["running", "stopped"],
"enum": ["running", "paused", "stopped"],
},
"force_entry_enable": {
"description": "Force enable entry.",
@@ -1146,6 +1146,14 @@ CONF_SCHEMA = {
"type": "boolean",
"default": False,
},
"indicator_periods_candles": {
"description": (
"Time periods to calculate indicators for. "
"The indicators are added to the base indicator dataset."
),
"type": "array",
"items": {"type": "number", "minimum": 1},
},
"use_SVM_to_remove_outliers": {
"description": "Use SVM to remove outliers from the features.",
"type": "boolean",
@@ -1338,7 +1346,8 @@ SCHEMA_BACKTEST_REQUIRED = [
"dataformat_ohlcv",
"dataformat_trades",
]
SCHEMA_BACKTEST_REQUIRED_FINAL = SCHEMA_BACKTEST_REQUIRED + [
SCHEMA_BACKTEST_REQUIRED_FINAL = [
*SCHEMA_BACKTEST_REQUIRED,
"stoploss",
"minimal_roi",
"max_open_trades",
@@ -1350,6 +1359,4 @@ SCHEMA_MINIMAL_REQUIRED = [
"dataformat_ohlcv",
"dataformat_trades",
]
SCHEMA_MINIMAL_WEBSERVER = SCHEMA_MINIMAL_REQUIRED + [
"api_server",
]
SCHEMA_MINIMAL_WEBSERVER = [*SCHEMA_MINIMAL_REQUIRED, "api_server"]

View File

@@ -6,7 +6,7 @@ from typing import Any
from jsonschema import Draft4Validator, validators
from jsonschema.exceptions import ValidationError, best_match
from freqtrade.configuration.config_schema import (
from freqtrade.config_schema.config_schema import (
CONF_SCHEMA,
SCHEMA_BACKTEST_REQUIRED,
SCHEMA_BACKTEST_REQUIRED_FINAL,
@@ -361,7 +361,7 @@ def _validate_freqai_include_timeframes(conf: dict[str, Any], preliminary: bool)
# Ensure that the base timeframe is included in the include_timeframes list
if not preliminary and main_tf not in freqai_include_timeframes:
feature_parameters = conf.get("freqai", {}).get("feature_parameters", {})
include_timeframes = [main_tf] + freqai_include_timeframes
include_timeframes = [main_tf, *freqai_include_timeframes]
conf.get("freqai", {}).get("feature_parameters", {}).update(
{**feature_parameters, "include_timeframes": include_timeframes}
)

View File

@@ -135,7 +135,7 @@ class Configuration:
{"verbosity": safe_value_fallback(self.args, "verbosity", default_value=0)}
)
if "logfile" in self.args and self.args["logfile"]:
if self.args.get("logfile"):
config.update({"logfile": self.args["logfile"]})
if "print_colorized" in self.args and not self.args["print_colorized"]:
@@ -187,7 +187,7 @@ class Configuration:
logger.warning("`force_entry_enable` RPC message enabled.")
# Support for sd_notify
if "sd_notify" in self.args and self.args["sd_notify"]:
if self.args.get("sd_notify"):
config["internals"].update({"sd_notify": True})
def _process_datadir_options(self, config: Config) -> None:
@@ -196,14 +196,14 @@ class Configuration:
--user-data, --datadir
"""
# Check exchange parameter here - otherwise `datadir` might be wrong.
if "exchange" in self.args and self.args["exchange"]:
if self.args.get("exchange"):
config["exchange"]["name"] = self.args["exchange"]
logger.info(f"Using exchange {config['exchange']['name']}")
if "pair_whitelist" not in config["exchange"]:
config["exchange"]["pair_whitelist"] = []
if "user_data_dir" in self.args and self.args["user_data_dir"]:
if self.args.get("user_data_dir"):
config.update({"user_data_dir": self.args["user_data_dir"]})
elif "user_data_dir" not in config:
# Default to cwd/user_data (legacy option ...)
@@ -251,7 +251,7 @@ class Configuration:
logstring="Parameter --enable-protections detected, enabling Protections. ...",
)
if "max_open_trades" in self.args and self.args["max_open_trades"]:
if self.args.get("max_open_trades"):
config.update({"max_open_trades": self.args["max_open_trades"]})
logger.info(
"Parameter --max-open-trades detected, overriding max_open_trades to: %s ...",
@@ -314,7 +314,7 @@ class Configuration:
self._args_to_config_loop(config, configurations)
# Edge section:
if "stoploss_range" in self.args and self.args["stoploss_range"]:
if self.args.get("stoploss_range"):
txt_range = ast.literal_eval(self.args["stoploss_range"])
config["edge"].update({"stoploss_range_min": txt_range[0]})
config["edge"].update({"stoploss_range_max": txt_range[1]})
@@ -493,7 +493,7 @@ class Configuration:
config["exchange"]["pair_whitelist"] = config["pairs"]
return
if "pairs_file" in self.args and self.args["pairs_file"]:
if self.args.get("pairs_file"):
pairs_file = Path(self.args["pairs_file"])
logger.info(f'Reading pairs file "{pairs_file}".')
# Download pairs from the pairs file if no config is specified
@@ -505,7 +505,7 @@ class Configuration:
config["pairs"].sort()
return
if "config" in self.args and self.args["config"]:
if self.args.get("config"):
logger.info("Using pairlist from configuration.")
config["pairs"] = config.get("exchange", {}).get("pair_whitelist")
else:

View File

@@ -37,7 +37,7 @@ def chown_user_directory(directory: Path) -> None:
"""
if running_in_docker():
try:
import subprocess # noqa: S404
import subprocess # noqa: S404, RUF100
subprocess.check_output(["sudo", "chown", "-R", "ftuser:", str(directory.resolve())])
except Exception:

View File

@@ -37,6 +37,7 @@ HYPEROPT_LOSS_BUILTIN = [
"CalmarHyperOptLoss",
"MaxDrawDownHyperOptLoss",
"MaxDrawDownRelativeHyperOptLoss",
"MaxDrawDownPerPairHyperOptLoss",
"ProfitDrawDownHyperOptLoss",
"MultiMetricHyperOptLoss",
]
@@ -70,6 +71,19 @@ DEFAULT_DATAFRAME_COLUMNS = ["date", "open", "high", "low", "close", "volume"]
# it has wide consequences for stored trades files
DEFAULT_TRADES_COLUMNS = ["timestamp", "id", "type", "side", "price", "amount", "cost"]
DEFAULT_ORDERFLOW_COLUMNS = ["level", "bid", "ask", "delta"]
ORDERFLOW_ADDED_COLUMNS = [
"trades",
"orderflow",
"imbalances",
"stacked_imbalances_bid",
"stacked_imbalances_ask",
"max_delta",
"min_delta",
"bid",
"ask",
"delta",
"total_trades",
]
TRADES_DTYPES = {
"timestamp": "int64",
"id": "str",
@@ -99,7 +113,7 @@ DL_DATA_TIMEFRAMES = ["1m", "5m"]
ENV_VAR_PREFIX = "FREQTRADE__"
CANCELED_EXCHANGE_STATES = ("cancelled", "canceled", "expired", "rejected")
NON_OPEN_EXCHANGE_STATES = CANCELED_EXCHANGE_STATES + ("closed",)
NON_OPEN_EXCHANGE_STATES = (*CANCELED_EXCHANGE_STATES, "closed")
# Define decimals per coin for outputs
# Only used for outputs.

View File

@@ -0,0 +1,31 @@
# flake8: noqa: F401
from .bt_fileutils import (
BT_DATA_COLUMNS,
delete_backtest_result,
extract_trades_of_period,
find_existing_backtest_stats,
get_backtest_market_change,
get_backtest_result,
get_backtest_resultlist,
get_latest_backtest_filename,
get_latest_hyperopt_file,
get_latest_hyperopt_filename,
get_latest_optimize_filename,
load_and_merge_backtest_result,
load_backtest_analysis_data,
load_backtest_data,
load_backtest_metadata,
load_backtest_stats,
load_exit_signal_candles,
load_file_from_zip,
load_rejected_signals,
load_signal_candles,
load_trades,
load_trades_from_db,
trade_list_to_dataframe,
update_backtest_metadata,
)
from .trade_parallelism import (
analyze_trade_parallelism,
evaluate_result_multi,
)

View File

@@ -13,7 +13,7 @@ from typing import Any, Literal
import numpy as np
import pandas as pd
from freqtrade.constants import LAST_BT_RESULT_FN, IntOrInf
from freqtrade.constants import LAST_BT_RESULT_FN
from freqtrade.exceptions import ConfigurationError, OperationalException
from freqtrade.ft_types import BacktestHistoryEntryType, BacktestResultType
from freqtrade.misc import file_dump_json, json_load
@@ -52,6 +52,7 @@ BT_DATA_COLUMNS = [
"open_timestamp",
"close_timestamp",
"orders",
"funding_fees",
]
@@ -356,6 +357,8 @@ def _load_backtest_data_df_compatibility(df: pd.DataFrame) -> pd.DataFrame:
df["max_stake_amount"] = df["stake_amount"]
if "orders" not in df.columns:
df["orders"] = None
if "funding_fees" not in df.columns:
df["funding_fees"] = 0.0
return df
@@ -376,7 +379,7 @@ def load_backtest_data(filename: Path | str, strategy: str | None = None) -> pd.
if not strategy:
if len(data["strategy"]) == 1:
strategy = list(data["strategy"].keys())[0]
strategy = next(iter(data["strategy"].keys()))
else:
raise ValueError(
"Detected backtest result with more than one strategy. "
@@ -491,55 +494,6 @@ def load_exit_signal_candles(backtest_dir: Path) -> dict[str, dict[str, pd.DataF
return load_backtest_analysis_data(backtest_dir, "exited")
def analyze_trade_parallelism(results: pd.DataFrame, timeframe: str) -> pd.DataFrame:
"""
Find overlapping trades by expanding each trade once per period it was open
and then counting overlaps.
:param results: Results Dataframe - can be loaded
:param timeframe: Timeframe used for backtest
:return: dataframe with open-counts per time-period in timeframe
"""
from freqtrade.exchange import timeframe_to_resample_freq
timeframe_freq = timeframe_to_resample_freq(timeframe)
dates = [
pd.Series(
pd.date_range(
row[1]["open_date"],
row[1]["close_date"],
freq=timeframe_freq,
# Exclude right boundary - the date is the candle open date.
inclusive="left",
)
)
for row in results[["open_date", "close_date"]].iterrows()
]
deltas = [len(x) for x in dates]
dates = pd.Series(pd.concat(dates).values, name="date")
df2 = pd.DataFrame(np.repeat(results.values, deltas, axis=0), columns=results.columns)
df2 = pd.concat([dates, df2], axis=1)
df2 = df2.set_index("date")
df_final = df2.resample(timeframe_freq)[["pair"]].count()
df_final = df_final.rename({"pair": "open_trades"}, axis=1)
return df_final
def evaluate_result_multi(
results: pd.DataFrame, timeframe: str, max_open_trades: IntOrInf
) -> pd.DataFrame:
"""
Find overlapping trades by expanding each trade once per period it was open
and then counting overlaps
:param results: Results Dataframe - can be loaded
:param timeframe: Frequency used for the backtest
:param max_open_trades: parameter max_open_trades used during backtest run
:return: dataframe with open-counts per time-period in freq
"""
df_final = analyze_trade_parallelism(results, timeframe)
return df_final[df_final["open_trades"] > max_open_trades]
def trade_list_to_dataframe(trades: list[Trade] | list[LocalTrade]) -> pd.DataFrame:
"""
Convert list of Trade objects to pandas Dataframe

View File

@@ -0,0 +1,60 @@
import logging
import numpy as np
import pandas as pd
from freqtrade.constants import IntOrInf
logger = logging.getLogger(__name__)
def analyze_trade_parallelism(trades: pd.DataFrame, timeframe: str) -> pd.DataFrame:
"""
Find overlapping trades by expanding each trade once per period it was open
and then counting overlaps.
:param trades: Trades Dataframe - can be loaded from backtest, or created
via trade_list_to_dataframe
:param timeframe: Timeframe used for backtest
:return: dataframe with open-counts per time-period in timeframe
"""
from freqtrade.exchange import timeframe_to_resample_freq
timeframe_freq = timeframe_to_resample_freq(timeframe)
dates = [
pd.Series(
pd.date_range(
row[1]["open_date"],
row[1]["close_date"],
freq=timeframe_freq,
# Exclude right boundary - the date is the candle open date.
inclusive="left",
)
)
for row in trades[["open_date", "close_date"]].iterrows()
]
deltas = [len(x) for x in dates]
dates = pd.Series(pd.concat(dates).values, name="date")
df2 = pd.DataFrame(np.repeat(trades.values, deltas, axis=0), columns=trades.columns)
df2 = pd.concat([dates, df2], axis=1)
df2 = df2.set_index("date")
df_final = df2.resample(timeframe_freq)[["pair"]].count()
df_final = df_final.rename({"pair": "open_trades"}, axis=1)
return df_final
def evaluate_result_multi(
trades: pd.DataFrame, timeframe: str, max_open_trades: IntOrInf
) -> pd.DataFrame:
"""
Find overlapping trades by expanding each trade once per period it was open
and then counting overlaps
:param trades: Trades Dataframe - can be loaded from backtest, or created
via trade_list_to_dataframe
:param timeframe: Frequency used for the backtest
:param max_open_trades: parameter max_open_trades used during backtest run
:return: dataframe with open-counts per time-period in freq
"""
df_final = analyze_trade_parallelism(trades, timeframe)
return df_final[df_final["open_trades"] > max_open_trades]
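# Rough usage sketch (not part of this module; values are made up):
#
#   trades = pd.DataFrame({
#       "pair": ["BTC/USDT", "ETH/USDT"],
#       "open_date": pd.to_datetime(["2024-01-01 00:00", "2024-01-01 01:00"], utc=True),
#       "close_date": pd.to_datetime(["2024-01-01 03:00", "2024-01-01 02:00"], utc=True),
#   })
#   analyze_trade_parallelism(trades, "1h")
#   # -> one "open_trades" count per 1h candle (2 during the 01:00 candle)
#   evaluate_result_multi(trades, "1h", max_open_trades=1)
#   # -> only the candles where more trades were open than max_open_trades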

View File

@@ -9,26 +9,12 @@ from datetime import datetime
import numpy as np
import pandas as pd
from freqtrade.constants import DEFAULT_ORDERFLOW_COLUMNS, Config
from freqtrade.constants import DEFAULT_ORDERFLOW_COLUMNS, ORDERFLOW_ADDED_COLUMNS, Config
from freqtrade.exceptions import DependencyException
logger = logging.getLogger(__name__)
ORDERFLOW_ADDED_COLUMNS = [
"trades",
"orderflow",
"imbalances",
"stacked_imbalances_bid",
"stacked_imbalances_ask",
"max_delta",
"min_delta",
"bid",
"ask",
"delta",
"total_trades",
]
def _init_dataframe_with_trades_columns(dataframe: pd.DataFrame):
"""

View File

@@ -281,7 +281,7 @@ def _merge_dfs(
):
merge_on = ["pair", "open_date"]
signal_wide_indicators = list(set(available_inds) - set(BT_DATA_COLUMNS))
columns_to_keep = merge_on + ["enter_reason", "exit_reason"]
columns_to_keep = [*merge_on, "enter_reason", "exit_reason"]
if exit_df is None or exit_df.empty or entry_only is True:
return entry_df[columns_to_keep + available_inds]

View File

@@ -116,7 +116,7 @@ def load_data(
result[pair] = hist
else:
if candle_type is CandleType.FUNDING_RATE and user_futures_funding_rate is not None:
logger.warn(f"{pair} using user specified [{user_futures_funding_rate}]")
logger.warning(f"{pair} using user specified [{user_futures_funding_rate}]")
elif candle_type not in (CandleType.SPOT, CandleType.FUTURES):
result[pair] = DataFrame(columns=["date", "open", "close", "high", "low", "volume"])

View File

@@ -10,7 +10,9 @@ import pandas as pd
logger = logging.getLogger(__name__)
def calculate_market_change(data: dict[str, pd.DataFrame], column: str = "close") -> float:
def calculate_market_change(
data: dict[str, pd.DataFrame], column: str = "close", min_date: datetime | None = None
) -> float:
"""
Calculate market change based on "column".
Calculation is done by taking the first non-null and the last non-null element of each column
@@ -19,14 +21,24 @@ def calculate_market_change(data: dict[str, pd.DataFrame], column: str = "close"
:param data: Dict of Dataframes, dict key should be pair.
:param column: Column in the original dataframes to use
:param min_date: Minimum date to consider for calculations. Market change should only be
calculated for data actually backtested, excluding startup periods.
:return:
"""
tmp_means = []
for pair, df in data.items():
start = df[column].dropna().iloc[0]
end = df[column].dropna().iloc[-1]
df1 = df
if min_date is not None:
df1 = df1[df1["date"] >= min_date]
if df1.empty:
logger.warning(f"Pair {pair} has no data after {min_date}.")
continue
start = df1[column].dropna().iloc[0]
end = df1[column].dropna().iloc[-1]
tmp_means.append((end - start) / start)
if not tmp_means:
return 0.0
return float(np.mean(tmp_means))
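# Worked example (illustrative figures): after the min_date filter, a pair whose
# "close" series runs 100 -> 110 contributes +0.10 and one running 200 -> 190
# contributes -0.05, so the function returns their mean, 0.025. Pairs with no
# rows on or after min_date are skipped with a warning, and an empty result
# returns 0.0 instead of raising.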
@@ -118,7 +130,7 @@ def _calc_drawdown_series(
) -> pd.DataFrame:
max_drawdown_df = pd.DataFrame()
max_drawdown_df["cumulative"] = profit_results[value_col].cumsum()
max_drawdown_df["high_value"] = max_drawdown_df["cumulative"].cummax()
max_drawdown_df["high_value"] = np.maximum(0, max_drawdown_df["cumulative"].cummax())
max_drawdown_df["drawdown"] = max_drawdown_df["cumulative"] - max_drawdown_df["high_value"]
max_drawdown_df["date"] = profit_results.loc[:, date_col]
if starting_balance:
@@ -201,13 +213,11 @@ def calculate_max_drawdown(
if relative
else max_drawdown_df["drawdown"].idxmin()
)
if idxmin == 0:
raise ValueError("No losing trade, therefore no drawdown.")
high_date = profit_results.loc[max_drawdown_df.iloc[:idxmin]["high_value"].idxmax(), date_col]
high_idx = max_drawdown_df.iloc[: idxmin + 1]["high_value"].idxmax()
high_date = profit_results.loc[high_idx, date_col]
low_date = profit_results.loc[idxmin, date_col]
high_val = max_drawdown_df.loc[
max_drawdown_df.iloc[:idxmin]["high_value"].idxmax(), "cumulative"
]
high_val = max_drawdown_df.loc[high_idx, "cumulative"]
low_val = max_drawdown_df.loc[idxmin, "cumulative"]
max_drawdown_rel = max_drawdown_df.loc[idxmin, "drawdown_relative"]

View File

@@ -7,8 +7,9 @@ class State(Enum):
"""
RUNNING = 1
STOPPED = 2
RELOAD_CONFIG = 3
PAUSED = 2
STOPPED = 3
RELOAD_CONFIG = 4
def __str__(self):
return f"{self.name.lower()}"

View File

@@ -143,7 +143,7 @@ class Binance(Exchange):
Does not work for other exchanges, which don't return the earliest data when called with "0"
:param candle_type: Any of the enum CandleType (must match trading mode!)
"""
if is_new_pair:
if is_new_pair and candle_type in (CandleType.SPOT, CandleType.FUTURES, CandleType.MARK):
with self._loop_lock:
x = self.loop.run_until_complete(
self._async_get_candle_history(pair, timeframe, candle_type, 0)

File diff suppressed because it is too large

View File

@@ -148,6 +148,7 @@ class Exchange:
"trades_has_history": False,
"l2_limit_range": None,
"l2_limit_range_required": True, # Allow Empty L2 limit (kucoin)
"l2_limit_upper": None, # Upper limit for L2 limit
"mark_ohlcv_price": "mark",
"mark_ohlcv_timeframe": "8h",
"funding_fee_timeframe": "8h",
@@ -960,7 +961,7 @@ class Exchange:
return 1 / pow(10, precision)
def get_min_pair_stake_amount(
self, pair: str, price: float, stoploss: float, leverage: float | None = 1.0
self, pair: str, price: float, stoploss: float, leverage: float = 1.0
) -> float | None:
return self._get_stake_amount_limit(pair, price, stoploss, "min", leverage)
@@ -979,7 +980,7 @@ class Exchange:
price: float,
stoploss: float,
limit: Literal["min", "max"],
leverage: float | None = 1.0,
leverage: float = 1.0,
) -> float | None:
isMin = limit == "min"
@@ -988,6 +989,8 @@ class Exchange:
except KeyError:
raise ValueError(f"Can't get market information for symbol {pair}")
stake_limits = []
limits = market["limits"]
if isMin:
# reserve some percent defined in config (5% default) + stoploss
margin_reserve: float = 1.0 + self._config.get(
@@ -997,11 +1000,12 @@ class Exchange:
# it should not be more than 50%
stoploss_reserve = max(min(stoploss_reserve, 1.5), 1)
else:
# is_max
margin_reserve = 1.0
stoploss_reserve = 1.0
if max_from_tiers := self._get_max_notional_from_tiers(pair, leverage=leverage):
stake_limits.append(max_from_tiers)
stake_limits = []
limits = market["limits"]
if limits["cost"][limit] is not None:
stake_limits.append(
self._contracts_to_amount(pair, limits["cost"][limit]) * stoploss_reserve
@@ -1365,8 +1369,8 @@ class Exchange:
ordertype = available_order_Types[user_order_type]
else:
# Otherwise pick only one available
ordertype = list(available_order_Types.values())[0]
user_order_type = list(available_order_Types.keys())[0]
ordertype = next(iter(available_order_Types.values()))
user_order_type = next(iter(available_order_Types.keys()))
return ordertype, user_order_type
def _get_stop_limit_rate(self, stop_price: float, order_types: dict, side: str) -> float:
@@ -1955,14 +1959,18 @@ class Exchange:
@staticmethod
def get_next_limit_in_list(
limit: int, limit_range: list[int] | None, range_required: bool = True
limit: int,
limit_range: list[int] | None,
range_required: bool = True,
upper_limit: int | None = None,
):
"""
Get next greater value in the list.
Used by fetch_l2_order_book if the api only supports a limited range
if both limit_range and upper_limit are provided, limit_range wins.
"""
if not limit_range:
return limit
return min(limit, upper_limit) if upper_limit else limit
result = min([x for x in limit_range if limit <= x] + [max(limit_range)])
if not range_required and limit > result:
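# Illustrative behaviour of the parameters above (assumed values):
#   get_next_limit_in_list(18, [20, 50, 100]) -> 20          (next greater entry)
#   get_next_limit_in_list(500, None, upper_limit=300) -> 300  (no range, capped)
#   get_next_limit_in_list(500, [20, 50, 100], upper_limit=300) -> 100
#   (limit_range wins over upper_limit; falls back to the largest entry)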
@@ -1979,7 +1987,10 @@ class Exchange:
{'asks': [price, volume], 'bids': [price, volume]}
"""
limit1 = self.get_next_limit_in_list(
limit, self._ft_has["l2_limit_range"], self._ft_has["l2_limit_range_required"]
limit,
self._ft_has["l2_limit_range"],
self._ft_has["l2_limit_range_required"],
self._ft_has["l2_limit_upper"],
)
try:
return self._api.fetch_l2_order_book(pair, limit1)
@@ -2790,7 +2801,7 @@ class Exchange:
pair, timeframe, candle_type = pairwt
since_ms = None
new_ticks: list = []
all_stored_ticks_df = DataFrame(columns=DEFAULT_TRADES_COLUMNS + ["date"])
all_stored_ticks_df = DataFrame(columns=[*DEFAULT_TRADES_COLUMNS, "date"])
first_candle_ms = self.needed_candle_for_trades_ms(timeframe, candle_type)
# refresh, if
# a. not in _trades
@@ -2835,7 +2846,7 @@ class Exchange:
else:
# Skip cache, it's too old
all_stored_ticks_df = DataFrame(
columns=DEFAULT_TRADES_COLUMNS + ["date"]
columns=[*DEFAULT_TRADES_COLUMNS, "date"]
)
# from_id overrules with exchange set to id paginate
@@ -3353,42 +3364,22 @@ class Exchange:
pair_tiers = self._leverage_tiers[pair]
if stake_amount == 0:
return self._leverage_tiers[pair][0]["maxLeverage"] # Max lev for lowest amount
return pair_tiers[0]["maxLeverage"] # Max lev for lowest amount
for tier_index in range(len(pair_tiers)):
tier = pair_tiers[tier_index]
lev = tier["maxLeverage"]
# Find the appropriate tier based on stake_amount
prior_max_lev = None
for tier in pair_tiers:
min_stake = tier["minNotional"] / (prior_max_lev or tier["maxLeverage"])
max_stake = tier["maxNotional"] / tier["maxLeverage"]
prior_max_lev = tier["maxLeverage"]
# Adjust notional by leverage to do a proper comparison
if min_stake <= stake_amount <= max_stake:
return tier["maxLeverage"]
if tier_index < len(pair_tiers) - 1:
next_tier = pair_tiers[tier_index + 1]
next_floor = next_tier["minNotional"] / next_tier["maxLeverage"]
if next_floor > stake_amount: # Next tier min too high for stake amount
return min((tier["maxNotional"] / stake_amount), lev)
#
# With the two leverage tiers below,
# - a stake amount of 150 would mean a max leverage of (10000 / 150) = 66.66
# - stakes below 133.33 = max_lev of 75
# - stakes between 133.33-200 = max_lev of 10000/stake = 50.01-74.99
# - stakes from 200 + 1000 = max_lev of 50
#
# {
# "min": 0, # stake = 0.0
# "max": 10000, # max_stake@75 = 10000/75 = 133.33333333333334
# "lev": 75,
# },
# {
# "min": 10000, # stake = 200.0
# "max": 50000, # max_stake@50 = 50000/50 = 1000.0
# "lev": 50,
# }
#
else: # if on the last tier
if stake_amount > tier["maxNotional"]:
# If stake is > than max tradeable amount
raise InvalidOrderException(f"Amount {stake_amount} too high for {pair}")
else:
return tier["maxLeverage"]
# else: # if on the last tier
if stake_amount > max_stake:
# If stake is > than max tradeable amount
raise InvalidOrderException(f"Amount {stake_amount} too high for {pair}")
raise OperationalException(
"Looped through all tiers without finding a max leverage. Should never be reached"
@@ -3403,6 +3394,23 @@ class Exchange:
else:
return 1.0
def _get_max_notional_from_tiers(self, pair: str, leverage: float) -> float | None:
"""
get max_notional from leverage_tiers
:param pair: The base/quote currency pair being traded
:param leverage: The leverage to be used
:return: The maximum notional value for the given leverage or None if not found
"""
if self.trading_mode != TradingMode.FUTURES:
return None
if pair not in self._leverage_tiers:
return None
pair_tiers = self._leverage_tiers[pair]
for tier in reversed(pair_tiers):
if leverage <= tier["maxLeverage"]:
return tier["maxNotional"]
return None
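# Sketch of the intended interplay (illustrative tiers, not from the diff):
# with tiers [{"maxNotional": 10000, "maxLeverage": 75},
#             {"maxNotional": 50000, "maxLeverage": 50}]
# a requested leverage of 60 is only allowed by the 10000 tier, so
# _get_max_notional_from_tiers(pair, 60) returns 10000, which
# _get_stake_amount_limit() can then append to stake_limits.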
@retrier
def _set_leverage(
self,

View File

@@ -37,6 +37,7 @@ class FtHas(TypedDict, total=False):
# Orderbook
l2_limit_range: list[int] | None
l2_limit_range_required: bool
l2_limit_upper: int | None
# Futures
ccxt_futures_name: str # usually swap
mark_ohlcv_price: str
@@ -44,6 +45,7 @@ class FtHas(TypedDict, total=False):
funding_fee_timeframe: str
funding_fee_candle_limit: int
floor_leverage: bool
uses_leverage_tiers: bool
needs_trading_fees: bool
order_props_in_contracts: list[Literal["amount", "cost", "filled", "remaining"]]

View File

@@ -35,6 +35,7 @@ class Gate(Exchange):
"stoploss_order_types": {"limit": "limit"},
"stop_price_param": "stopPrice",
"stop_price_prop": "stopPrice",
"l2_limit_upper": 1000,
"marketOrderRequiresPrice": True,
"trades_has_history": False, # Endpoint would support this - but ccxt doesn't.
}
@@ -44,6 +45,7 @@ class Gate(Exchange):
"marketOrderRequiresPrice": False,
"funding_fee_candle_limit": 90,
"stop_price_type_field": "price_type",
"l2_limit_upper": 300,
"stop_price_type_value_mapping": {
PriceType.LAST: 0,
PriceType.MARK: 1,

View File

@@ -35,6 +35,7 @@ class Hyperliquid(Exchange):
"stop_price_prop": "stopPrice",
"funding_fee_timeframe": "1h",
"funding_fee_candle_limit": 500,
"uses_leverage_tiers": False,
}
_supported_trading_mode_margin_pairs: list[tuple[TradingMode, MarginMode]] = [

View File

@@ -69,7 +69,7 @@ class Kraken(Exchange):
consolidated: CcxtBalances = {}
for currency, balance in balances.items():
base_currency = currency[:-2] if currency.endswith(".F") else currency
base_currency = self._api.commonCurrencies.get(base_currency, base_currency)
if base_currency in consolidated:
consolidated[base_currency]["free"] += balance["free"]
consolidated[base_currency]["used"] += balance["used"]

View File

@@ -569,7 +569,7 @@ class FreqaiDataDrawer:
dk.training_features_list = dk.data["training_features_list"]
dk.label_list = dk.data["label_list"]
def load_data(self, coin: str, dk: FreqaiDataKitchen) -> Any: # noqa: C901
def load_data(self, coin: str, dk: FreqaiDataKitchen) -> Any:
"""
loads all data required to make a prediction on a sub-train time range
:returns:

View File

@@ -16,7 +16,7 @@ from pandas import DataFrame
from sklearn.model_selection import train_test_split
from freqtrade.configuration import TimeRange
from freqtrade.constants import DOCS_LINK, Config
from freqtrade.constants import DOCS_LINK, ORDERFLOW_ADDED_COLUMNS, Config
from freqtrade.data.converter import reduce_dataframe_footprint
from freqtrade.exceptions import OperationalException
from freqtrade.exchange import timeframe_to_seconds
@@ -709,6 +709,11 @@ class FreqaiDataKitchen:
skip_columns = [
(f"{s}_{suffix}") for s in ["date", "open", "high", "low", "close", "volume"]
]
for s in ORDERFLOW_ADDED_COLUMNS:
if s in dataframe.columns and f"{s}_{suffix}" in dataframe.columns:
skip_columns.append(f"{s}_{suffix}")
dataframe = dataframe.drop(columns=skip_columns)
return dataframe

View File

@@ -4,8 +4,8 @@ from typing import Any
from xgboost import XGBRegressor
from freqtrade.freqai.base_models.BaseRegressionModel import BaseRegressionModel
from freqtrade.freqai.base_models.FreqaiMultiOutputRegressor import FreqaiMultiOutputRegressor
from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
from freqtrade.freqai.tensorboard import TBCallback
logger = logging.getLogger(__name__)
@@ -19,6 +19,7 @@ class XGBoostRegressorMultiTarget(BaseRegressionModel):
`predict()` methods to add their custom data handling tools or change
various aspects of the training that cannot be configured via the
top level config.json file.
This is an exact copy of XGBoostRegressor kept for compatibility reasons.
"""
def fit(self, data_dictionary: dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
@@ -29,45 +30,32 @@ class XGBoostRegressorMultiTarget(BaseRegressionModel):
:param dk: The datakitchen object for the current coin/model
"""
xgb = XGBRegressor(**self.model_training_parameters)
X = data_dictionary["train_features"]
y = data_dictionary["train_labels"]
if self.freqai_info.get("data_split_parameters", {}).get("test_size", 0.1) == 0:
eval_set = None
eval_weights = None
else:
eval_set = [(data_dictionary["test_features"], data_dictionary["test_labels"]), (X, y)]
eval_weights = [data_dictionary["test_weights"], data_dictionary["train_weights"]]
sample_weight = data_dictionary["train_weights"]
eval_weights = None
eval_sets = [None] * y.shape[1]
xgb_model = self.get_init_model(dk.pair)
if self.freqai_info.get("data_split_parameters", {}).get("test_size", 0.1) != 0:
eval_weights = [data_dictionary["test_weights"]]
for i in range(data_dictionary["test_labels"].shape[1]):
eval_sets[i] = [ # type: ignore
(
data_dictionary["test_features"],
data_dictionary["test_labels"].iloc[:, i],
)
]
model = XGBRegressor(**self.model_training_parameters)
init_model = self.get_init_model(dk.pair)
if init_model:
init_models = init_model.estimators_
else:
init_models = [None] * y.shape[1]
fit_params = []
for i in range(len(eval_sets)):
fit_params.append(
{
"eval_set": eval_sets[i],
"sample_weight_eval_set": eval_weights,
"xgb_model": init_models[i],
}
)
model = FreqaiMultiOutputRegressor(estimator=xgb)
thread_training = self.freqai_info.get("multitarget_parallel_training", False)
if thread_training:
model.n_jobs = y.shape[1]
model.fit(X=X, y=y, sample_weight=sample_weight, fit_params=fit_params)
model.set_params(callbacks=[TBCallback(dk.data_path)])
model.fit(
X=X,
y=y,
sample_weight=sample_weight,
eval_set=eval_set,
sample_weight_eval_set=eval_weights,
xgb_model=xgb_model,
)
# set the callbacks to empty so that we can serialize to disk later
model.set_params(callbacks=[])
return model

View File

@@ -293,6 +293,7 @@ class FreqtradeBot(LoggingMixin):
trades = Trade.get_open_trades()
# First process current opened trades (positions)
self.exit_positions(trades)
Trade.commit()
# Check if we need to adjust our current positions before attempting to enter new trades.
if self.strategy.position_adjustment_enable:
@@ -300,7 +301,7 @@ class FreqtradeBot(LoggingMixin):
self.process_open_trade_positions()
# Then looking for entry opportunities
if self.get_free_open_trades():
if self.state == State.RUNNING and self.get_free_open_trades():
self.enter_positions()
self._schedule.run_pending()
Trade.commit()
@@ -759,12 +760,14 @@ class FreqtradeBot(LoggingMixin):
current_exit_profit = trade.calc_profit_ratio(current_exit_rate)
min_entry_stake = self.exchange.get_min_pair_stake_amount(
trade.pair, current_entry_rate, 0.0
trade.pair, current_entry_rate, 0.0, trade.leverage
)
min_exit_stake = self.exchange.get_min_pair_stake_amount(
trade.pair, current_exit_rate, self.strategy.stoploss
trade.pair, current_exit_rate, self.strategy.stoploss, trade.leverage
)
max_entry_stake = self.exchange.get_max_pair_stake_amount(
trade.pair, current_entry_rate, trade.leverage
)
max_entry_stake = self.exchange.get_max_pair_stake_amount(trade.pair, current_entry_rate)
stake_available = self.wallets.get_available_stake_amount()
logger.debug(f"Calling adjust_trade_position for pair {trade.pair}")
stake_amount, order_tag = self.strategy._adjust_trade_position_internal(
@@ -781,6 +784,10 @@ class FreqtradeBot(LoggingMixin):
)
if stake_amount is not None and stake_amount > 0.0:
if self.state == State.PAUSED:
logger.debug("Position adjustment aborted because the bot is in PAUSED state")
return
# We should increase our position
if self.strategy.max_entry_position_adjustment > -1:
count_of_entries = trade.nr_of_successful_entries

View File

@@ -1,8 +1,11 @@
# flake8: noqa: F401
from freqtrade.ft_types.backtest_result_type import (
BacktestContentType,
BacktestContentTypeIcomplete,
BacktestHistoryEntryType,
BacktestMetadataType,
BacktestResultType,
get_BacktestResultType_default,
)
from freqtrade.ft_types.plot_annotation_type import AnnotationType
from freqtrade.ft_types.valid_exchanges_type import ValidExchangesType

View File

@@ -1,8 +1,11 @@
from copy import deepcopy
from typing import Any, cast
from pandas import DataFrame
from typing_extensions import TypedDict
from freqtrade.constants import Config
class BacktestMetadataType(TypedDict):
run_id: str
@@ -36,3 +39,23 @@ class BacktestHistoryEntryType(BacktestMetadataType):
backtest_end_ts: int | None
timeframe: str | None
timeframe_detail: str | None
class BacktestContentTypeIcomplete(TypedDict, total=False):
results: DataFrame
config: Config
locks: Any
rejected_signals: int
timedout_entry_orders: int
timedout_exit_orders: int
canceled_trade_entries: int
canceled_entry_orders: int
replaced_entry_orders: int
final_balance: float
backtest_start_time: int
backtest_end_time: int
run_id: str
class BacktestContentType(BacktestContentTypeIcomplete, total=True):
pass

View File

@@ -0,0 +1,18 @@
from datetime import datetime
from typing import Literal
from pydantic import TypeAdapter
from typing_extensions import Required, TypedDict
class AnnotationType(TypedDict, total=False):
type: Required[Literal["area"]]
start: str | datetime
end: str | datetime
y_start: float
y_end: float
color: str
label: str
AnnotationTypeTA = TypeAdapter(AnnotationType)
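# Illustrative use of the adapter above (values made up); pydantic's TypeAdapter
# raises a ValidationError for malformed annotations:
#
#   AnnotationTypeTA.validate_python(
#       {"type": "area", "start": "2024-01-01", "end": "2024-01-02",
#        "label": "weekend", "color": "rgba(133, 133, 133, 0.4)"}
#   )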

View File

@@ -123,7 +123,7 @@ def _add_formatter(log_config: dict[str, Any], format_name: str, format_: str):
def _create_log_config(config: Config) -> dict[str, Any]:
# Get log_config from user config or use default
log_config = config.get("log_config", deepcopy(FT_LOGGING_CONFIG))
log_config = deepcopy(config.get("log_config", FT_LOGGING_CONFIG))
if logfile := config.get("logfile"):
s = logfile.split(":")

View File

@@ -20,12 +20,13 @@ class LoggingMixin:
self.refresh_period = refresh_period
self._log_cache: TTLCache = TTLCache(maxsize=1024, ttl=self.refresh_period)
def log_once(self, message: str, logmethod: Callable) -> None:
def log_once(self, message: str, logmethod: Callable, force_show: bool = False) -> None:
"""
Logs message - not more often than "refresh_period" to avoid log spamming
Logs the log-message as debug as well to simplify debugging.
:param message: String containing the message to be sent to the function.
:param logmethod: Function that'll be called. Most likely `logger.info`.
:param force_show: If True, sends the message regardless of show_output value.
:return: None.
"""
@@ -35,6 +36,7 @@ class LoggingMixin:
# Log as debug first
self.logger.debug(message)
# Call hidden function.
if self.show_output:
# Call hidden function if show_output is True or force_show is True
if self.show_output or force_show:
_log_once(message)

View File

@@ -129,7 +129,6 @@ class LookaheadAnalysis(BaseAnalysis):
backtesting._set_strategy(backtesting.strategylist[0])
varholder.data, varholder.timerange = backtesting.load_bt_data()
backtesting.load_bt_data_detail()
varholder.timeframe = backtesting.timeframe
varholder.indicators = backtesting.strategy.advise_all_indicators(varholder.data)

View File

@@ -149,7 +149,6 @@ class RecursiveAnalysis(BaseAnalysis):
backtesting._set_strategy(backtesting.strategylist[0])
varholder.data, varholder.timerange = backtesting.load_bt_data()
backtesting.load_bt_data_detail()
varholder.timeframe = backtesting.timeframe
varholder.indicators = backtesting.strategy.advise_all_indicators(varholder.data)

View File

@@ -8,7 +8,6 @@ import logging
from collections import defaultdict
from copy import deepcopy
from datetime import datetime, timedelta
from typing import Any
from numpy import nan
from pandas import DataFrame
@@ -37,7 +36,12 @@ from freqtrade.exchange import (
timeframe_to_seconds,
)
from freqtrade.exchange.exchange import Exchange
from freqtrade.ft_types import BacktestResultType, get_BacktestResultType_default
from freqtrade.ft_types import (
BacktestContentType,
BacktestContentTypeIcomplete,
BacktestResultType,
get_BacktestResultType_default,
)
from freqtrade.leverage.liquidation_price import update_liquidation_prices
from freqtrade.mixins import LoggingMixin
from freqtrade.optimize.backtest_caching import get_strategy_run_id
@@ -119,7 +123,7 @@ class Backtesting:
config["dry_run"] = True
self.run_ids: dict[str, str] = {}
self.strategylist: list[IStrategy] = []
self.all_results: dict[str, dict] = {}
self.all_bt_content: dict[str, BacktestContentType] = {}
self.analysis_results: dict[str, dict[str, DataFrame]] = {
"signals": {},
"rejected": {},
@@ -311,9 +315,10 @@ class Backtesting:
)
self.progress.set_new_value(1)
self._load_bt_data_detail()
return data, self.timerange
def load_bt_data_detail(self) -> None:
def _load_bt_data_detail(self) -> None:
"""
Loads backtest detail data (smaller timeframe) if necessary.
"""
@@ -360,8 +365,9 @@ class Backtesting:
)
# Combine data to avoid combining the data per trade.
unavailable_pairs = []
uses_leverage_tiers = self.exchange.get_option("uses_leverage_tiers", True)
for pair in self.pairlists.whitelist:
if pair not in self.exchange._leverage_tiers:
if uses_leverage_tiers and pair not in self.exchange._leverage_tiers:
unavailable_pairs.append(pair)
continue
@@ -1610,7 +1616,9 @@ class Backtesting:
yield current_time_det, pair, row, is_last_row, trade_dir
self.progress.increment()
def backtest(self, processed: dict, start_date: datetime, end_date: datetime) -> dict[str, Any]:
def backtest(
self, processed: dict, start_date: datetime, end_date: datetime
) -> BacktestContentTypeIcomplete:
"""
Implement backtesting functionality
@@ -1710,7 +1718,7 @@ class Backtesting:
"backtest_end_time": int(backtest_end_time.timestamp()),
}
)
self.all_results[strategy_name] = results
self.all_bt_content[strategy_name] = results
if (
self.config.get("export", "none") == "signals"
@@ -1760,7 +1768,6 @@ class Backtesting:
data: dict[str, DataFrame] = {}
data, timerange = self.load_bt_data()
self.load_bt_data_detail()
logger.info("Dataload complete. Calculating indicators")
self.load_prior_backtest()
@@ -1773,9 +1780,9 @@ class Backtesting:
min_date, max_date = self.backtest_one_strategy(strat, data, timerange)
# Update old results with new ones.
if len(self.all_results) > 0:
if len(self.all_bt_content) > 0:
results = generate_backtest_stats(
data, self.all_results, min_date=min_date, max_date=max_date
data, self.all_bt_content, min_date=min_date, max_date=max_date
)
if self.results:
self.results["metadata"].update(results["metadata"])

View File

@@ -22,6 +22,7 @@ from freqtrade.data.history import get_timerange
from freqtrade.data.metrics import calculate_market_change
from freqtrade.enums import HyperoptState
from freqtrade.exceptions import OperationalException
from freqtrade.ft_types import BacktestContentType
from freqtrade.misc import deep_merge_dicts, round_dict
from freqtrade.optimize.backtesting import Backtesting
@@ -337,7 +338,7 @@ class HyperOptimizer:
def _get_results_dict(
self,
backtesting_results: dict[str, Any],
backtesting_results: BacktestContentType,
min_date: datetime,
max_date: datetime,
params_dict: dict[str, Any],
@@ -443,7 +444,6 @@ class HyperOptimizer:
def prepare_hyperopt_data(self) -> None:
HyperoptStateContainer.set_state(HyperoptState.DATALOAD)
data, self.timerange = self.backtesting.load_bt_data()
self.backtesting.load_bt_data_detail()
logger.info("Dataload complete. Calculating indicators")
if not self.analyze_per_epoch:

Some files were not shown because too many files have changed in this diff