Compare commits
14 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
70dfa1435b | ||
|
|
98fc5b6e65 | ||
|
|
c126c26501 | ||
|
|
2159059b87 | ||
|
|
f0f4faca71 | ||
|
|
0bc647dbd9 | ||
|
|
e3efb72efe | ||
|
|
a9ef63cb20 | ||
|
|
3b0daff2a2 | ||
|
|
67bd4f08e6 | ||
|
|
4c2d291eaf | ||
|
|
85df7faa98 | ||
|
|
8d3ed03184 | ||
|
|
f5870a7540 |
21
.devcontainer/Dockerfile
Normal file
@@ -0,0 +1,21 @@
|
|||||||
|
FROM freqtradeorg/freqtrade:develop
|
||||||
|
|
||||||
|
USER root
|
||||||
|
# Install dependencies
|
||||||
|
COPY requirements-dev.txt /freqtrade/
|
||||||
|
|
||||||
|
RUN apt-get update \
|
||||||
|
&& apt-get -y install git mercurial sudo vim build-essential \
|
||||||
|
&& apt-get clean \
|
||||||
|
&& mkdir -p /home/ftuser/.vscode-server /home/ftuser/.vscode-server-insiders /home/ftuser/commandhistory \
|
||||||
|
&& echo "export PROMPT_COMMAND='history -a'" >> /home/ftuser/.bashrc \
|
||||||
|
&& echo "export HISTFILE=~/commandhistory/.bash_history" >> /home/ftuser/.bashrc \
|
||||||
|
&& chown ftuser:ftuser -R /home/ftuser/.local/ \
|
||||||
|
&& chown ftuser: -R /home/ftuser/
|
||||||
|
|
||||||
|
USER ftuser
|
||||||
|
|
||||||
|
RUN pip install --user autopep8 -r docs/requirements-docs.txt -r requirements-dev.txt --no-cache-dir
|
||||||
|
|
||||||
|
# Empty the ENTRYPOINT to allow all commands
|
||||||
|
ENTRYPOINT []
|
||||||
@@ -1,44 +1,39 @@
|
|||||||
{
|
{
|
||||||
"name": "freqtrade Develop",
|
"name": "freqtrade Develop",
|
||||||
"image": "ghcr.io/freqtrade/freqtrade-devcontainer:latest",
|
"build": {
|
||||||
|
"dockerfile": "Dockerfile",
|
||||||
|
"context": ".."
|
||||||
|
},
|
||||||
// Use 'forwardPorts' to make a list of ports inside the container available locally.
|
// Use 'forwardPorts' to make a list of ports inside the container available locally.
|
||||||
"forwardPorts": [
|
"forwardPorts": [
|
||||||
8080
|
8080
|
||||||
],
|
],
|
||||||
"workspaceMount": "source=${localWorkspaceFolder},target=/workspaces/freqtrade,type=bind,consistency=cached",
|
"mounts": [
|
||||||
|
"source=freqtrade-bashhistory,target=/home/ftuser/commandhistory,type=volume"
|
||||||
|
],
|
||||||
// Uncomment to connect as a non-root user if you've added one. See https://aka.ms/vscode-remote/containers/non-root.
|
// Uncomment to connect as a non-root user if you've added one. See https://aka.ms/vscode-remote/containers/non-root.
|
||||||
"remoteUser": "ftuser",
|
"remoteUser": "ftuser",
|
||||||
"onCreateCommand": "pip install --user -e .",
|
|
||||||
"postCreateCommand": "freqtrade create-userdir --userdir user_data/",
|
"postCreateCommand": "freqtrade create-userdir --userdir user_data/",
|
||||||
"workspaceFolder": "/workspaces/freqtrade",
|
|
||||||
"customizations": {
|
"workspaceFolder": "/freqtrade/",
|
||||||
"vscode": {
|
|
||||||
"settings": {
|
"settings": {
|
||||||
"terminal.integrated.shell.linux": "/bin/bash",
|
"terminal.integrated.shell.linux": "/bin/bash",
|
||||||
"editor.insertSpaces": true,
|
"editor.insertSpaces": true,
|
||||||
"files.trimTrailingWhitespace": true,
|
"files.trimTrailingWhitespace": true,
|
||||||
"[markdown]": {
|
"[markdown]": {
|
||||||
"files.trimTrailingWhitespace": false
|
"files.trimTrailingWhitespace": false,
|
||||||
},
|
},
|
||||||
"python.pythonPath": "/usr/local/bin/python",
|
"python.pythonPath": "/usr/local/bin/python",
|
||||||
"[python]": {
|
|
||||||
"editor.codeActionsOnSave": {
|
|
||||||
"source.organizeImports": "explicit"
|
|
||||||
},
|
|
||||||
"editor.formatOnSave": true,
|
|
||||||
"editor.defaultFormatter": "charliermarsh.ruff"
|
|
||||||
}
|
|
||||||
},
|
},
|
||||||
|
|
||||||
// Add the IDs of extensions you want installed when the container is created.
|
// Add the IDs of extensions you want installed when the container is created.
|
||||||
"extensions": [
|
"extensions": [
|
||||||
"ms-python.python",
|
"ms-python.python",
|
||||||
"ms-python.vscode-pylance",
|
"ms-python.vscode-pylance",
|
||||||
"charliermarsh.ruff",
|
|
||||||
"davidanson.vscode-markdownlint",
|
"davidanson.vscode-markdownlint",
|
||||||
"ms-azuretools.vscode-docker",
|
"ms-azuretools.vscode-docker",
|
||||||
"vscode-icons-team.vscode-icons",
|
"vscode-icons-team.vscode-icons",
|
||||||
"github.vscode-github-actions",
|
|
||||||
],
|
],
|
||||||
}
|
}
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|||||||
21
.github/.devcontainer/Dockerfile
vendored
@@ -1,21 +0,0 @@
|
|||||||
FROM freqtradeorg/freqtrade:develop_freqairl
|
|
||||||
|
|
||||||
USER root
|
|
||||||
# Install dependencies
|
|
||||||
COPY requirements-dev.txt /freqtrade/
|
|
||||||
|
|
||||||
ARG USERNAME=ftuser
|
|
||||||
|
|
||||||
RUN apt-get update \
|
|
||||||
&& apt-get -y install --no-install-recommends apt-utils dialog git ssh vim build-essential zsh \
|
|
||||||
&& apt-get clean \
|
|
||||||
&& mkdir -p /home/${USERNAME}/.vscode-server /home/${USERNAME}/.vscode-server-insiders /home/${USERNAME}/commandhistory \
|
|
||||||
&& chown ${USERNAME}:${USERNAME} -R /home/${USERNAME}/.local/ \
|
|
||||||
&& chown ${USERNAME}: -R /home/${USERNAME}/
|
|
||||||
|
|
||||||
USER ftuser
|
|
||||||
|
|
||||||
RUN pip install --user autopep8 -r docs/requirements-docs.txt -r requirements-dev.txt --no-cache-dir
|
|
||||||
|
|
||||||
# Empty the ENTRYPOINT to allow all commands
|
|
||||||
ENTRYPOINT []
|
|
||||||
12
.github/.devcontainer/devcontainer.json
vendored
@@ -1,12 +0,0 @@
|
|||||||
{
|
|
||||||
"name": "freqtrade Dev container image builder",
|
|
||||||
"build": {
|
|
||||||
"dockerfile": "Dockerfile",
|
|
||||||
"context": "../../"
|
|
||||||
},
|
|
||||||
"features": {
|
|
||||||
"ghcr.io/devcontainers/features/common-utils:2": {
|
|
||||||
},
|
|
||||||
"ghcr.io/stuartleeks/dev-container-features/shell-history:0.0.3": {}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
2
.github/ISSUE_TEMPLATE/bug_report.md
vendored
@@ -20,7 +20,7 @@ Please do not use bug reports to request new features.
|
|||||||
* Operating system: ____
|
* Operating system: ____
|
||||||
* Python Version: _____ (`python -V`)
|
* Python Version: _____ (`python -V`)
|
||||||
* CCXT version: _____ (`pip freeze | grep ccxt`)
|
* CCXT version: _____ (`pip freeze | grep ccxt`)
|
||||||
* Freqtrade Version: ____ (`freqtrade -V` or `docker compose run --rm freqtrade -V` for Freqtrade running in docker)
|
* Freqtrade Version: ____ (`freqtrade -V` or `docker-compose run --rm freqtrade -V` for Freqtrade running in docker)
|
||||||
|
|
||||||
Note: All issues other than enhancement requests will be closed without further comment if the above template is deleted or not filled out.
|
Note: All issues other than enhancement requests will be closed without further comment if the above template is deleted or not filled out.
|
||||||
|
|
||||||
|
|||||||
2
.github/ISSUE_TEMPLATE/feature_request.md
vendored
@@ -18,7 +18,7 @@ Have you search for this feature before requesting it? It's highly likely that a
|
|||||||
* Operating system: ____
|
* Operating system: ____
|
||||||
* Python Version: _____ (`python -V`)
|
* Python Version: _____ (`python -V`)
|
||||||
* CCXT version: _____ (`pip freeze | grep ccxt`)
|
* CCXT version: _____ (`pip freeze | grep ccxt`)
|
||||||
* Freqtrade Version: ____ (`freqtrade -V` or `docker compose run --rm freqtrade -V` for Freqtrade running in docker)
|
* Freqtrade Version: ____ (`freqtrade -V` or `docker-compose run --rm freqtrade -V` for Freqtrade running in docker)
|
||||||
|
|
||||||
|
|
||||||
## Describe the enhancement
|
## Describe the enhancement
|
||||||
|
|||||||
2
.github/ISSUE_TEMPLATE/question.md
vendored
@@ -18,7 +18,7 @@ Please do not use the question template to report bugs or to request new feature
|
|||||||
* Operating system: ____
|
* Operating system: ____
|
||||||
* Python Version: _____ (`python -V`)
|
* Python Version: _____ (`python -V`)
|
||||||
* CCXT version: _____ (`pip freeze | grep ccxt`)
|
* CCXT version: _____ (`pip freeze | grep ccxt`)
|
||||||
* Freqtrade Version: ____ (`freqtrade -V` or `docker compose run --rm freqtrade -V` for Freqtrade running in docker)
|
* Freqtrade Version: ____ (`freqtrade -V` or `docker-compose run --rm freqtrade -V` for Freqtrade running in docker)
|
||||||
|
|
||||||
## Your question
|
## Your question
|
||||||
|
|
||||||
|
|||||||
21
.github/dependabot.yml
vendored
@@ -1,34 +1,17 @@
|
|||||||
version: 2
|
version: 2
|
||||||
updates:
|
updates:
|
||||||
- package-ecosystem: docker
|
- package-ecosystem: docker
|
||||||
directories:
|
directory: "/"
|
||||||
- "/"
|
|
||||||
- "/docker"
|
|
||||||
schedule:
|
schedule:
|
||||||
interval: daily
|
interval: daily
|
||||||
ignore:
|
|
||||||
- dependency-name: "*"
|
|
||||||
update-types: ["version-update:semver-major"]
|
|
||||||
open-pull-requests-limit: 10
|
open-pull-requests-limit: 10
|
||||||
|
|
||||||
- package-ecosystem: pip
|
- package-ecosystem: pip
|
||||||
directory: "/"
|
directory: "/"
|
||||||
schedule:
|
schedule:
|
||||||
interval: weekly
|
interval: weekly
|
||||||
time: "03:00"
|
open-pull-requests-limit: 10
|
||||||
timezone: "Etc/UTC"
|
|
||||||
open-pull-requests-limit: 15
|
|
||||||
target-branch: develop
|
target-branch: develop
|
||||||
groups:
|
|
||||||
types:
|
|
||||||
patterns:
|
|
||||||
- "types-*"
|
|
||||||
pytest:
|
|
||||||
patterns:
|
|
||||||
- "pytest*"
|
|
||||||
mkdocs:
|
|
||||||
patterns:
|
|
||||||
- "mkdocs*"
|
|
||||||
|
|
||||||
- package-ecosystem: "github-actions"
|
- package-ecosystem: "github-actions"
|
||||||
directory: "/"
|
directory: "/"
|
||||||
|
|||||||
47
.github/workflows/binance-lev-tier-update.yml
vendored
@@ -1,47 +0,0 @@
|
|||||||
name: Binance Leverage tiers update
|
|
||||||
|
|
||||||
on:
|
|
||||||
schedule:
|
|
||||||
- cron: "0 3 * * 4"
|
|
||||||
# on demand
|
|
||||||
workflow_dispatch:
|
|
||||||
|
|
||||||
permissions:
|
|
||||||
contents: read
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
auto-update:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
environment:
|
|
||||||
name: develop
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v4
|
|
||||||
|
|
||||||
- uses: actions/setup-python@v5
|
|
||||||
with:
|
|
||||||
python-version: "3.12"
|
|
||||||
|
|
||||||
- name: Install ccxt
|
|
||||||
run: pip install ccxt
|
|
||||||
|
|
||||||
- name: Run leverage tier update
|
|
||||||
env:
|
|
||||||
CI_WEB_PROXY: ${{ secrets.CI_WEB_PROXY }}
|
|
||||||
FREQTRADE__EXCHANGE__KEY: ${{ secrets.BINANCE_EXCHANGE_KEY }}
|
|
||||||
FREQTRADE__EXCHANGE__SECRET: ${{ secrets.BINANCE_EXCHANGE_SECRET }}
|
|
||||||
run: python build_helpers/binance_update_lev_tiers.py
|
|
||||||
|
|
||||||
|
|
||||||
- uses: peter-evans/create-pull-request@v7
|
|
||||||
with:
|
|
||||||
token: ${{ secrets.REPO_SCOPED_TOKEN }}
|
|
||||||
add-paths: freqtrade/exchange/binance_leverage_tiers.json
|
|
||||||
labels: |
|
|
||||||
Tech maintenance
|
|
||||||
Dependencies
|
|
||||||
branch: update/binance-leverage-tiers
|
|
||||||
title: Update Binance Leverage Tiers
|
|
||||||
commit-message: "chore: update pre-commit hooks"
|
|
||||||
committer: Freqtrade Bot <noreply@github.com>
|
|
||||||
body: Update binance leverage tiers.
|
|
||||||
delete-branch: true
|
|
||||||
465
.github/workflows/ci.yml
vendored
@@ -11,42 +11,42 @@ on:
|
|||||||
types: [published]
|
types: [published]
|
||||||
pull_request:
|
pull_request:
|
||||||
schedule:
|
schedule:
|
||||||
- cron: '0 3 * * 4'
|
- cron: '0 5 * * 4'
|
||||||
|
|
||||||
concurrency:
|
concurrency:
|
||||||
group: "${{ github.workflow }}-${{ github.ref }}-${{ github.event_name }}"
|
group: ${{ github.workflow }}-${{ github.ref }}
|
||||||
cancel-in-progress: true
|
cancel-in-progress: true
|
||||||
permissions:
|
|
||||||
repository-projects: read
|
|
||||||
jobs:
|
jobs:
|
||||||
build-linux:
|
build_linux:
|
||||||
|
|
||||||
runs-on: ${{ matrix.os }}
|
runs-on: ${{ matrix.os }}
|
||||||
strategy:
|
strategy:
|
||||||
matrix:
|
matrix:
|
||||||
os: [ "ubuntu-20.04", "ubuntu-22.04", "ubuntu-24.04" ]
|
os: [ ubuntu-18.04, ubuntu-20.04, ubuntu-22.04 ]
|
||||||
python-version: ["3.10", "3.11", "3.12"]
|
python-version: ["3.8", "3.9", "3.10.6"]
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v3
|
||||||
|
|
||||||
- name: Set up Python
|
- name: Set up Python
|
||||||
uses: actions/setup-python@v5
|
uses: actions/setup-python@v4
|
||||||
with:
|
with:
|
||||||
python-version: ${{ matrix.python-version }}
|
python-version: ${{ matrix.python-version }}
|
||||||
|
|
||||||
- name: Cache_dependencies
|
- name: Cache_dependencies
|
||||||
uses: actions/cache@v4
|
uses: actions/cache@v3
|
||||||
id: cache
|
id: cache
|
||||||
with:
|
with:
|
||||||
path: ~/dependencies/
|
path: ~/dependencies/
|
||||||
key: ${{ runner.os }}-dependencies
|
key: ${{ runner.os }}-dependencies
|
||||||
|
|
||||||
- name: pip cache (linux)
|
- name: pip cache (linux)
|
||||||
uses: actions/cache@v4
|
uses: actions/cache@v3
|
||||||
|
if: runner.os == 'Linux'
|
||||||
with:
|
with:
|
||||||
path: ~/.cache/pip
|
path: ~/.cache/pip
|
||||||
key: pip-${{ matrix.python-version }}-ubuntu
|
key: test-${{ matrix.os }}-${{ matrix.python-version }}-pip
|
||||||
|
|
||||||
- name: TA binary *nix
|
- name: TA binary *nix
|
||||||
if: steps.cache.outputs.cache-hit != 'true'
|
if: steps.cache.outputs.cache-hit != 'true'
|
||||||
@@ -54,31 +54,27 @@ jobs:
|
|||||||
cd build_helpers && ./install_ta-lib.sh ${HOME}/dependencies/; cd ..
|
cd build_helpers && ./install_ta-lib.sh ${HOME}/dependencies/; cd ..
|
||||||
|
|
||||||
- name: Installation - *nix
|
- name: Installation - *nix
|
||||||
|
if: runner.os == 'Linux'
|
||||||
run: |
|
run: |
|
||||||
python -m pip install --upgrade pip wheel
|
python -m pip install --upgrade pip wheel
|
||||||
export LD_LIBRARY_PATH=${HOME}/dependencies/lib:$LD_LIBRARY_PATH
|
export LD_LIBRARY_PATH=${HOME}/dependencies/lib:$LD_LIBRARY_PATH
|
||||||
export TA_LIBRARY_PATH=${HOME}/dependencies/lib
|
export TA_LIBRARY_PATH=${HOME}/dependencies/lib
|
||||||
export TA_INCLUDE_PATH=${HOME}/dependencies/include
|
export TA_INCLUDE_PATH=${HOME}/dependencies/include
|
||||||
pip install -r requirements-dev.txt
|
pip install -r requirements-dev.txt
|
||||||
pip install -e ft_client/
|
|
||||||
pip install -e .
|
pip install -e .
|
||||||
|
|
||||||
- name: Check for version alignment
|
|
||||||
run: |
|
|
||||||
python build_helpers/freqtrade_client_version_align.py
|
|
||||||
|
|
||||||
- name: Tests
|
- name: Tests
|
||||||
if: (!(runner.os == 'Linux' && matrix.python-version == '3.12' && matrix.os == 'ubuntu-22.04'))
|
|
||||||
run: |
|
run: |
|
||||||
pytest --random-order
|
pytest --random-order --cov=freqtrade --cov-config=.coveragerc
|
||||||
|
if: matrix.python-version != '3.9' || matrix.os != 'ubuntu-22.04'
|
||||||
|
|
||||||
- name: Tests with Coveralls
|
- name: Tests incl. ccxt compatibility tests
|
||||||
if: (runner.os == 'Linux' && matrix.python-version == '3.12' && matrix.os == 'ubuntu-22.04')
|
|
||||||
run: |
|
run: |
|
||||||
pytest --random-order --cov=freqtrade --cov=freqtrade_client --cov-config=.coveragerc
|
pytest --random-order --cov=freqtrade --cov-config=.coveragerc --longrun
|
||||||
|
if: matrix.python-version == '3.9' && matrix.os == 'ubuntu-22.04'
|
||||||
|
|
||||||
- name: Coveralls
|
- name: Coveralls
|
||||||
if: (runner.os == 'Linux' && matrix.python-version == '3.12' && matrix.os == 'ubuntu-22.04')
|
if: (runner.os == 'Linux' && matrix.python-version == '3.9')
|
||||||
env:
|
env:
|
||||||
# Coveralls token. Not used as secret due to github not providing secrets to forked repositories
|
# Coveralls token. Not used as secret due to github not providing secrets to forked repositories
|
||||||
COVERALLS_REPO_TOKEN: 6D1m0xupS3FgutfuGao8keFf9Hc0FpIXu
|
COVERALLS_REPO_TOKEN: 6D1m0xupS3FgutfuGao8keFf9Hc0FpIXu
|
||||||
@@ -86,25 +82,9 @@ jobs:
|
|||||||
# Allow failure for coveralls
|
# Allow failure for coveralls
|
||||||
coveralls || true
|
coveralls || true
|
||||||
|
|
||||||
- name: Run json schema extract
|
|
||||||
# This should be kept before the repository check to ensure that the schema is up-to-date
|
|
||||||
run: |
|
|
||||||
python build_helpers/extract_config_json_schema.py
|
|
||||||
|
|
||||||
- name: Check for repository changes
|
|
||||||
run: |
|
|
||||||
if [ -n "$(git status --porcelain)" ]; then
|
|
||||||
echo "Repository is dirty, changes detected:"
|
|
||||||
git status
|
|
||||||
git diff
|
|
||||||
exit 1
|
|
||||||
else
|
|
||||||
echo "Repository is clean, no changes detected."
|
|
||||||
fi
|
|
||||||
|
|
||||||
- name: Backtesting (multi)
|
- name: Backtesting (multi)
|
||||||
run: |
|
run: |
|
||||||
cp tests/testdata/config.tests.json config.json
|
cp config_examples/config_bittrex.example.json config.json
|
||||||
freqtrade create-userdir --userdir user_data
|
freqtrade create-userdir --userdir user_data
|
||||||
freqtrade new-strategy -s AwesomeStrategy
|
freqtrade new-strategy -s AwesomeStrategy
|
||||||
freqtrade new-strategy -s AwesomeStrategyMin --template minimal
|
freqtrade new-strategy -s AwesomeStrategyMin --template minimal
|
||||||
@@ -112,22 +92,18 @@ jobs:
|
|||||||
|
|
||||||
- name: Hyperopt
|
- name: Hyperopt
|
||||||
run: |
|
run: |
|
||||||
cp tests/testdata/config.tests.json config.json
|
cp config_examples/config_bittrex.example.json config.json
|
||||||
freqtrade create-userdir --userdir user_data
|
freqtrade create-userdir --userdir user_data
|
||||||
freqtrade hyperopt --datadir tests/testdata -e 6 --strategy SampleStrategy --hyperopt-loss SharpeHyperOptLossDaily --print-all
|
freqtrade hyperopt --datadir tests/testdata -e 5 --strategy SampleStrategy --hyperopt-loss SharpeHyperOptLossDaily --print-all
|
||||||
|
|
||||||
|
- name: Flake8
|
||||||
|
run: |
|
||||||
|
flake8
|
||||||
|
|
||||||
- name: Sort imports (isort)
|
- name: Sort imports (isort)
|
||||||
run: |
|
run: |
|
||||||
isort --check .
|
isort --check .
|
||||||
|
|
||||||
- name: Run Ruff
|
|
||||||
run: |
|
|
||||||
ruff check --output-format=github
|
|
||||||
|
|
||||||
- name: Run Ruff format check
|
|
||||||
run: |
|
|
||||||
ruff format --check
|
|
||||||
|
|
||||||
- name: Mypy
|
- name: Mypy
|
||||||
run: |
|
run: |
|
||||||
mypy freqtrade scripts tests
|
mypy freqtrade scripts tests
|
||||||
@@ -140,114 +116,77 @@ jobs:
|
|||||||
details: Freqtrade CI failed on ${{ matrix.os }}
|
details: Freqtrade CI failed on ${{ matrix.os }}
|
||||||
webhookUrl: ${{ secrets.DISCORD_WEBHOOK }}
|
webhookUrl: ${{ secrets.DISCORD_WEBHOOK }}
|
||||||
|
|
||||||
build-macos:
|
build_macos:
|
||||||
runs-on: ${{ matrix.os }}
|
runs-on: ${{ matrix.os }}
|
||||||
strategy:
|
strategy:
|
||||||
matrix:
|
matrix:
|
||||||
os: [ "macos-13", "macos-14", "macos-15" ]
|
os: [ macos-latest ]
|
||||||
python-version: ["3.10", "3.11", "3.12"]
|
python-version: ["3.8", "3.9", "3.10.6"]
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v3
|
||||||
|
|
||||||
- name: Set up Python
|
- name: Set up Python
|
||||||
uses: actions/setup-python@v5
|
uses: actions/setup-python@v4
|
||||||
with:
|
with:
|
||||||
python-version: ${{ matrix.python-version }}
|
python-version: ${{ matrix.python-version }}
|
||||||
check-latest: true
|
|
||||||
|
|
||||||
- name: Cache_dependencies
|
- name: Cache_dependencies
|
||||||
uses: actions/cache@v4
|
uses: actions/cache@v3
|
||||||
id: cache
|
id: cache
|
||||||
with:
|
with:
|
||||||
path: ~/dependencies/
|
path: ~/dependencies/
|
||||||
key: ${{ matrix.os }}-dependencies
|
key: ${{ runner.os }}-dependencies
|
||||||
|
|
||||||
- name: pip cache (macOS)
|
- name: pip cache (macOS)
|
||||||
uses: actions/cache@v4
|
uses: actions/cache@v3
|
||||||
|
if: runner.os == 'macOS'
|
||||||
with:
|
with:
|
||||||
path: ~/Library/Caches/pip
|
path: ~/Library/Caches/pip
|
||||||
key: pip-${{ matrix.os }}-${{ matrix.python-version }}
|
key: test-${{ matrix.os }}-${{ matrix.python-version }}-pip
|
||||||
|
|
||||||
- name: TA binary *nix
|
- name: TA binary *nix
|
||||||
if: steps.cache.outputs.cache-hit != 'true'
|
if: steps.cache.outputs.cache-hit != 'true'
|
||||||
run: |
|
run: |
|
||||||
cd build_helpers && ./install_ta-lib.sh ${HOME}/dependencies/; cd ..
|
cd build_helpers && ./install_ta-lib.sh ${HOME}/dependencies/; cd ..
|
||||||
|
|
||||||
- name: Installation - macOS (Brew)
|
- name: Installation - macOS
|
||||||
run: |
|
if: runner.os == 'macOS'
|
||||||
# brew update
|
|
||||||
# TODO: Should be the brew upgrade
|
|
||||||
# homebrew fails to update python due to unlinking failures
|
|
||||||
# https://github.com/actions/runner-images/issues/6817
|
|
||||||
rm /usr/local/bin/2to3 || true
|
|
||||||
rm /usr/local/bin/2to3-3.11 || true
|
|
||||||
rm /usr/local/bin/2to3-3.12 || true
|
|
||||||
rm /usr/local/bin/idle3 || true
|
|
||||||
rm /usr/local/bin/idle3.11 || true
|
|
||||||
rm /usr/local/bin/idle3.12 || true
|
|
||||||
rm /usr/local/bin/pydoc3 || true
|
|
||||||
rm /usr/local/bin/pydoc3.11 || true
|
|
||||||
rm /usr/local/bin/pydoc3.12 || true
|
|
||||||
rm /usr/local/bin/python3 || true
|
|
||||||
rm /usr/local/bin/python3.11 || true
|
|
||||||
rm /usr/local/bin/python3.12 || true
|
|
||||||
rm /usr/local/bin/python3-config || true
|
|
||||||
rm /usr/local/bin/python3.11-config || true
|
|
||||||
rm /usr/local/bin/python3.12-config || true
|
|
||||||
|
|
||||||
brew install libomp
|
|
||||||
|
|
||||||
- name: Installation (python)
|
|
||||||
run: |
|
run: |
|
||||||
|
brew update
|
||||||
|
brew install hdf5 c-blosc
|
||||||
python -m pip install --upgrade pip wheel
|
python -m pip install --upgrade pip wheel
|
||||||
export LD_LIBRARY_PATH=${HOME}/dependencies/lib:$LD_LIBRARY_PATH
|
export LD_LIBRARY_PATH=${HOME}/dependencies/lib:$LD_LIBRARY_PATH
|
||||||
export TA_LIBRARY_PATH=${HOME}/dependencies/lib
|
export TA_LIBRARY_PATH=${HOME}/dependencies/lib
|
||||||
export TA_INCLUDE_PATH=${HOME}/dependencies/include
|
export TA_INCLUDE_PATH=${HOME}/dependencies/include
|
||||||
pip install -r requirements-dev.txt
|
pip install -r requirements-dev.txt
|
||||||
pip install -e ft_client/
|
|
||||||
pip install -e .
|
pip install -e .
|
||||||
|
|
||||||
- name: Tests
|
- name: Tests
|
||||||
run: |
|
run: |
|
||||||
pytest --random-order
|
pytest --random-order
|
||||||
|
|
||||||
- name: Check for repository changes
|
|
||||||
run: |
|
|
||||||
if [ -n "$(git status --porcelain)" ]; then
|
|
||||||
echo "Repository is dirty, changes detected:"
|
|
||||||
git status
|
|
||||||
git diff
|
|
||||||
exit 1
|
|
||||||
else
|
|
||||||
echo "Repository is clean, no changes detected."
|
|
||||||
fi
|
|
||||||
|
|
||||||
- name: Backtesting
|
- name: Backtesting
|
||||||
run: |
|
run: |
|
||||||
cp tests/testdata/config.tests.json config.json
|
cp config_examples/config_bittrex.example.json config.json
|
||||||
freqtrade create-userdir --userdir user_data
|
freqtrade create-userdir --userdir user_data
|
||||||
freqtrade new-strategy -s AwesomeStrategyAdv --template advanced
|
freqtrade new-strategy -s AwesomeStrategyAdv --template advanced
|
||||||
freqtrade backtesting --datadir tests/testdata --strategy AwesomeStrategyAdv
|
freqtrade backtesting --datadir tests/testdata --strategy AwesomeStrategyAdv
|
||||||
|
|
||||||
- name: Hyperopt
|
- name: Hyperopt
|
||||||
run: |
|
run: |
|
||||||
cp tests/testdata/config.tests.json config.json
|
cp config_examples/config_bittrex.example.json config.json
|
||||||
freqtrade create-userdir --userdir user_data
|
freqtrade create-userdir --userdir user_data
|
||||||
freqtrade hyperopt --datadir tests/testdata -e 5 --strategy SampleStrategy --hyperopt-loss SharpeHyperOptLossDaily --print-all
|
freqtrade hyperopt --datadir tests/testdata -e 5 --strategy SampleStrategy --hyperopt-loss SharpeHyperOptLossDaily --print-all
|
||||||
|
|
||||||
|
- name: Flake8
|
||||||
|
run: |
|
||||||
|
flake8
|
||||||
|
|
||||||
- name: Sort imports (isort)
|
- name: Sort imports (isort)
|
||||||
run: |
|
run: |
|
||||||
isort --check .
|
isort --check .
|
||||||
|
|
||||||
- name: Run Ruff
|
|
||||||
run: |
|
|
||||||
ruff check --output-format=github
|
|
||||||
|
|
||||||
- name: Run Ruff format check
|
|
||||||
run: |
|
|
||||||
ruff format --check
|
|
||||||
|
|
||||||
- name: Mypy
|
- name: Mypy
|
||||||
run: |
|
run: |
|
||||||
mypy freqtrade scripts
|
mypy freqtrade scripts
|
||||||
@@ -260,92 +199,56 @@ jobs:
|
|||||||
details: Test Succeeded!
|
details: Test Succeeded!
|
||||||
webhookUrl: ${{ secrets.DISCORD_WEBHOOK }}
|
webhookUrl: ${{ secrets.DISCORD_WEBHOOK }}
|
||||||
|
|
||||||
build-windows:
|
build_windows:
|
||||||
|
|
||||||
runs-on: ${{ matrix.os }}
|
runs-on: ${{ matrix.os }}
|
||||||
strategy:
|
strategy:
|
||||||
matrix:
|
matrix:
|
||||||
os: [ windows-latest ]
|
os: [ windows-latest ]
|
||||||
python-version: ["3.10", "3.11", "3.12"]
|
python-version: ["3.8", "3.9", "3.10.6"]
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v3
|
||||||
|
|
||||||
- name: Set up Python
|
- name: Set up Python
|
||||||
uses: actions/setup-python@v5
|
uses: actions/setup-python@v4
|
||||||
with:
|
with:
|
||||||
python-version: ${{ matrix.python-version }}
|
python-version: ${{ matrix.python-version }}
|
||||||
|
|
||||||
- name: Install uv
|
- name: Pip cache (Windows)
|
||||||
uses: astral-sh/setup-uv@v5
|
uses: actions/cache@v3
|
||||||
with:
|
with:
|
||||||
enable-cache: true
|
path: ~\AppData\Local\pip\Cache
|
||||||
cache-dependency-glob: "requirements**.txt"
|
key: ${{ matrix.os }}-${{ matrix.python-version }}-pip
|
||||||
cache-suffix: "${{ matrix.python-version }}"
|
|
||||||
prune-cache: false
|
|
||||||
|
|
||||||
- name: Installation
|
- name: Installation
|
||||||
run: |
|
run: |
|
||||||
uv venv
|
|
||||||
.venv\Scripts\activate
|
|
||||||
# persist the venv path for future steps
|
|
||||||
"$(pwd)/.venv/Scripts" >> $env:GITHUB_PATH
|
|
||||||
|
|
||||||
function uvpipFunction { uv pip $args }
|
|
||||||
Set-Alias -name pip -value uvpipFunction
|
|
||||||
./build_helpers/install_windows.ps1
|
./build_helpers/install_windows.ps1
|
||||||
|
|
||||||
- name: Tests
|
- name: Tests
|
||||||
run: |
|
run: |
|
||||||
pytest --random-order
|
pytest --random-order
|
||||||
|
|
||||||
- name: Check for repository changes
|
|
||||||
run: |
|
|
||||||
if (git status --porcelain) {
|
|
||||||
Write-Host "Repository is dirty, changes detected:"
|
|
||||||
git status
|
|
||||||
git diff
|
|
||||||
exit 1
|
|
||||||
}
|
|
||||||
else {
|
|
||||||
Write-Host "Repository is clean, no changes detected."
|
|
||||||
}
|
|
||||||
|
|
||||||
- name: Backtesting
|
- name: Backtesting
|
||||||
run: |
|
run: |
|
||||||
cp tests/testdata/config.tests.json config.json
|
cp config_examples/config_bittrex.example.json config.json
|
||||||
freqtrade create-userdir --userdir user_data
|
freqtrade create-userdir --userdir user_data
|
||||||
freqtrade backtesting --datadir tests/testdata --strategy SampleStrategy
|
freqtrade backtesting --datadir tests/testdata --strategy SampleStrategy
|
||||||
|
|
||||||
- name: Hyperopt
|
- name: Hyperopt
|
||||||
run: |
|
run: |
|
||||||
cp tests/testdata/config.tests.json config.json
|
cp config_examples/config_bittrex.example.json config.json
|
||||||
freqtrade create-userdir --userdir user_data
|
freqtrade create-userdir --userdir user_data
|
||||||
freqtrade hyperopt --datadir tests/testdata -e 5 --strategy SampleStrategy --hyperopt-loss SharpeHyperOptLossDaily --print-all
|
freqtrade hyperopt --datadir tests/testdata -e 5 --strategy SampleStrategy --hyperopt-loss SharpeHyperOptLossDaily --print-all
|
||||||
|
|
||||||
- name: Run Ruff
|
- name: Flake8
|
||||||
run: |
|
run: |
|
||||||
ruff check --output-format=github
|
flake8
|
||||||
|
|
||||||
- name: Run Ruff format check
|
|
||||||
run: |
|
|
||||||
ruff format --check
|
|
||||||
|
|
||||||
- name: Mypy
|
- name: Mypy
|
||||||
run: |
|
run: |
|
||||||
mypy freqtrade scripts tests
|
mypy freqtrade scripts tests
|
||||||
|
|
||||||
- name: Run Pester tests (PowerShell)
|
|
||||||
run: |
|
|
||||||
$PSVersionTable
|
|
||||||
Set-PSRepository psgallery -InstallationPolicy trusted
|
|
||||||
Install-Module -Name Pester -RequiredVersion 5.3.1 -Confirm:$false -Force -SkipPublisherCheck
|
|
||||||
$Error.clear()
|
|
||||||
Invoke-Pester -Path "tests" -CI
|
|
||||||
if ($Error.Length -gt 0) {exit 1}
|
|
||||||
|
|
||||||
shell: powershell
|
|
||||||
|
|
||||||
- name: Discord notification
|
- name: Discord notification
|
||||||
uses: rjstone/discord-webhook-notify@v1
|
uses: rjstone/discord-webhook-notify@v1
|
||||||
if: failure() && ( github.event_name != 'pull_request' || github.event.pull_request.head.repo.fork == false)
|
if: failure() && ( github.event_name != 'pull_request' || github.event.pull_request.head.repo.fork == false)
|
||||||
@@ -354,15 +257,15 @@ jobs:
|
|||||||
details: Test Failed
|
details: Test Failed
|
||||||
webhookUrl: ${{ secrets.DISCORD_WEBHOOK }}
|
webhookUrl: ${{ secrets.DISCORD_WEBHOOK }}
|
||||||
|
|
||||||
mypy-version-check:
|
mypy_version_check:
|
||||||
runs-on: ubuntu-22.04
|
runs-on: ubuntu-20.04
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v3
|
||||||
|
|
||||||
- name: Set up Python
|
- name: Set up Python
|
||||||
uses: actions/setup-python@v5
|
uses: actions/setup-python@v4
|
||||||
with:
|
with:
|
||||||
python-version: "3.12"
|
python-version: "3.10"
|
||||||
|
|
||||||
- name: pre-commit dependencies
|
- name: pre-commit dependencies
|
||||||
run: |
|
run: |
|
||||||
@@ -372,30 +275,31 @@ jobs:
|
|||||||
pre-commit:
|
pre-commit:
|
||||||
runs-on: ubuntu-22.04
|
runs-on: ubuntu-22.04
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v3
|
||||||
|
|
||||||
- uses: actions/setup-python@v5
|
- uses: actions/setup-python@v4
|
||||||
with:
|
with:
|
||||||
python-version: "3.12"
|
python-version: "3.10"
|
||||||
- uses: pre-commit/action@v3.0.1
|
- uses: pre-commit/action@v3.0.0
|
||||||
|
|
||||||
docs-check:
|
docs_check:
|
||||||
runs-on: ubuntu-22.04
|
runs-on: ubuntu-20.04
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v3
|
||||||
|
|
||||||
- name: Documentation syntax
|
- name: Documentation syntax
|
||||||
run: |
|
run: |
|
||||||
./tests/test_docs.sh
|
./tests/test_docs.sh
|
||||||
|
|
||||||
- name: Set up Python
|
- name: Set up Python
|
||||||
uses: actions/setup-python@v5
|
uses: actions/setup-python@v4
|
||||||
with:
|
with:
|
||||||
python-version: "3.12"
|
python-version: "3.10"
|
||||||
|
|
||||||
- name: Documentation build
|
- name: Documentation build
|
||||||
run: |
|
run: |
|
||||||
pip install -r docs/requirements-docs.txt
|
pip install -r docs/requirements-docs.txt
|
||||||
|
pip install mkdocs
|
||||||
mkdocs build
|
mkdocs build
|
||||||
|
|
||||||
- name: Discord notification
|
- name: Discord notification
|
||||||
@@ -406,67 +310,12 @@ jobs:
|
|||||||
details: Freqtrade doc test failed!
|
details: Freqtrade doc test failed!
|
||||||
webhookUrl: ${{ secrets.DISCORD_WEBHOOK }}
|
webhookUrl: ${{ secrets.DISCORD_WEBHOOK }}
|
||||||
|
|
||||||
|
# Notify only once - when CI completes (and after deploy) in case it's successfull
|
||||||
build-linux-online:
|
|
||||||
# Run pytest with "live" checks
|
|
||||||
runs-on: ubuntu-22.04
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v4
|
|
||||||
|
|
||||||
- name: Set up Python
|
|
||||||
uses: actions/setup-python@v5
|
|
||||||
with:
|
|
||||||
python-version: "3.12"
|
|
||||||
|
|
||||||
- name: Cache_dependencies
|
|
||||||
uses: actions/cache@v4
|
|
||||||
id: cache
|
|
||||||
with:
|
|
||||||
path: ~/dependencies/
|
|
||||||
key: ${{ runner.os }}-dependencies
|
|
||||||
|
|
||||||
- name: pip cache (linux)
|
|
||||||
uses: actions/cache@v4
|
|
||||||
with:
|
|
||||||
path: ~/.cache/pip
|
|
||||||
key: pip-3.12-ubuntu
|
|
||||||
|
|
||||||
- name: TA binary *nix
|
|
||||||
if: steps.cache.outputs.cache-hit != 'true'
|
|
||||||
run: |
|
|
||||||
cd build_helpers && ./install_ta-lib.sh ${HOME}/dependencies/; cd ..
|
|
||||||
|
|
||||||
- name: Installation - *nix
|
|
||||||
run: |
|
|
||||||
python -m pip install --upgrade pip wheel
|
|
||||||
export LD_LIBRARY_PATH=${HOME}/dependencies/lib:$LD_LIBRARY_PATH
|
|
||||||
export TA_LIBRARY_PATH=${HOME}/dependencies/lib
|
|
||||||
export TA_INCLUDE_PATH=${HOME}/dependencies/include
|
|
||||||
pip install -r requirements-dev.txt
|
|
||||||
pip install -e ft_client/
|
|
||||||
pip install -e .
|
|
||||||
|
|
||||||
- name: Tests incl. ccxt compatibility tests
|
|
||||||
env:
|
|
||||||
CI_WEB_PROXY: http://152.67.78.211:13128
|
|
||||||
run: |
|
|
||||||
pytest --random-order --longrun --durations 20 -n auto
|
|
||||||
|
|
||||||
|
|
||||||
# Notify only once - when CI completes (and after deploy) in case it's successful
|
|
||||||
notify-complete:
|
notify-complete:
|
||||||
needs: [
|
needs: [ build_linux, build_macos, build_windows, docs_check, mypy_version_check, pre-commit ]
|
||||||
build-linux,
|
runs-on: ubuntu-20.04
|
||||||
build-macos,
|
|
||||||
build-windows,
|
|
||||||
docs-check,
|
|
||||||
mypy-version-check,
|
|
||||||
pre-commit,
|
|
||||||
build-linux-online
|
|
||||||
]
|
|
||||||
runs-on: ubuntu-22.04
|
|
||||||
# Discord notification can't handle schedule events
|
# Discord notification can't handle schedule events
|
||||||
if: github.event_name != 'schedule' && github.repository == 'freqtrade/freqtrade'
|
if: (github.event_name != 'schedule')
|
||||||
permissions:
|
permissions:
|
||||||
repository-projects: read
|
repository-projects: read
|
||||||
steps:
|
steps:
|
||||||
@@ -487,116 +336,44 @@ jobs:
|
|||||||
details: Test Completed!
|
details: Test Completed!
|
||||||
webhookUrl: ${{ secrets.DISCORD_WEBHOOK }}
|
webhookUrl: ${{ secrets.DISCORD_WEBHOOK }}
|
||||||
|
|
||||||
build:
|
deploy:
|
||||||
name: "Build"
|
needs: [ build_linux, build_macos, build_windows, docs_check, mypy_version_check, pre-commit ]
|
||||||
needs: [ build-linux, build-macos, build-windows, docs-check, mypy-version-check, pre-commit ]
|
runs-on: ubuntu-20.04
|
||||||
runs-on: ubuntu-22.04
|
|
||||||
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v4
|
|
||||||
|
|
||||||
- name: Set up Python
|
|
||||||
uses: actions/setup-python@v5
|
|
||||||
with:
|
|
||||||
python-version: "3.12"
|
|
||||||
|
|
||||||
- name: Build distribution
|
|
||||||
run: |
|
|
||||||
pip install -U build
|
|
||||||
python -m build --sdist --wheel
|
|
||||||
|
|
||||||
- name: Upload artifacts 📦
|
|
||||||
uses: actions/upload-artifact@v4
|
|
||||||
with:
|
|
||||||
name: freqtrade-build
|
|
||||||
path: |
|
|
||||||
dist
|
|
||||||
retention-days: 10
|
|
||||||
|
|
||||||
- name: Build Client distribution
|
|
||||||
run: |
|
|
||||||
pip install -U build
|
|
||||||
python -m build --sdist --wheel ft_client
|
|
||||||
|
|
||||||
- name: Upload artifacts 📦
|
|
||||||
uses: actions/upload-artifact@v4
|
|
||||||
with:
|
|
||||||
name: freqtrade-client-build
|
|
||||||
path: |
|
|
||||||
ft_client/dist
|
|
||||||
retention-days: 10
|
|
||||||
|
|
||||||
deploy-test-pypi:
|
|
||||||
name: "Publish Python 🐍 distribution 📦 to TestPyPI"
|
|
||||||
needs: [ build ]
|
|
||||||
runs-on: ubuntu-22.04
|
|
||||||
if: (github.event_name == 'release')
|
|
||||||
environment:
|
|
||||||
name: testpypi
|
|
||||||
url: https://test.pypi.org/p/freqtrade
|
|
||||||
permissions:
|
|
||||||
id-token: write
|
|
||||||
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v4
|
|
||||||
|
|
||||||
- name: Download artifact 📦
|
|
||||||
uses: actions/download-artifact@v4
|
|
||||||
with:
|
|
||||||
pattern: freqtrade*-build
|
|
||||||
path: dist
|
|
||||||
merge-multiple: true
|
|
||||||
|
|
||||||
- name: Publish to PyPI (Test)
|
|
||||||
uses: pypa/gh-action-pypi-publish@v1.12.3
|
|
||||||
with:
|
|
||||||
repository-url: https://test.pypi.org/legacy/
|
|
||||||
|
|
||||||
|
|
||||||
deploy-pypi:
|
|
||||||
name: "Publish Python 🐍 distribution 📦 to PyPI"
|
|
||||||
needs: [ build ]
|
|
||||||
runs-on: ubuntu-22.04
|
|
||||||
if: (github.event_name == 'release')
|
|
||||||
environment:
|
|
||||||
name: pypi
|
|
||||||
url: https://pypi.org/p/freqtrade
|
|
||||||
permissions:
|
|
||||||
id-token: write
|
|
||||||
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v4
|
|
||||||
|
|
||||||
- name: Download artifact 📦
|
|
||||||
uses: actions/download-artifact@v4
|
|
||||||
with:
|
|
||||||
pattern: freqtrade*-build
|
|
||||||
path: dist
|
|
||||||
merge-multiple: true
|
|
||||||
|
|
||||||
- name: Publish to PyPI
|
|
||||||
uses: pypa/gh-action-pypi-publish@v1.12.3
|
|
||||||
|
|
||||||
|
|
||||||
deploy-docker:
|
|
||||||
needs: [ build-linux, build-macos, build-windows, docs-check, mypy-version-check, pre-commit ]
|
|
||||||
runs-on: ubuntu-22.04
|
|
||||||
|
|
||||||
if: (github.event_name == 'push' || github.event_name == 'schedule' || github.event_name == 'release') && github.repository == 'freqtrade/freqtrade'
|
if: (github.event_name == 'push' || github.event_name == 'schedule' || github.event_name == 'release') && github.repository == 'freqtrade/freqtrade'
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v3
|
||||||
|
|
||||||
- name: Set up Python
|
- name: Set up Python
|
||||||
uses: actions/setup-python@v5
|
uses: actions/setup-python@v4
|
||||||
with:
|
with:
|
||||||
python-version: "3.12"
|
python-version: "3.9"
|
||||||
|
|
||||||
- name: Extract branch name
|
- name: Extract branch name
|
||||||
id: extract-branch
|
shell: bash
|
||||||
|
run: echo "##[set-output name=branch;]$(echo ${GITHUB_REF##*/})"
|
||||||
|
id: extract_branch
|
||||||
|
|
||||||
|
- name: Build distribution
|
||||||
run: |
|
run: |
|
||||||
echo "GITHUB_REF='${GITHUB_REF}'"
|
pip install -U setuptools wheel
|
||||||
echo "branch=${GITHUB_REF##*/}" >> "$GITHUB_OUTPUT"
|
python setup.py sdist bdist_wheel
|
||||||
|
|
||||||
|
- name: Publish to PyPI (Test)
|
||||||
|
uses: pypa/gh-action-pypi-publish@v1.5.1
|
||||||
|
if: (github.event_name == 'release')
|
||||||
|
with:
|
||||||
|
user: __token__
|
||||||
|
password: ${{ secrets.pypi_test_password }}
|
||||||
|
repository_url: https://test.pypi.org/legacy/
|
||||||
|
|
||||||
|
- name: Publish to PyPI
|
||||||
|
uses: pypa/gh-action-pypi-publish@v1.5.1
|
||||||
|
if: (github.event_name == 'release')
|
||||||
|
with:
|
||||||
|
user: __token__
|
||||||
|
password: ${{ secrets.pypi_password }}
|
||||||
|
|
||||||
- name: Dockerhub login
|
- name: Dockerhub login
|
||||||
env:
|
env:
|
||||||
@@ -613,39 +390,36 @@ jobs:
|
|||||||
sudo systemctl restart docker
|
sudo systemctl restart docker
|
||||||
docker version -f '{{.Server.Experimental}}'
|
docker version -f '{{.Server.Experimental}}'
|
||||||
|
|
||||||
- name: Set up QEMU
|
|
||||||
uses: docker/setup-qemu-action@v3
|
|
||||||
|
|
||||||
- name: Set up Docker Buildx
|
- name: Set up Docker Buildx
|
||||||
id: buildx
|
id: buildx
|
||||||
uses: docker/setup-buildx-action@v3
|
uses: crazy-max/ghaction-docker-buildx@v3.3.1
|
||||||
|
with:
|
||||||
|
buildx-version: latest
|
||||||
|
qemu-version: latest
|
||||||
|
|
||||||
- name: Available platforms
|
- name: Available platforms
|
||||||
run: echo ${{ steps.buildx.outputs.platforms }}
|
run: echo ${{ steps.buildx.outputs.platforms }}
|
||||||
|
|
||||||
- name: Build and test and push docker images
|
- name: Build and test and push docker images
|
||||||
env:
|
env:
|
||||||
BRANCH_NAME: ${{ steps.extract-branch.outputs.branch }}
|
IMAGE_NAME: freqtradeorg/freqtrade
|
||||||
|
BRANCH_NAME: ${{ steps.extract_branch.outputs.branch }}
|
||||||
run: |
|
run: |
|
||||||
build_helpers/publish_docker_multi.sh
|
build_helpers/publish_docker_multi.sh
|
||||||
|
|
||||||
deploy-arm:
|
deploy_arm:
|
||||||
name: "Deploy Docker"
|
needs: [ deploy ]
|
||||||
permissions:
|
|
||||||
packages: write
|
|
||||||
needs: [ deploy-docker ]
|
|
||||||
# Only run on 64bit machines
|
# Only run on 64bit machines
|
||||||
runs-on: [self-hosted, linux, ARM64]
|
runs-on: [self-hosted, linux, ARM64]
|
||||||
if: (github.event_name == 'push' || github.event_name == 'schedule' || github.event_name == 'release') && github.repository == 'freqtrade/freqtrade'
|
if: (github.event_name == 'push' || github.event_name == 'schedule' || github.event_name == 'release') && github.repository == 'freqtrade/freqtrade'
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v3
|
||||||
|
|
||||||
- name: Extract branch name
|
- name: Extract branch name
|
||||||
id: extract-branch
|
shell: bash
|
||||||
run: |
|
run: echo "##[set-output name=branch;]$(echo ${GITHUB_REF##*/})"
|
||||||
echo "GITHUB_REF='${GITHUB_REF}'"
|
id: extract_branch
|
||||||
echo "branch=${GITHUB_REF##*/}" >> "$GITHUB_OUTPUT"
|
|
||||||
|
|
||||||
- name: Dockerhub login
|
- name: Dockerhub login
|
||||||
env:
|
env:
|
||||||
@@ -656,9 +430,8 @@ jobs:
|
|||||||
|
|
||||||
- name: Build and test and push docker images
|
- name: Build and test and push docker images
|
||||||
env:
|
env:
|
||||||
BRANCH_NAME: ${{ steps.extract-branch.outputs.branch }}
|
IMAGE_NAME: freqtradeorg/freqtrade
|
||||||
GHCR_USERNAME: ${{ github.actor }}
|
BRANCH_NAME: ${{ steps.extract_branch.outputs.branch }}
|
||||||
GHCR_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
run: |
|
run: |
|
||||||
build_helpers/publish_docker_arm64.sh
|
build_helpers/publish_docker_arm64.sh
|
||||||
|
|
||||||
|
|||||||
55
.github/workflows/deploy-docs.yml
vendored
@@ -1,55 +0,0 @@
|
|||||||
name: Build Documentation
|
|
||||||
|
|
||||||
on:
|
|
||||||
push:
|
|
||||||
branches:
|
|
||||||
- develop
|
|
||||||
release:
|
|
||||||
types: [published]
|
|
||||||
|
|
||||||
|
|
||||||
# disable permissions for all of the available permissions
|
|
||||||
permissions: {}
|
|
||||||
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
build-docs:
|
|
||||||
permissions:
|
|
||||||
contents: write # for mike to push
|
|
||||||
name: Deploy Docs through mike
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v4
|
|
||||||
|
|
||||||
- name: Set up Python
|
|
||||||
uses: actions/setup-python@v5
|
|
||||||
with:
|
|
||||||
python-version: '3.12'
|
|
||||||
|
|
||||||
- name: Install dependencies
|
|
||||||
run: |
|
|
||||||
python -m pip install --upgrade pip
|
|
||||||
pip install -r docs/requirements-docs.txt
|
|
||||||
|
|
||||||
- name: Fetch gh-pages branch
|
|
||||||
run: |
|
|
||||||
git fetch origin gh-pages --depth=1
|
|
||||||
|
|
||||||
- name: Configure Git user
|
|
||||||
run: |
|
|
||||||
git config --local user.email "github-actions[bot]@users.noreply.github.com"
|
|
||||||
git config --local user.name "github-actions[bot]"
|
|
||||||
|
|
||||||
- name: Build and push Mike
|
|
||||||
if: ${{ github.event_name == 'push' }}
|
|
||||||
run: |
|
|
||||||
mike deploy ${{ github.ref_name }} latest --push --update-aliases
|
|
||||||
|
|
||||||
- name: Build and push Mike - Release
|
|
||||||
if: ${{ github.event_name == 'release' }}
|
|
||||||
run: |
|
|
||||||
mike deploy ${{ github.ref_name }} stable --push --update-aliases
|
|
||||||
|
|
||||||
- name: Show mike versions
|
|
||||||
run: |
|
|
||||||
mike list
|
|
||||||
45
.github/workflows/devcontainer-build.yml
vendored
@@ -1,45 +0,0 @@
|
|||||||
name: Devcontainer Pre-Build
|
|
||||||
|
|
||||||
on:
|
|
||||||
workflow_dispatch:
|
|
||||||
schedule:
|
|
||||||
- cron: "0 3 * * 0"
|
|
||||||
# push:
|
|
||||||
# branches:
|
|
||||||
# - "master"
|
|
||||||
# tags:
|
|
||||||
# - "v*.*.*"
|
|
||||||
# pull_requests:
|
|
||||||
# branches:
|
|
||||||
# - "master"
|
|
||||||
|
|
||||||
concurrency:
|
|
||||||
group: "${{ github.workflow }}"
|
|
||||||
cancel-in-progress: true
|
|
||||||
|
|
||||||
permissions:
|
|
||||||
packages: write
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
build-and-push:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
-
|
|
||||||
name: Checkout
|
|
||||||
id: checkout
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
-
|
|
||||||
name: Login to GitHub Container Registry
|
|
||||||
uses: docker/login-action@v3
|
|
||||||
with:
|
|
||||||
registry: ghcr.io
|
|
||||||
username: ${{ github.actor }}
|
|
||||||
password: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
-
|
|
||||||
name: Pre-build dev container image
|
|
||||||
uses: devcontainers/ci@v0.3
|
|
||||||
with:
|
|
||||||
subFolder: .github
|
|
||||||
imageName: ghcr.io/${{ github.repository }}-devcontainer
|
|
||||||
cacheFrom: ghcr.io/${{ github.repository }}-devcontainer
|
|
||||||
push: always
|
|
||||||
18
.github/workflows/docker-update-readme.yml
vendored
@@ -1,18 +0,0 @@
|
|||||||
name: Update Docker Hub Description
|
|
||||||
on:
|
|
||||||
push:
|
|
||||||
branches:
|
|
||||||
- stable
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
dockerHubDescription:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v4
|
|
||||||
|
|
||||||
- name: Docker Hub Description
|
|
||||||
uses: peter-evans/dockerhub-description@v4
|
|
||||||
with:
|
|
||||||
username: ${{ secrets.DOCKER_USERNAME }}
|
|
||||||
password: ${{ secrets.DOCKER_PASSWORD }}
|
|
||||||
repository: freqtradeorg/freqtrade
|
|
||||||
17
.github/workflows/docker_update_readme.yml
vendored
Normal file
@@ -0,0 +1,17 @@
|
|||||||
|
name: Update Docker Hub Description
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches:
|
||||||
|
- stable
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
dockerHubDescription:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v3
|
||||||
|
- name: Docker Hub Description
|
||||||
|
uses: peter-evans/dockerhub-description@v3
|
||||||
|
env:
|
||||||
|
DOCKERHUB_USERNAME: ${{ secrets.DOCKER_USERNAME }}
|
||||||
|
DOCKERHUB_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
|
||||||
|
DOCKERHUB_REPOSITORY: freqtradeorg/freqtrade
|
||||||
23
.github/workflows/draft-pdf.yml
vendored
Normal file
@@ -0,0 +1,23 @@
|
|||||||
|
on: [push]
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
paper:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
name: Paper Draft
|
||||||
|
steps:
|
||||||
|
- name: Checkout
|
||||||
|
uses: actions/checkout@v2
|
||||||
|
- name: Build draft PDF
|
||||||
|
uses: openjournals/openjournals-draft-action@master
|
||||||
|
with:
|
||||||
|
journal: joss
|
||||||
|
# This should be the path to the paper within your repo.
|
||||||
|
paper-path: docs/JOSS_paper/paper.md
|
||||||
|
- name: Upload
|
||||||
|
uses: actions/upload-artifact@v1
|
||||||
|
with:
|
||||||
|
name: paper
|
||||||
|
# This is the output path where Pandoc will write the compiled
|
||||||
|
# PDF. Note, this should be the same directory as the input
|
||||||
|
# paper.md
|
||||||
|
path: docs/JOSS_paper/paper.pdf
|
||||||
41
.github/workflows/pre-commit-update.yml
vendored
@@ -1,41 +0,0 @@
|
|||||||
name: Pre-commit auto-update
|
|
||||||
|
|
||||||
on:
|
|
||||||
schedule:
|
|
||||||
- cron: "0 3 * * 2"
|
|
||||||
# on demand
|
|
||||||
workflow_dispatch:
|
|
||||||
|
|
||||||
permissions:
|
|
||||||
contents: read
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
auto-update:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v4
|
|
||||||
|
|
||||||
- uses: actions/setup-python@v5
|
|
||||||
with:
|
|
||||||
python-version: "3.12"
|
|
||||||
|
|
||||||
|
|
||||||
- name: Install pre-commit
|
|
||||||
run: pip install pre-commit
|
|
||||||
|
|
||||||
- name: Run auto-update
|
|
||||||
run: pre-commit autoupdate
|
|
||||||
|
|
||||||
- uses: peter-evans/create-pull-request@v7
|
|
||||||
with:
|
|
||||||
token: ${{ secrets.REPO_SCOPED_TOKEN }}
|
|
||||||
add-paths: .pre-commit-config.yaml
|
|
||||||
labels: |
|
|
||||||
Tech maintenance
|
|
||||||
Dependencies
|
|
||||||
branch: update/pre-commit-hooks
|
|
||||||
title: Update pre-commit hooks
|
|
||||||
commit-message: "chore: update pre-commit hooks"
|
|
||||||
committer: Freqtrade Bot <noreply@github.com>
|
|
||||||
body: Update versions of pre-commit hooks to latest version.
|
|
||||||
delete-branch: true
|
|
||||||
8
.gitignore
vendored
@@ -83,9 +83,6 @@ instance/
|
|||||||
# Scrapy stuff:
|
# Scrapy stuff:
|
||||||
.scrapy
|
.scrapy
|
||||||
|
|
||||||
# memray
|
|
||||||
memray-*
|
|
||||||
|
|
||||||
# Sphinx documentation
|
# Sphinx documentation
|
||||||
docs/_build/
|
docs/_build/
|
||||||
# Mkdocs documentation
|
# Mkdocs documentation
|
||||||
@@ -111,8 +108,9 @@ target/
|
|||||||
#exceptions
|
#exceptions
|
||||||
!*.gitkeep
|
!*.gitkeep
|
||||||
!config_examples/config_binance.example.json
|
!config_examples/config_binance.example.json
|
||||||
|
!config_examples/config_bittrex.example.json
|
||||||
|
!config_examples/config_ftx.example.json
|
||||||
!config_examples/config_full.example.json
|
!config_examples/config_full.example.json
|
||||||
!config_examples/config_kraken.example.json
|
!config_examples/config_kraken.example.json
|
||||||
!config_examples/config_freqai.example.json
|
!config_examples/config_freqai.example.json
|
||||||
|
!config_examples/config_freqai-rl.example.json
|
||||||
docker-compose-*.yml
|
|
||||||
|
|||||||
@@ -2,42 +2,33 @@
|
|||||||
# See https://pre-commit.com/hooks.html for more hooks
|
# See https://pre-commit.com/hooks.html for more hooks
|
||||||
repos:
|
repos:
|
||||||
- repo: https://github.com/pycqa/flake8
|
- repo: https://github.com/pycqa/flake8
|
||||||
rev: "7.1.1"
|
rev: "4.0.1"
|
||||||
hooks:
|
hooks:
|
||||||
- id: flake8
|
- id: flake8
|
||||||
additional_dependencies: [Flake8-pyproject]
|
|
||||||
# stages: [push]
|
# stages: [push]
|
||||||
|
|
||||||
- repo: https://github.com/pre-commit/mirrors-mypy
|
- repo: https://github.com/pre-commit/mirrors-mypy
|
||||||
rev: "v1.14.0"
|
rev: "v0.942"
|
||||||
hooks:
|
hooks:
|
||||||
- id: mypy
|
- id: mypy
|
||||||
exclude: build_helpers
|
exclude: build_helpers
|
||||||
additional_dependencies:
|
additional_dependencies:
|
||||||
- types-cachetools==5.5.0.20240820
|
- types-cachetools==5.2.1
|
||||||
- types-filelock==3.2.7
|
- types-filelock==3.2.7
|
||||||
- types-requests==2.32.0.20241016
|
- types-requests==2.28.11
|
||||||
- types-tabulate==0.9.0.20241207
|
- types-tabulate==0.8.11
|
||||||
- types-python-dateutil==2.9.0.20241206
|
- types-python-dateutil==2.8.19
|
||||||
- SQLAlchemy==2.0.36
|
|
||||||
# stages: [push]
|
# stages: [push]
|
||||||
|
|
||||||
- repo: https://github.com/pycqa/isort
|
- repo: https://github.com/pycqa/isort
|
||||||
rev: "5.13.2"
|
rev: "5.10.1"
|
||||||
hooks:
|
hooks:
|
||||||
- id: isort
|
- id: isort
|
||||||
name: isort (python)
|
name: isort (python)
|
||||||
# stages: [push]
|
# stages: [push]
|
||||||
|
|
||||||
- repo: https://github.com/charliermarsh/ruff-pre-commit
|
|
||||||
# Ruff version.
|
|
||||||
rev: 'v0.8.4'
|
|
||||||
hooks:
|
|
||||||
- id: ruff
|
|
||||||
- id: ruff-format
|
|
||||||
|
|
||||||
- repo: https://github.com/pre-commit/pre-commit-hooks
|
- repo: https://github.com/pre-commit/pre-commit-hooks
|
||||||
rev: v5.0.0
|
rev: v2.4.0
|
||||||
hooks:
|
hooks:
|
||||||
- id: end-of-file-fixer
|
- id: end-of-file-fixer
|
||||||
exclude: |
|
exclude: |
|
||||||
@@ -55,15 +46,3 @@ repos:
|
|||||||
(?x)^(
|
(?x)^(
|
||||||
.*\.md
|
.*\.md
|
||||||
)$
|
)$
|
||||||
|
|
||||||
- repo: https://github.com/stefmolin/exif-stripper
|
|
||||||
rev: 0.6.1
|
|
||||||
hooks:
|
|
||||||
- id: strip-exif
|
|
||||||
|
|
||||||
- repo: https://github.com/codespell-project/codespell
|
|
||||||
rev: v2.3.0
|
|
||||||
hooks:
|
|
||||||
- id: codespell
|
|
||||||
additional_dependencies:
|
|
||||||
- tomli
|
|
||||||
|
|||||||
@@ -1,14 +1,8 @@
|
|||||||
# .readthedocs.yml
|
# .readthedocs.yml
|
||||||
version: 2
|
|
||||||
|
|
||||||
build:
|
build:
|
||||||
os: "ubuntu-22.04"
|
image: latest
|
||||||
tools:
|
|
||||||
python: "3.11"
|
|
||||||
|
|
||||||
python:
|
python:
|
||||||
install:
|
version: 3.8
|
||||||
- requirements: docs/requirements-docs.txt
|
setup_py_install: false
|
||||||
|
|
||||||
mkdocs:
|
|
||||||
configuration: mkdocs.yml
|
|
||||||
|
|||||||
11
.vscode/extensions.json
vendored
@@ -1,11 +0,0 @@
|
|||||||
{
|
|
||||||
"recommendations": [
|
|
||||||
"ms-python.python",
|
|
||||||
"ms-python.vscode-pylance",
|
|
||||||
"charliermarsh.ruff",
|
|
||||||
"davidanson.vscode-markdownlint",
|
|
||||||
"ms-azuretools.vscode-docker",
|
|
||||||
"vscode-icons-team.vscode-icons",
|
|
||||||
"github.vscode-github-actions",
|
|
||||||
]
|
|
||||||
}
|
|
||||||
@@ -45,17 +45,16 @@ pytest tests/test_<file_name>.py::test_<method_name>
|
|||||||
|
|
||||||
### 2. Test if your code is PEP8 compliant
|
### 2. Test if your code is PEP8 compliant
|
||||||
|
|
||||||
#### Run Ruff
|
#### Run Flake8
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
ruff check .
|
flake8 freqtrade tests scripts
|
||||||
```
|
```
|
||||||
|
|
||||||
We receive a lot of code that fails the `ruff` checks.
|
We receive a lot of code that fails the `flake8` checks.
|
||||||
To help with that, we encourage you to install the git pre-commit
|
To help with that, we encourage you to install the git pre-commit
|
||||||
hook that will warn you when you try to commit code that fails these checks.
|
hook that will warn you when you try to commit code that fails these checks.
|
||||||
|
Guide for installing them is [here](http://flake8.pycqa.org/en/latest/user/using-hooks.html).
|
||||||
you can manually run pre-commit with `pre-commit run -a`.
|
|
||||||
|
|
||||||
##### Additional styles applied
|
##### Additional styles applied
|
||||||
|
|
||||||
@@ -72,12 +71,12 @@ you can manually run pre-commit with `pre-commit run -a`.
|
|||||||
mypy freqtrade
|
mypy freqtrade
|
||||||
```
|
```
|
||||||
|
|
||||||
### 4. Ensure formatting is correct
|
### 4. Ensure all imports are correct
|
||||||
|
|
||||||
#### Run ruff
|
#### Run isort
|
||||||
|
|
||||||
``` bash
|
``` bash
|
||||||
ruff format .
|
isort .
|
||||||
```
|
```
|
||||||
|
|
||||||
## (Core)-Committer Guide
|
## (Core)-Committer Guide
|
||||||
@@ -125,7 +124,7 @@ Exceptions:
|
|||||||
|
|
||||||
Contributors may be given commit privileges. Preference will be given to those with:
|
Contributors may be given commit privileges. Preference will be given to those with:
|
||||||
|
|
||||||
1. Past contributions to Freqtrade and other related open-source projects. Contributions to Freqtrade include both code (both accepted and pending) and friendly participation in the issue tracker and Pull request reviews. Both quantity and quality are considered.
|
1. Past contributions to Freqtrade and other related open-source projects. Contributions to Freqtrade include both code (both accepted and pending) and friendly participation in the issue tracker and Pull request reviews. Quantity and quality are considered.
|
||||||
1. A coding style that the other core committers find simple, minimal, and clean.
|
1. A coding style that the other core committers find simple, minimal, and clean.
|
||||||
1. Access to resources for cross-platform development and testing.
|
1. Access to resources for cross-platform development and testing.
|
||||||
1. Time to devote to the project regularly.
|
1. Time to devote to the project regularly.
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
FROM python:3.12.7-slim-bookworm as base
|
FROM python:3.10.7-slim-bullseye as base
|
||||||
|
|
||||||
# Setup env
|
# Setup env
|
||||||
ENV LANG C.UTF-8
|
ENV LANG C.UTF-8
|
||||||
@@ -25,7 +25,7 @@ FROM base as python-deps
|
|||||||
RUN apt-get update \
|
RUN apt-get update \
|
||||||
&& apt-get -y install build-essential libssl-dev git libffi-dev libgfortran5 pkg-config cmake gcc \
|
&& apt-get -y install build-essential libssl-dev git libffi-dev libgfortran5 pkg-config cmake gcc \
|
||||||
&& apt-get clean \
|
&& apt-get clean \
|
||||||
&& pip install --upgrade pip wheel
|
&& pip install --upgrade pip
|
||||||
|
|
||||||
# Install TA-lib
|
# Install TA-lib
|
||||||
COPY build_helpers/* /tmp/
|
COPY build_helpers/* /tmp/
|
||||||
@@ -35,7 +35,7 @@ ENV LD_LIBRARY_PATH /usr/local/lib
|
|||||||
# Install dependencies
|
# Install dependencies
|
||||||
COPY --chown=ftuser:ftuser requirements.txt requirements-hyperopt.txt /freqtrade/
|
COPY --chown=ftuser:ftuser requirements.txt requirements-hyperopt.txt /freqtrade/
|
||||||
USER ftuser
|
USER ftuser
|
||||||
RUN pip install --user --no-cache-dir "numpy<2.0" \
|
RUN pip install --user --no-cache-dir numpy \
|
||||||
&& pip install --user --no-cache-dir -r requirements-hyperopt.txt
|
&& pip install --user --no-cache-dir -r requirements-hyperopt.txt
|
||||||
|
|
||||||
# Copy dependencies to runtime-image
|
# Copy dependencies to runtime-image
|
||||||
|
|||||||
@@ -5,5 +5,3 @@ recursive-include freqtrade/templates/ *.j2 *.ipynb
|
|||||||
include freqtrade/exchange/binance_leverage_tiers.json
|
include freqtrade/exchange/binance_leverage_tiers.json
|
||||||
include freqtrade/rpc/api_server/ui/fallback_file.html
|
include freqtrade/rpc/api_server/ui/fallback_file.html
|
||||||
include freqtrade/rpc/api_server/ui/favicon.ico
|
include freqtrade/rpc/api_server/ui/favicon.ico
|
||||||
|
|
||||||
prune tests
|
|
||||||
|
|||||||
43
README.md
@@ -1,7 +1,6 @@
|
|||||||
# 
|
# 
|
||||||
|
|
||||||
[](https://github.com/freqtrade/freqtrade/actions/)
|
[](https://github.com/freqtrade/freqtrade/actions/)
|
||||||
[](https://doi.org/10.21105/joss.04864)
|
|
||||||
[](https://coveralls.io/github/freqtrade/freqtrade?branch=develop)
|
[](https://coveralls.io/github/freqtrade/freqtrade?branch=develop)
|
||||||
[](https://www.freqtrade.io)
|
[](https://www.freqtrade.io)
|
||||||
[](https://codeclimate.com/github/freqtrade/freqtrade/maintainability)
|
[](https://codeclimate.com/github/freqtrade/freqtrade/maintainability)
|
||||||
@@ -28,24 +27,19 @@ hesitate to read the source code and understand the mechanism of this bot.
|
|||||||
Please read the [exchange specific notes](docs/exchanges.md) to learn about eventual, special configurations needed for each exchange.
|
Please read the [exchange specific notes](docs/exchanges.md) to learn about eventual, special configurations needed for each exchange.
|
||||||
|
|
||||||
- [X] [Binance](https://www.binance.com/)
|
- [X] [Binance](https://www.binance.com/)
|
||||||
- [X] [Bitmart](https://bitmart.com/)
|
- [X] [Bittrex](https://bittrex.com/)
|
||||||
- [X] [BingX](https://bingx.com/invite/0EM9RX)
|
- [X] [FTX](https://ftx.com/#a=2258149)
|
||||||
- [X] [Bybit](https://bybit.com/)
|
|
||||||
- [X] [Gate.io](https://www.gate.io/ref/6266643)
|
- [X] [Gate.io](https://www.gate.io/ref/6266643)
|
||||||
- [X] [HTX](https://www.htx.com/)
|
- [X] [Huobi](http://huobi.com/)
|
||||||
- [X] [Hyperliquid](https://hyperliquid.xyz/) (A decentralized exchange, or DEX)
|
|
||||||
- [X] [Kraken](https://kraken.com/)
|
- [X] [Kraken](https://kraken.com/)
|
||||||
- [X] [OKX](https://okx.com/)
|
- [X] [OKX](https://okx.com/) (Former OKEX)
|
||||||
- [X] [MyOKX](https://okx.com/) (OKX EEA)
|
|
||||||
- [ ] [potentially many others](https://github.com/ccxt/ccxt/). _(We cannot guarantee they will work)_
|
- [ ] [potentially many others](https://github.com/ccxt/ccxt/). _(We cannot guarantee they will work)_
|
||||||
|
|
||||||
### Supported Futures Exchanges (experimental)
|
### Supported Futures Exchanges (experimental)
|
||||||
|
|
||||||
- [X] [Binance](https://www.binance.com/)
|
- [X] [Binance](https://www.binance.com/)
|
||||||
- [X] [Gate.io](https://www.gate.io/ref/6266643)
|
- [X] [Gate.io](https://www.gate.io/ref/6266643)
|
||||||
- [X] [Hyperliquid](https://hyperliquid.xyz/) (A decentralized exchange, or DEX)
|
- [X] [OKX](https://okx.com/).
|
||||||
- [X] [OKX](https://okx.com/)
|
|
||||||
- [X] [Bybit](https://bybit.com/)
|
|
||||||
|
|
||||||
Please make sure to read the [exchange specific notes](docs/exchanges.md), as well as the [trading with leverage](docs/leverage.md) documentation before diving in.
|
Please make sure to read the [exchange specific notes](docs/exchanges.md), as well as the [trading with leverage](docs/leverage.md) documentation before diving in.
|
||||||
|
|
||||||
@@ -64,7 +58,7 @@ Please find the complete documentation on the [freqtrade website](https://www.fr
|
|||||||
|
|
||||||
## Features
|
## Features
|
||||||
|
|
||||||
- [x] **Based on Python 3.10+**: For botting on any operating system - Windows, macOS and Linux.
|
- [x] **Based on Python 3.8+**: For botting on any operating system - Windows, macOS and Linux.
|
||||||
- [x] **Persistence**: Persistence is achieved through sqlite.
|
- [x] **Persistence**: Persistence is achieved through sqlite.
|
||||||
- [x] **Dry-run**: Run the bot without paying money.
|
- [x] **Dry-run**: Run the bot without paying money.
|
||||||
- [x] **Backtesting**: Run a simulation of your buy/sell strategy.
|
- [x] **Backtesting**: Run a simulation of your buy/sell strategy.
|
||||||
@@ -90,50 +84,41 @@ For further (native) installation methods, please refer to the [Installation doc
|
|||||||
|
|
||||||
```
|
```
|
||||||
usage: freqtrade [-h] [-V]
|
usage: freqtrade [-h] [-V]
|
||||||
{trade,create-userdir,new-config,show-config,new-strategy,download-data,convert-data,convert-trade-data,trades-to-ohlcv,list-data,backtesting,backtesting-show,backtesting-analysis,edge,hyperopt,hyperopt-list,hyperopt-show,list-exchanges,list-markets,list-pairs,list-strategies,list-freqaimodels,list-timeframes,show-trades,test-pairlist,convert-db,install-ui,plot-dataframe,plot-profit,webserver,strategy-updater,lookahead-analysis,recursive-analysis}
|
{trade,create-userdir,new-config,new-strategy,download-data,convert-data,convert-trade-data,list-data,backtesting,edge,hyperopt,hyperopt-list,hyperopt-show,list-exchanges,list-hyperopts,list-markets,list-pairs,list-strategies,list-timeframes,show-trades,test-pairlist,install-ui,plot-dataframe,plot-profit,webserver}
|
||||||
...
|
...
|
||||||
|
|
||||||
Free, open source crypto trading bot
|
Free, open source crypto trading bot
|
||||||
|
|
||||||
positional arguments:
|
positional arguments:
|
||||||
{trade,create-userdir,new-config,show-config,new-strategy,download-data,convert-data,convert-trade-data,trades-to-ohlcv,list-data,backtesting,backtesting-show,backtesting-analysis,edge,hyperopt,hyperopt-list,hyperopt-show,list-exchanges,list-markets,list-pairs,list-strategies,list-freqaimodels,list-timeframes,show-trades,test-pairlist,convert-db,install-ui,plot-dataframe,plot-profit,webserver,strategy-updater,lookahead-analysis,recursive-analysis}
|
{trade,create-userdir,new-config,new-strategy,download-data,convert-data,convert-trade-data,list-data,backtesting,edge,hyperopt,hyperopt-list,hyperopt-show,list-exchanges,list-hyperopts,list-markets,list-pairs,list-strategies,list-timeframes,show-trades,test-pairlist,install-ui,plot-dataframe,plot-profit,webserver}
|
||||||
trade Trade module.
|
trade Trade module.
|
||||||
create-userdir Create user-data directory.
|
create-userdir Create user-data directory.
|
||||||
new-config Create new config
|
new-config Create new config
|
||||||
show-config Show resolved config
|
|
||||||
new-strategy Create new strategy
|
new-strategy Create new strategy
|
||||||
download-data Download backtesting data.
|
download-data Download backtesting data.
|
||||||
convert-data Convert candle (OHLCV) data from one format to
|
convert-data Convert candle (OHLCV) data from one format to
|
||||||
another.
|
another.
|
||||||
convert-trade-data Convert trade data from one format to another.
|
convert-trade-data Convert trade data from one format to another.
|
||||||
trades-to-ohlcv Convert trade data to OHLCV data.
|
|
||||||
list-data List downloaded data.
|
list-data List downloaded data.
|
||||||
backtesting Backtesting module.
|
backtesting Backtesting module.
|
||||||
backtesting-show Show past Backtest results
|
|
||||||
backtesting-analysis
|
|
||||||
Backtest Analysis module.
|
|
||||||
edge Edge module.
|
edge Edge module.
|
||||||
hyperopt Hyperopt module.
|
hyperopt Hyperopt module.
|
||||||
hyperopt-list List Hyperopt results
|
hyperopt-list List Hyperopt results
|
||||||
hyperopt-show Show details of Hyperopt results
|
hyperopt-show Show details of Hyperopt results
|
||||||
list-exchanges Print available exchanges.
|
list-exchanges Print available exchanges.
|
||||||
|
list-hyperopts Print available hyperopt classes.
|
||||||
list-markets Print markets on exchange.
|
list-markets Print markets on exchange.
|
||||||
list-pairs Print pairs on exchange.
|
list-pairs Print pairs on exchange.
|
||||||
list-strategies Print available strategies.
|
list-strategies Print available strategies.
|
||||||
list-freqaimodels Print available freqAI models.
|
|
||||||
list-timeframes Print available timeframes for the exchange.
|
list-timeframes Print available timeframes for the exchange.
|
||||||
show-trades Show trades.
|
show-trades Show trades.
|
||||||
test-pairlist Test your pairlist configuration.
|
test-pairlist Test your pairlist configuration.
|
||||||
convert-db Migrate database to different system
|
|
||||||
install-ui Install FreqUI
|
install-ui Install FreqUI
|
||||||
plot-dataframe Plot candles with indicators.
|
plot-dataframe Plot candles with indicators.
|
||||||
plot-profit Generate plot showing profits.
|
plot-profit Generate plot showing profits.
|
||||||
webserver Webserver module.
|
webserver Webserver module.
|
||||||
strategy-updater updates outdated strategy files to the current version
|
|
||||||
lookahead-analysis Check for potential look ahead bias.
|
|
||||||
recursive-analysis Check for potential recursive formula issue.
|
|
||||||
|
|
||||||
options:
|
optional arguments:
|
||||||
-h, --help show this help message and exit
|
-h, --help show this help message and exit
|
||||||
-V, --version show program's version number and exit
|
-V, --version show program's version number and exit
|
||||||
|
|
||||||
@@ -179,10 +164,6 @@ first. If it hasn't been reported, please
|
|||||||
ensure you follow the template guide so that the team can assist you as
|
ensure you follow the template guide so that the team can assist you as
|
||||||
quickly as possible.
|
quickly as possible.
|
||||||
|
|
||||||
For every [issue](https://github.com/freqtrade/freqtrade/issues/new/choose) created, kindly follow up and mark satisfaction or reminder to close issue when equilibrium ground is reached.
|
|
||||||
|
|
||||||
--Maintain github's [community policy](https://docs.github.com/en/site-policy/github-terms/github-community-code-of-conduct)--
|
|
||||||
|
|
||||||
### [Feature Requests](https://github.com/freqtrade/freqtrade/labels/enhancement)
|
### [Feature Requests](https://github.com/freqtrade/freqtrade/labels/enhancement)
|
||||||
|
|
||||||
Have you a great idea to improve the bot you want to share? Please,
|
Have you a great idea to improve the bot you want to share? Please,
|
||||||
@@ -221,9 +202,9 @@ To run this bot we recommend you a cloud instance with a minimum of:
|
|||||||
|
|
||||||
### Software requirements
|
### Software requirements
|
||||||
|
|
||||||
- [Python >= 3.10](http://docs.python-guide.org/en/latest/starting/installation/)
|
- [Python >= 3.8](http://docs.python-guide.org/en/latest/starting/installation/)
|
||||||
- [pip](https://pip.pypa.io/en/stable/installing/)
|
- [pip](https://pip.pypa.io/en/stable/installing/)
|
||||||
- [git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git)
|
- [git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git)
|
||||||
- [TA-Lib](https://ta-lib.github.io/ta-lib-python/)
|
- [TA-Lib](https://mrjbq7.github.io/ta-lib/install.html)
|
||||||
- [virtualenv](https://virtualenv.pypa.io/en/stable/installation.html) (Recommended)
|
- [virtualenv](https://virtualenv.pypa.io/en/stable/installation.html) (Recommended)
|
||||||
- [Docker](https://www.docker.com/products/docker) (Recommended)
|
- [Docker](https://www.docker.com/products/docker) (Recommended)
|
||||||
|
|||||||
BIN
build_helpers/TA_Lib-0.4.25-cp310-cp310-win_amd64.whl
Normal file
BIN
build_helpers/TA_Lib-0.4.25-cp38-cp38-win_amd64.whl
Normal file
BIN
build_helpers/TA_Lib-0.4.25-cp39-cp39-win_amd64.whl
Normal file
@@ -1,23 +0,0 @@
|
|||||||
#!/usr/bin/env python3
|
|
||||||
import json
|
|
||||||
import os
|
|
||||||
from pathlib import Path
|
|
||||||
|
|
||||||
import ccxt
|
|
||||||
|
|
||||||
|
|
||||||
key = os.environ.get("FREQTRADE__EXCHANGE__KEY")
|
|
||||||
secret = os.environ.get("FREQTRADE__EXCHANGE__SECRET")
|
|
||||||
|
|
||||||
proxy = os.environ.get("CI_WEB_PROXY")
|
|
||||||
|
|
||||||
exchange = ccxt.binance(
|
|
||||||
{"apiKey": key, "secret": secret, "httpsProxy": proxy, "options": {"defaultType": "swap"}}
|
|
||||||
)
|
|
||||||
_ = exchange.load_markets()
|
|
||||||
|
|
||||||
lev_tiers = exchange.fetch_leverage_tiers()
|
|
||||||
|
|
||||||
# Assumes this is running in the root of the repository.
|
|
||||||
file = Path("freqtrade/exchange/binance_leverage_tiers.json")
|
|
||||||
json.dump(dict(sorted(lev_tiers.items())), file.open("w"), indent=2)
|
|
||||||
@@ -1,17 +0,0 @@
|
|||||||
"""Script to extract the configuration json schema from config_schema.py file."""
|
|
||||||
|
|
||||||
from pathlib import Path
|
|
||||||
|
|
||||||
import rapidjson
|
|
||||||
|
|
||||||
from freqtrade.configuration.config_schema import CONF_SCHEMA
|
|
||||||
|
|
||||||
|
|
||||||
def extract_config_json_schema():
|
|
||||||
schema_filename = Path(__file__).parent / "schema.json"
|
|
||||||
with schema_filename.open("w") as f:
|
|
||||||
rapidjson.dump(CONF_SCHEMA, f, indent=2)
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
|
||||||
extract_config_json_schema()
|
|
||||||
@@ -1,15 +0,0 @@
|
|||||||
#!/usr/bin/env python3
|
|
||||||
from freqtrade import __version__ as ft_version
|
|
||||||
from freqtrade_client import __version__ as client_version
|
|
||||||
|
|
||||||
|
|
||||||
def main():
|
|
||||||
if ft_version != client_version:
|
|
||||||
print(f"Versions do not match: \nft: {ft_version} \nclient: {client_version}")
|
|
||||||
exit(1)
|
|
||||||
print(f"Versions match: ft: {ft_version}, client: {client_version}")
|
|
||||||
exit(0)
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
|
||||||
main()
|
|
||||||
@@ -8,9 +8,8 @@ if [ -n "$2" ] || [ ! -f "${INSTALL_LOC}/lib/libta_lib.a" ]; then
|
|||||||
tar zxvf ta-lib-0.4.0-src.tar.gz
|
tar zxvf ta-lib-0.4.0-src.tar.gz
|
||||||
cd ta-lib \
|
cd ta-lib \
|
||||||
&& sed -i.bak "s|0.00000001|0.000000000000000001 |g" src/ta_func/ta_utility.h \
|
&& sed -i.bak "s|0.00000001|0.000000000000000001 |g" src/ta_func/ta_utility.h \
|
||||||
&& echo "Downloading gcc config.guess and config.sub" \
|
&& curl 'http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD' -o config.guess \
|
||||||
&& curl -s 'https://raw.githubusercontent.com/gcc-mirror/gcc/master/config.guess' -o config.guess \
|
&& curl 'http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD' -o config.sub \
|
||||||
&& curl -s 'https://raw.githubusercontent.com/gcc-mirror/gcc/master/config.sub' -o config.sub \
|
|
||||||
&& ./configure --prefix=${INSTALL_LOC}/ \
|
&& ./configure --prefix=${INSTALL_LOC}/ \
|
||||||
&& make
|
&& make
|
||||||
if [ $? -ne 0 ]; then
|
if [ $? -ne 0 ]; then
|
||||||
|
|||||||
@@ -1,10 +1,18 @@
|
|||||||
# vendored Wheels compiled via https://github.com/xmatthias/ta-lib-python/tree/ta_bundled_040
|
# Downloads don't work automatically, since the URL is regenerated via javascript.
|
||||||
|
# Downloaded from https://www.lfd.uci.edu/~gohlke/pythonlibs/#ta-lib
|
||||||
|
|
||||||
python -m pip install --upgrade pip
|
python -m pip install --upgrade pip wheel
|
||||||
python -c "import sys; print(f'{sys.version_info.major}.{sys.version_info.minor}')"
|
|
||||||
|
|
||||||
pip install -U wheel "numpy<2"
|
$pyv = python -c "import sys; print(f'{sys.version_info.major}.{sys.version_info.minor}')"
|
||||||
pip install --only-binary ta-lib --find-links=build_helpers\ ta-lib
|
|
||||||
|
|
||||||
|
if ($pyv -eq '3.8') {
|
||||||
|
pip install build_helpers\TA_Lib-0.4.25-cp38-cp38-win_amd64.whl
|
||||||
|
}
|
||||||
|
if ($pyv -eq '3.9') {
|
||||||
|
pip install build_helpers\TA_Lib-0.4.25-cp39-cp39-win_amd64.whl
|
||||||
|
}
|
||||||
|
if ($pyv -eq '3.10') {
|
||||||
|
pip install build_helpers\TA_Lib-0.4.25-cp310-cp310-win_amd64.whl
|
||||||
|
}
|
||||||
pip install -r requirements-dev.txt
|
pip install -r requirements-dev.txt
|
||||||
pip install -e .
|
pip install -e .
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
# File used in CI to ensure pre-commit dependencies are kept up-to-date.
|
# File used in CI to ensure pre-commit dependencies are kept uptodate.
|
||||||
|
|
||||||
import sys
|
import sys
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
@@ -6,30 +6,23 @@ from pathlib import Path
|
|||||||
import yaml
|
import yaml
|
||||||
|
|
||||||
|
|
||||||
pre_commit_file = Path(".pre-commit-config.yaml")
|
pre_commit_file = Path('.pre-commit-config.yaml')
|
||||||
require_dev = Path("requirements-dev.txt")
|
require_dev = Path('requirements-dev.txt')
|
||||||
require = Path("requirements.txt")
|
|
||||||
|
|
||||||
with require_dev.open("r") as rfile:
|
with require_dev.open('r') as rfile:
|
||||||
requirements = rfile.readlines()
|
requirements = rfile.readlines()
|
||||||
|
|
||||||
with require.open("r") as rfile:
|
|
||||||
requirements.extend(rfile.readlines())
|
|
||||||
|
|
||||||
# Extract types only
|
# Extract types only
|
||||||
type_reqs = [
|
type_reqs = [r.strip('\n') for r in requirements if r.startswith('types-')]
|
||||||
r.strip("\n") for r in requirements if r.startswith("types-") or r.startswith("SQLAlchemy")
|
|
||||||
]
|
|
||||||
|
|
||||||
with pre_commit_file.open("r") as file:
|
with pre_commit_file.open('r') as file:
|
||||||
f = yaml.load(file, Loader=yaml.SafeLoader)
|
f = yaml.load(file, Loader=yaml.FullLoader)
|
||||||
|
|
||||||
|
|
||||||
mypy_repo = [
|
mypy_repo = [repo for repo in f['repos'] if repo['repo']
|
||||||
repo for repo in f["repos"] if repo["repo"] == "https://github.com/pre-commit/mirrors-mypy"
|
== 'https://github.com/pre-commit/mirrors-mypy']
|
||||||
]
|
|
||||||
|
|
||||||
hooks = mypy_repo[0]["hooks"][0]["additional_dependencies"]
|
hooks = mypy_repo[0]['hooks'][0]['additional_dependencies']
|
||||||
|
|
||||||
errors = []
|
errors = []
|
||||||
for hook in hooks:
|
for hook in hooks:
|
||||||
|
|||||||
@@ -3,22 +3,16 @@
|
|||||||
# Use BuildKit, otherwise building on ARM fails
|
# Use BuildKit, otherwise building on ARM fails
|
||||||
export DOCKER_BUILDKIT=1
|
export DOCKER_BUILDKIT=1
|
||||||
|
|
||||||
IMAGE_NAME=freqtradeorg/freqtrade
|
|
||||||
CACHE_IMAGE=freqtradeorg/freqtrade_cache
|
|
||||||
GHCR_IMAGE_NAME=ghcr.io/freqtrade/freqtrade
|
|
||||||
|
|
||||||
# Replace / with _ to create a valid tag
|
# Replace / with _ to create a valid tag
|
||||||
TAG=$(echo "${BRANCH_NAME}" | sed -e "s/\//_/g")
|
TAG=$(echo "${BRANCH_NAME}" | sed -e "s/\//_/g")
|
||||||
TAG_PLOT=${TAG}_plot
|
TAG_PLOT=${TAG}_plot
|
||||||
TAG_FREQAI=${TAG}_freqai
|
TAG_FREQAI=${TAG}_freqai
|
||||||
TAG_FREQAI_RL=${TAG_FREQAI}rl
|
|
||||||
TAG_FREQAI_TORCH=${TAG_FREQAI}torch
|
|
||||||
TAG_PI="${TAG}_pi"
|
TAG_PI="${TAG}_pi"
|
||||||
|
|
||||||
TAG_ARM=${TAG}_arm
|
TAG_ARM=${TAG}_arm
|
||||||
TAG_PLOT_ARM=${TAG_PLOT}_arm
|
TAG_PLOT_ARM=${TAG_PLOT}_arm
|
||||||
TAG_FREQAI_ARM=${TAG_FREQAI}_arm
|
TAG_FREQAI_ARM=${TAG_FREQAI}_arm
|
||||||
TAG_FREQAI_RL_ARM=${TAG_FREQAI_RL}_arm
|
CACHE_IMAGE=freqtradeorg/freqtrade_cache
|
||||||
|
|
||||||
echo "Running for ${TAG}"
|
echo "Running for ${TAG}"
|
||||||
|
|
||||||
@@ -42,19 +36,17 @@ if [ $? -ne 0 ]; then
|
|||||||
echo "failed building multiarch images"
|
echo "failed building multiarch images"
|
||||||
return 1
|
return 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
docker build --build-arg sourceimage=freqtrade --build-arg sourcetag=${TAG_ARM} -t freqtrade:${TAG_PLOT_ARM} -f docker/Dockerfile.plot .
|
|
||||||
docker build --build-arg sourceimage=freqtrade --build-arg sourcetag=${TAG_ARM} -t freqtrade:${TAG_FREQAI_ARM} -f docker/Dockerfile.freqai .
|
|
||||||
docker build --build-arg sourceimage=freqtrade --build-arg sourcetag=${TAG_FREQAI_ARM} -t freqtrade:${TAG_FREQAI_RL_ARM} -f docker/Dockerfile.freqai_rl .
|
|
||||||
|
|
||||||
# Tag image for upload and next build step
|
# Tag image for upload and next build step
|
||||||
docker tag freqtrade:$TAG_ARM ${CACHE_IMAGE}:$TAG_ARM
|
docker tag freqtrade:$TAG_ARM ${CACHE_IMAGE}:$TAG_ARM
|
||||||
|
|
||||||
|
docker build --cache-from freqtrade:${TAG_ARM} --build-arg sourceimage=${CACHE_IMAGE} --build-arg sourcetag=${TAG_ARM} -t freqtrade:${TAG_PLOT_ARM} -f docker/Dockerfile.plot .
|
||||||
|
docker build --cache-from freqtrade:${TAG_ARM} --build-arg sourceimage=${CACHE_IMAGE} --build-arg sourcetag=${TAG_ARM} -t freqtrade:${TAG_FREQAI_ARM} -f docker/Dockerfile.freqai .
|
||||||
|
|
||||||
docker tag freqtrade:$TAG_PLOT_ARM ${CACHE_IMAGE}:$TAG_PLOT_ARM
|
docker tag freqtrade:$TAG_PLOT_ARM ${CACHE_IMAGE}:$TAG_PLOT_ARM
|
||||||
docker tag freqtrade:$TAG_FREQAI_ARM ${CACHE_IMAGE}:$TAG_FREQAI_ARM
|
docker tag freqtrade:$TAG_FREQAI_ARM ${CACHE_IMAGE}:$TAG_FREQAI_ARM
|
||||||
docker tag freqtrade:$TAG_FREQAI_RL_ARM ${CACHE_IMAGE}:$TAG_FREQAI_RL_ARM
|
|
||||||
|
|
||||||
# Run backtest
|
# Run backtest
|
||||||
docker run --rm -v $(pwd)/tests/testdata/config.tests.json:/freqtrade/config.json:ro -v $(pwd)/tests:/tests freqtrade:${TAG_ARM} backtesting --datadir /tests/testdata --strategy-path /tests/strategy/strats/ --strategy StrategyTestV3
|
docker run --rm -v $(pwd)/config_examples/config_bittrex.example.json:/freqtrade/config.json:ro -v $(pwd)/tests:/tests freqtrade:${TAG_ARM} backtesting --datadir /tests/testdata --strategy-path /tests/strategy/strats/ --strategy StrategyTestV3
|
||||||
|
|
||||||
if [ $? -ne 0 ]; then
|
if [ $? -ne 0 ]; then
|
||||||
echo "failed running backtest"
|
echo "failed running backtest"
|
||||||
@@ -63,9 +55,9 @@ fi
|
|||||||
|
|
||||||
docker images
|
docker images
|
||||||
|
|
||||||
|
# docker push ${IMAGE_NAME}
|
||||||
docker push ${CACHE_IMAGE}:$TAG_PLOT_ARM
|
docker push ${CACHE_IMAGE}:$TAG_PLOT_ARM
|
||||||
docker push ${CACHE_IMAGE}:$TAG_FREQAI_ARM
|
docker push ${CACHE_IMAGE}:$TAG_FREQAI_ARM
|
||||||
docker push ${CACHE_IMAGE}:$TAG_FREQAI_RL_ARM
|
|
||||||
docker push ${CACHE_IMAGE}:$TAG_ARM
|
docker push ${CACHE_IMAGE}:$TAG_ARM
|
||||||
|
|
||||||
# Create multi-arch image
|
# Create multi-arch image
|
||||||
@@ -73,47 +65,22 @@ docker push ${CACHE_IMAGE}:$TAG_ARM
|
|||||||
# Otherwise installation might fail.
|
# Otherwise installation might fail.
|
||||||
echo "create manifests"
|
echo "create manifests"
|
||||||
|
|
||||||
docker manifest create ${IMAGE_NAME}:${TAG} ${CACHE_IMAGE}:${TAG} ${CACHE_IMAGE}:${TAG_ARM} ${IMAGE_NAME}:${TAG_PI}
|
docker manifest create --amend ${IMAGE_NAME}:${TAG} ${CACHE_IMAGE}:${TAG_ARM} ${IMAGE_NAME}:${TAG_PI} ${CACHE_IMAGE}:${TAG}
|
||||||
docker manifest push -p ${IMAGE_NAME}:${TAG}
|
docker manifest push -p ${IMAGE_NAME}:${TAG}
|
||||||
|
|
||||||
docker manifest create ${IMAGE_NAME}:${TAG_PLOT} ${CACHE_IMAGE}:${TAG_PLOT} ${CACHE_IMAGE}:${TAG_PLOT_ARM}
|
docker manifest create ${IMAGE_NAME}:${TAG_PLOT} ${CACHE_IMAGE}:${TAG_PLOT_ARM} ${CACHE_IMAGE}:${TAG_PLOT}
|
||||||
docker manifest push -p ${IMAGE_NAME}:${TAG_PLOT}
|
docker manifest push -p ${IMAGE_NAME}:${TAG_PLOT}
|
||||||
|
|
||||||
docker manifest create ${IMAGE_NAME}:${TAG_FREQAI} ${CACHE_IMAGE}:${TAG_FREQAI} ${CACHE_IMAGE}:${TAG_FREQAI_ARM}
|
docker manifest create ${IMAGE_NAME}:${TAG_FREQAI} ${CACHE_IMAGE}:${TAG_FREQAI_ARM} ${CACHE_IMAGE}:${TAG_FREQAI}
|
||||||
docker manifest push -p ${IMAGE_NAME}:${TAG_FREQAI}
|
docker manifest push -p ${IMAGE_NAME}:${TAG_FREQAI}
|
||||||
|
|
||||||
docker manifest create ${IMAGE_NAME}:${TAG_FREQAI_RL} ${CACHE_IMAGE}:${TAG_FREQAI_RL} ${CACHE_IMAGE}:${TAG_FREQAI_RL_ARM}
|
|
||||||
docker manifest push -p ${IMAGE_NAME}:${TAG_FREQAI_RL}
|
|
||||||
|
|
||||||
# Create special Torch tag - which is identical to the RL tag.
|
|
||||||
docker manifest create ${IMAGE_NAME}:${TAG_FREQAI_TORCH} ${CACHE_IMAGE}:${TAG_FREQAI_RL} ${CACHE_IMAGE}:${TAG_FREQAI_RL_ARM}
|
|
||||||
docker manifest push -p ${IMAGE_NAME}:${TAG_FREQAI_TORCH}
|
|
||||||
|
|
||||||
# copy images to ghcr.io
|
|
||||||
|
|
||||||
alias crane="docker run --rm -i -v $(pwd)/.crane:/home/nonroot/.docker/ gcr.io/go-containerregistry/crane"
|
|
||||||
mkdir .crane
|
|
||||||
chmod a+rwx .crane
|
|
||||||
|
|
||||||
echo "${GHCR_TOKEN}" | crane auth login ghcr.io -u "${GHCR_USERNAME}" --password-stdin
|
|
||||||
|
|
||||||
crane copy ${IMAGE_NAME}:${TAG_FREQAI_RL} ${GHCR_IMAGE_NAME}:${TAG_FREQAI_RL}
|
|
||||||
crane copy ${IMAGE_NAME}:${TAG_FREQAI_RL} ${GHCR_IMAGE_NAME}:${TAG_FREQAI_TORCH}
|
|
||||||
crane copy ${IMAGE_NAME}:${TAG_FREQAI} ${GHCR_IMAGE_NAME}:${TAG_FREQAI}
|
|
||||||
crane copy ${IMAGE_NAME}:${TAG_PLOT} ${GHCR_IMAGE_NAME}:${TAG_PLOT}
|
|
||||||
crane copy ${IMAGE_NAME}:${TAG} ${GHCR_IMAGE_NAME}:${TAG}
|
|
||||||
|
|
||||||
# Tag as latest for develop builds
|
# Tag as latest for develop builds
|
||||||
if [ "${TAG}" = "develop" ]; then
|
if [ "${TAG}" = "develop" ]; then
|
||||||
echo 'Tagging image as latest'
|
|
||||||
docker manifest create ${IMAGE_NAME}:latest ${CACHE_IMAGE}:${TAG_ARM} ${IMAGE_NAME}:${TAG_PI} ${CACHE_IMAGE}:${TAG}
|
docker manifest create ${IMAGE_NAME}:latest ${CACHE_IMAGE}:${TAG_ARM} ${IMAGE_NAME}:${TAG_PI} ${CACHE_IMAGE}:${TAG}
|
||||||
docker manifest push -p ${IMAGE_NAME}:latest
|
docker manifest push -p ${IMAGE_NAME}:latest
|
||||||
|
|
||||||
crane copy ${IMAGE_NAME}:latest ${GHCR_IMAGE_NAME}:latest
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
docker images
|
docker images
|
||||||
rm -rf .crane
|
|
||||||
|
|
||||||
# Cleanup old images from arm64 node.
|
# Cleanup old images from arm64 node.
|
||||||
docker image prune -a --force --filter "until=24h"
|
docker image prune -a --force --filter "until=24h"
|
||||||
|
|||||||
@@ -2,17 +2,15 @@
|
|||||||
|
|
||||||
# The below assumes a correctly setup docker buildx environment
|
# The below assumes a correctly setup docker buildx environment
|
||||||
|
|
||||||
IMAGE_NAME=freqtradeorg/freqtrade
|
|
||||||
CACHE_IMAGE=freqtradeorg/freqtrade_cache
|
|
||||||
# Replace / with _ to create a valid tag
|
# Replace / with _ to create a valid tag
|
||||||
TAG=$(echo "${BRANCH_NAME}" | sed -e "s/\//_/g")
|
TAG=$(echo "${BRANCH_NAME}" | sed -e "s/\//_/g")
|
||||||
TAG_PLOT=${TAG}_plot
|
TAG_PLOT=${TAG}_plot
|
||||||
TAG_FREQAI=${TAG}_freqai
|
TAG_FREQAI=${TAG}_freqai
|
||||||
TAG_FREQAI_RL=${TAG_FREQAI}rl
|
|
||||||
TAG_PI="${TAG}_pi"
|
TAG_PI="${TAG}_pi"
|
||||||
|
|
||||||
PI_PLATFORM="linux/arm/v7"
|
PI_PLATFORM="linux/arm/v7"
|
||||||
echo "Running for ${TAG}"
|
echo "Running for ${TAG}"
|
||||||
|
CACHE_IMAGE=freqtradeorg/freqtrade_cache
|
||||||
CACHE_TAG=${CACHE_IMAGE}:${TAG_PI}_cache
|
CACHE_TAG=${CACHE_IMAGE}:${TAG_PI}_cache
|
||||||
|
|
||||||
# Add commit and commit_message to docker container
|
# Add commit and commit_message to docker container
|
||||||
@@ -27,10 +25,7 @@ if [ "${GITHUB_EVENT_NAME}" = "schedule" ]; then
|
|||||||
--cache-to=type=registry,ref=${CACHE_TAG} \
|
--cache-to=type=registry,ref=${CACHE_TAG} \
|
||||||
-f docker/Dockerfile.armhf \
|
-f docker/Dockerfile.armhf \
|
||||||
--platform ${PI_PLATFORM} \
|
--platform ${PI_PLATFORM} \
|
||||||
-t ${IMAGE_NAME}:${TAG_PI} \
|
-t ${IMAGE_NAME}:${TAG_PI} --push .
|
||||||
--push \
|
|
||||||
--provenance=false \
|
|
||||||
.
|
|
||||||
else
|
else
|
||||||
echo "event ${GITHUB_EVENT_NAME}: building with cache"
|
echo "event ${GITHUB_EVENT_NAME}: building with cache"
|
||||||
# Build regular image
|
# Build regular image
|
||||||
@@ -39,16 +34,12 @@ else
|
|||||||
|
|
||||||
# Pull last build to avoid rebuilding the whole image
|
# Pull last build to avoid rebuilding the whole image
|
||||||
# docker pull --platform ${PI_PLATFORM} ${IMAGE_NAME}:${TAG}
|
# docker pull --platform ${PI_PLATFORM} ${IMAGE_NAME}:${TAG}
|
||||||
# disable provenance due to https://github.com/docker/buildx/issues/1509
|
|
||||||
docker buildx build \
|
docker buildx build \
|
||||||
--cache-from=type=registry,ref=${CACHE_TAG} \
|
--cache-from=type=registry,ref=${CACHE_TAG} \
|
||||||
--cache-to=type=registry,ref=${CACHE_TAG} \
|
--cache-to=type=registry,ref=${CACHE_TAG} \
|
||||||
-f docker/Dockerfile.armhf \
|
-f docker/Dockerfile.armhf \
|
||||||
--platform ${PI_PLATFORM} \
|
--platform ${PI_PLATFORM} \
|
||||||
-t ${IMAGE_NAME}:${TAG_PI} \
|
-t ${IMAGE_NAME}:${TAG_PI} --push .
|
||||||
--push \
|
|
||||||
--provenance=false \
|
|
||||||
.
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [ $? -ne 0 ]; then
|
if [ $? -ne 0 ]; then
|
||||||
@@ -58,16 +49,14 @@ fi
|
|||||||
# Tag image for upload and next build step
|
# Tag image for upload and next build step
|
||||||
docker tag freqtrade:$TAG ${CACHE_IMAGE}:$TAG
|
docker tag freqtrade:$TAG ${CACHE_IMAGE}:$TAG
|
||||||
|
|
||||||
docker build --build-arg sourceimage=freqtrade --build-arg sourcetag=${TAG} -t freqtrade:${TAG_PLOT} -f docker/Dockerfile.plot .
|
docker build --cache-from freqtrade:${TAG} --build-arg sourceimage=${CACHE_IMAGE} --build-arg sourcetag=${TAG} -t freqtrade:${TAG_PLOT} -f docker/Dockerfile.plot .
|
||||||
docker build --build-arg sourceimage=freqtrade --build-arg sourcetag=${TAG} -t freqtrade:${TAG_FREQAI} -f docker/Dockerfile.freqai .
|
docker build --cache-from freqtrade:${TAG} --build-arg sourceimage=${CACHE_IMAGE} --build-arg sourcetag=${TAG} -t freqtrade:${TAG_FREQAI} -f docker/Dockerfile.freqai .
|
||||||
docker build --build-arg sourceimage=freqtrade --build-arg sourcetag=${TAG_FREQAI} -t freqtrade:${TAG_FREQAI_RL} -f docker/Dockerfile.freqai_rl .
|
|
||||||
|
|
||||||
docker tag freqtrade:$TAG_PLOT ${CACHE_IMAGE}:$TAG_PLOT
|
docker tag freqtrade:$TAG_PLOT ${CACHE_IMAGE}:$TAG_PLOT
|
||||||
docker tag freqtrade:$TAG_FREQAI ${CACHE_IMAGE}:$TAG_FREQAI
|
docker tag freqtrade:$TAG_FREQAI ${CACHE_IMAGE}:$TAG_FREQAI
|
||||||
docker tag freqtrade:$TAG_FREQAI_RL ${CACHE_IMAGE}:$TAG_FREQAI_RL
|
|
||||||
|
|
||||||
# Run backtest
|
# Run backtest
|
||||||
docker run --rm -v $(pwd)/tests/testdata/config.tests.json:/freqtrade/config.json:ro -v $(pwd)/tests:/tests freqtrade:${TAG} backtesting --datadir /tests/testdata --strategy-path /tests/strategy/strats/ --strategy StrategyTestV3
|
docker run --rm -v $(pwd)/config_examples/config_bittrex.example.json:/freqtrade/config.json:ro -v $(pwd)/tests:/tests freqtrade:${TAG} backtesting --datadir /tests/testdata --strategy-path /tests/strategy/strats/ --strategy StrategyTestV3
|
||||||
|
|
||||||
if [ $? -ne 0 ]; then
|
if [ $? -ne 0 ]; then
|
||||||
echo "failed running backtest"
|
echo "failed running backtest"
|
||||||
@@ -76,10 +65,11 @@ fi
|
|||||||
|
|
||||||
docker images
|
docker images
|
||||||
|
|
||||||
docker push ${CACHE_IMAGE}:$TAG
|
docker push ${CACHE_IMAGE}
|
||||||
docker push ${CACHE_IMAGE}:$TAG_PLOT
|
docker push ${CACHE_IMAGE}:$TAG_PLOT
|
||||||
docker push ${CACHE_IMAGE}:$TAG_FREQAI
|
docker push ${CACHE_IMAGE}:$TAG_FREQAI
|
||||||
docker push ${CACHE_IMAGE}:$TAG_FREQAI_RL
|
docker push ${CACHE_IMAGE}:$TAG
|
||||||
|
|
||||||
|
|
||||||
docker images
|
docker images
|
||||||
|
|
||||||
|
|||||||
@@ -1,7 +1,6 @@
|
|||||||
{
|
{
|
||||||
"$schema": "https://schema.freqtrade.io/schema.json",
|
|
||||||
"max_open_trades": 3,
|
"max_open_trades": 3,
|
||||||
"stake_currency": "USDT",
|
"stake_currency": "BTC",
|
||||||
"stake_amount": 0.05,
|
"stake_amount": 0.05,
|
||||||
"tradable_balance_ratio": 0.99,
|
"tradable_balance_ratio": 0.99,
|
||||||
"fiat_display_currency": "USD",
|
"fiat_display_currency": "USD",
|
||||||
@@ -37,29 +36,43 @@
|
|||||||
"ccxt_async_config": {
|
"ccxt_async_config": {
|
||||||
},
|
},
|
||||||
"pair_whitelist": [
|
"pair_whitelist": [
|
||||||
"ALGO/USDT",
|
"ALGO/BTC",
|
||||||
"ATOM/USDT",
|
"ATOM/BTC",
|
||||||
"BAT/USDT",
|
"BAT/BTC",
|
||||||
"BCH/USDT",
|
"BCH/BTC",
|
||||||
"BRD/USDT",
|
"BRD/BTC",
|
||||||
"EOS/USDT",
|
"EOS/BTC",
|
||||||
"ETH/USDT",
|
"ETH/BTC",
|
||||||
"IOTA/USDT",
|
"IOTA/BTC",
|
||||||
"LINK/USDT",
|
"LINK/BTC",
|
||||||
"LTC/USDT",
|
"LTC/BTC",
|
||||||
"NEO/USDT",
|
"NEO/BTC",
|
||||||
"NXS/USDT",
|
"NXS/BTC",
|
||||||
"XMR/USDT",
|
"XMR/BTC",
|
||||||
"XRP/USDT",
|
"XRP/BTC",
|
||||||
"XTZ/USDT"
|
"XTZ/BTC"
|
||||||
],
|
],
|
||||||
"pair_blacklist": [
|
"pair_blacklist": [
|
||||||
"BNB/.*"
|
"BNB/BTC"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
"pairlists": [
|
"pairlists": [
|
||||||
{"method": "StaticPairList"}
|
{"method": "StaticPairList"}
|
||||||
],
|
],
|
||||||
|
"edge": {
|
||||||
|
"enabled": false,
|
||||||
|
"process_throttle_secs": 3600,
|
||||||
|
"calculate_since_number_of_days": 7,
|
||||||
|
"allowed_risk": 0.01,
|
||||||
|
"stoploss_range_min": -0.01,
|
||||||
|
"stoploss_range_max": -0.1,
|
||||||
|
"stoploss_range_step": -0.01,
|
||||||
|
"minimum_winrate": 0.60,
|
||||||
|
"minimum_expectancy": 0.20,
|
||||||
|
"min_trade_number": 10,
|
||||||
|
"max_trade_duration_minute": 1440,
|
||||||
|
"remove_pumps": false
|
||||||
|
},
|
||||||
"telegram": {
|
"telegram": {
|
||||||
"enabled": false,
|
"enabled": false,
|
||||||
"token": "your_telegram_token",
|
"token": "your_telegram_token",
|
||||||
|
|||||||
@@ -29,11 +29,14 @@
|
|||||||
"order_book_top": 1
|
"order_book_top": 1
|
||||||
},
|
},
|
||||||
"exchange": {
|
"exchange": {
|
||||||
"name": "binance",
|
"name": "bittrex",
|
||||||
"key": "your_exchange_key",
|
"key": "your_exchange_key",
|
||||||
"secret": "your_exchange_secret",
|
"secret": "your_exchange_secret",
|
||||||
"ccxt_config": {},
|
"ccxt_config": {"enableRateLimit": true},
|
||||||
"ccxt_async_config": {},
|
"ccxt_async_config": {
|
||||||
|
"enableRateLimit": true,
|
||||||
|
"rateLimit": 500
|
||||||
|
},
|
||||||
"pair_whitelist": [
|
"pair_whitelist": [
|
||||||
"ETH/BTC",
|
"ETH/BTC",
|
||||||
"LTC/BTC",
|
"LTC/BTC",
|
||||||
@@ -53,6 +56,20 @@
|
|||||||
"pairlists": [
|
"pairlists": [
|
||||||
{"method": "StaticPairList"}
|
{"method": "StaticPairList"}
|
||||||
],
|
],
|
||||||
|
"edge": {
|
||||||
|
"enabled": false,
|
||||||
|
"process_throttle_secs": 3600,
|
||||||
|
"calculate_since_number_of_days": 7,
|
||||||
|
"allowed_risk": 0.01,
|
||||||
|
"stoploss_range_min": -0.01,
|
||||||
|
"stoploss_range_max": -0.1,
|
||||||
|
"stoploss_range_step": -0.01,
|
||||||
|
"minimum_winrate": 0.60,
|
||||||
|
"minimum_expectancy": 0.20,
|
||||||
|
"min_trade_number": 10,
|
||||||
|
"max_trade_duration_minute": 1440,
|
||||||
|
"remove_pumps": false
|
||||||
|
},
|
||||||
"telegram": {
|
"telegram": {
|
||||||
"enabled": false,
|
"enabled": false,
|
||||||
"token": "your_telegram_token",
|
"token": "your_telegram_token",
|
||||||
@@ -1,5 +1,4 @@
|
|||||||
{
|
{
|
||||||
"$schema": "https://schema.freqtrade.io/schema.json",
|
|
||||||
"trading_mode": "futures",
|
"trading_mode": "futures",
|
||||||
"margin_mode": "isolated",
|
"margin_mode": "isolated",
|
||||||
"max_open_trades": 5,
|
"max_open_trades": 5,
|
||||||
@@ -19,11 +18,16 @@
|
|||||||
"name": "binance",
|
"name": "binance",
|
||||||
"key": "",
|
"key": "",
|
||||||
"secret": "",
|
"secret": "",
|
||||||
"ccxt_config": {},
|
"ccxt_config": {
|
||||||
"ccxt_async_config": {},
|
"enableRateLimit": true
|
||||||
|
},
|
||||||
|
"ccxt_async_config": {
|
||||||
|
"enableRateLimit": true,
|
||||||
|
"rateLimit": 200
|
||||||
|
},
|
||||||
"pair_whitelist": [
|
"pair_whitelist": [
|
||||||
"1INCH/USDT:USDT",
|
"1INCH/USDT",
|
||||||
"ALGO/USDT:USDT"
|
"ALGO/USDT"
|
||||||
],
|
],
|
||||||
"pair_blacklist": []
|
"pair_blacklist": []
|
||||||
},
|
},
|
||||||
@@ -49,11 +53,11 @@
|
|||||||
],
|
],
|
||||||
"freqai": {
|
"freqai": {
|
||||||
"enabled": true,
|
"enabled": true,
|
||||||
"purge_old_models": 2,
|
"purge_old_models": true,
|
||||||
"train_period_days": 15,
|
"train_period_days": 15,
|
||||||
"backtest_period_days": 7,
|
"backtest_period_days": 7,
|
||||||
"live_retrain_hours": 0,
|
"live_retrain_hours": 0,
|
||||||
"identifier": "unique-id",
|
"identifier": "uniqe-id",
|
||||||
"feature_parameters": {
|
"feature_parameters": {
|
||||||
"include_timeframes": [
|
"include_timeframes": [
|
||||||
"3m",
|
"3m",
|
||||||
@@ -61,8 +65,8 @@
|
|||||||
"1h"
|
"1h"
|
||||||
],
|
],
|
||||||
"include_corr_pairlist": [
|
"include_corr_pairlist": [
|
||||||
"BTC/USDT:USDT",
|
"BTC/USDT",
|
||||||
"ETH/USDT:USDT"
|
"ETH/USDT"
|
||||||
],
|
],
|
||||||
"label_period_candles": 20,
|
"label_period_candles": 20,
|
||||||
"include_shifted_candles": 2,
|
"include_shifted_candles": 2,
|
||||||
@@ -80,7 +84,9 @@
|
|||||||
"test_size": 0.33,
|
"test_size": 0.33,
|
||||||
"random_state": 1
|
"random_state": 1
|
||||||
},
|
},
|
||||||
"model_training_parameters": {}
|
"model_training_parameters": {
|
||||||
|
"n_estimators": 1000
|
||||||
|
}
|
||||||
},
|
},
|
||||||
"bot_name": "",
|
"bot_name": "",
|
||||||
"force_entry_enable": true,
|
"force_entry_enable": true,
|
||||||
|
|||||||
@@ -1,15 +1,15 @@
|
|||||||
{
|
{
|
||||||
"max_open_trades": 3,
|
"max_open_trades": 3,
|
||||||
"stake_currency": "BTC",
|
"stake_currency": "USD",
|
||||||
"stake_amount": 0.05,
|
"stake_amount": 50,
|
||||||
"tradable_balance_ratio": 0.99,
|
"tradable_balance_ratio": 0.99,
|
||||||
"fiat_display_currency": "USD",
|
"fiat_display_currency": "USD",
|
||||||
"timeframe": "5m",
|
"timeframe": "5m",
|
||||||
"dry_run": true,
|
"dry_run": true,
|
||||||
"cancel_open_orders_on_exit": false,
|
"cancel_open_orders_on_exit": false,
|
||||||
"unfilledtimeout": {
|
"unfilledtimeout": {
|
||||||
"entry": 5,
|
"entry": 10,
|
||||||
"exit": 5,
|
"exit": 10,
|
||||||
"exit_timeout_count": 0,
|
"exit_timeout_count": 0,
|
||||||
"unit": "minutes"
|
"unit": "minutes"
|
||||||
},
|
},
|
||||||
@@ -29,27 +29,49 @@
|
|||||||
"order_book_top": 1
|
"order_book_top": 1
|
||||||
},
|
},
|
||||||
"exchange": {
|
"exchange": {
|
||||||
"name": "gate",
|
"name": "ftx",
|
||||||
"key": "your_exchange_key",
|
"key": "your_exchange_key",
|
||||||
"secret": "your_exchange_secret",
|
"secret": "your_exchange_secret",
|
||||||
"ccxt_config": {},
|
"ccxt_config": {},
|
||||||
"ccxt_async_config": {},
|
"ccxt_async_config": {},
|
||||||
"pair_whitelist": [
|
"pair_whitelist": [
|
||||||
"ETH/BTC",
|
"BTC/USD",
|
||||||
"LTC/BTC",
|
"ETH/USD",
|
||||||
"ETC/BTC",
|
"BNB/USD",
|
||||||
"XLM/BTC",
|
"USDT/USD",
|
||||||
"XRP/BTC",
|
"LTC/USD",
|
||||||
"ADA/BTC",
|
"SRM/USD",
|
||||||
"DOT/BTC"
|
"SXP/USD",
|
||||||
|
"XRP/USD",
|
||||||
|
"DOGE/USD",
|
||||||
|
"1INCH/USD",
|
||||||
|
"CHZ/USD",
|
||||||
|
"MATIC/USD",
|
||||||
|
"LINK/USD",
|
||||||
|
"OXY/USD",
|
||||||
|
"SUSHI/USD"
|
||||||
],
|
],
|
||||||
"pair_blacklist": [
|
"pair_blacklist": [
|
||||||
"DOGE/BTC"
|
"FTT/USD"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
"pairlists": [
|
"pairlists": [
|
||||||
{"method": "StaticPairList"}
|
{"method": "StaticPairList"}
|
||||||
],
|
],
|
||||||
|
"edge": {
|
||||||
|
"enabled": false,
|
||||||
|
"process_throttle_secs": 3600,
|
||||||
|
"calculate_since_number_of_days": 7,
|
||||||
|
"allowed_risk": 0.01,
|
||||||
|
"stoploss_range_min": -0.01,
|
||||||
|
"stoploss_range_max": -0.1,
|
||||||
|
"stoploss_range_step": -0.01,
|
||||||
|
"minimum_winrate": 0.60,
|
||||||
|
"minimum_expectancy": 0.20,
|
||||||
|
"min_trade_number": 10,
|
||||||
|
"max_trade_duration_minute": 1440,
|
||||||
|
"remove_pumps": false
|
||||||
|
},
|
||||||
"telegram": {
|
"telegram": {
|
||||||
"enabled": false,
|
"enabled": false,
|
||||||
"token": "your_telegram_token",
|
"token": "your_telegram_token",
|
||||||
@@ -1,5 +1,4 @@
|
|||||||
{
|
{
|
||||||
"$schema": "https://schema.freqtrade.io/schema.json",
|
|
||||||
"max_open_trades": 3,
|
"max_open_trades": 3,
|
||||||
"stake_currency": "BTC",
|
"stake_currency": "BTC",
|
||||||
"stake_amount": 0.05,
|
"stake_amount": 0.05,
|
||||||
@@ -61,7 +60,6 @@
|
|||||||
"force_entry": "market",
|
"force_entry": "market",
|
||||||
"stoploss": "market",
|
"stoploss": "market",
|
||||||
"stoploss_on_exchange": false,
|
"stoploss_on_exchange": false,
|
||||||
"stoploss_price_type": "last",
|
|
||||||
"stoploss_on_exchange_interval": 60,
|
"stoploss_on_exchange_interval": 60,
|
||||||
"stoploss_on_exchange_limit_ratio": 0.99
|
"stoploss_on_exchange_limit_ratio": 0.99
|
||||||
},
|
},
|
||||||
@@ -71,7 +69,6 @@
|
|||||||
},
|
},
|
||||||
"pairlists": [
|
"pairlists": [
|
||||||
{"method": "StaticPairList"},
|
{"method": "StaticPairList"},
|
||||||
{"method": "FullTradesFilter"},
|
|
||||||
{
|
{
|
||||||
"method": "VolumePairList",
|
"method": "VolumePairList",
|
||||||
"number_assets": 20,
|
"number_assets": 20,
|
||||||
@@ -91,6 +88,7 @@
|
|||||||
],
|
],
|
||||||
"exchange": {
|
"exchange": {
|
||||||
"name": "binance",
|
"name": "binance",
|
||||||
|
"sandbox": false,
|
||||||
"key": "your_exchange_key",
|
"key": "your_exchange_key",
|
||||||
"secret": "your_exchange_secret",
|
"secret": "your_exchange_secret",
|
||||||
"password": "",
|
"password": "",
|
||||||
@@ -206,7 +204,6 @@
|
|||||||
"strategy_path": "user_data/strategies/",
|
"strategy_path": "user_data/strategies/",
|
||||||
"recursive_strategy_search": false,
|
"recursive_strategy_search": false,
|
||||||
"add_config_files": [],
|
"add_config_files": [],
|
||||||
"reduce_df_footprint": false,
|
"dataformat_ohlcv": "json",
|
||||||
"dataformat_ohlcv": "feather",
|
"dataformat_trades": "jsongz"
|
||||||
"dataformat_trades": "feather"
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,5 +1,4 @@
|
|||||||
{
|
{
|
||||||
"$schema": "https://schema.freqtrade.io/schema.json",
|
|
||||||
"max_open_trades": 5,
|
"max_open_trades": 5,
|
||||||
"stake_currency": "EUR",
|
"stake_currency": "EUR",
|
||||||
"stake_amount": 10,
|
"stake_amount": 10,
|
||||||
@@ -65,6 +64,20 @@
|
|||||||
"pairlists": [
|
"pairlists": [
|
||||||
{"method": "StaticPairList"}
|
{"method": "StaticPairList"}
|
||||||
],
|
],
|
||||||
|
"edge": {
|
||||||
|
"enabled": false,
|
||||||
|
"process_throttle_secs": 3600,
|
||||||
|
"calculate_since_number_of_days": 7,
|
||||||
|
"allowed_risk": 0.01,
|
||||||
|
"stoploss_range_min": -0.01,
|
||||||
|
"stoploss_range_max": -0.1,
|
||||||
|
"stoploss_range_step": -0.01,
|
||||||
|
"minimum_winrate": 0.60,
|
||||||
|
"minimum_expectancy": 0.20,
|
||||||
|
"min_trade_number": 10,
|
||||||
|
"max_trade_duration_minute": 1440,
|
||||||
|
"remove_pumps": false
|
||||||
|
},
|
||||||
"telegram": {
|
"telegram": {
|
||||||
"enabled": false,
|
"enabled": false,
|
||||||
"token": "your_telegram_token",
|
"token": "your_telegram_token",
|
||||||
|
|||||||
@@ -1,19 +1,11 @@
|
|||||||
---
|
---
|
||||||
|
version: '3'
|
||||||
services:
|
services:
|
||||||
freqtrade:
|
freqtrade:
|
||||||
image: freqtradeorg/freqtrade:stable
|
image: freqtradeorg/freqtrade:stable
|
||||||
# image: freqtradeorg/freqtrade:develop
|
# image: freqtradeorg/freqtrade:develop
|
||||||
# Use plotting image
|
# Use plotting image
|
||||||
# image: freqtradeorg/freqtrade:develop_plot
|
# image: freqtradeorg/freqtrade:develop_plot
|
||||||
# # Enable GPU Image and GPU Resources (only relevant for freqAI)
|
|
||||||
# # Make sure to uncomment the whole deploy section
|
|
||||||
# deploy:
|
|
||||||
# resources:
|
|
||||||
# reservations:
|
|
||||||
# devices:
|
|
||||||
# - driver: nvidia
|
|
||||||
# count: 1
|
|
||||||
# capabilities: [gpu]
|
|
||||||
# Build step - only needed when additional dependencies are needed
|
# Build step - only needed when additional dependencies are needed
|
||||||
# build:
|
# build:
|
||||||
# context: .
|
# context: .
|
||||||
@@ -24,7 +16,7 @@ services:
|
|||||||
- "./user_data:/freqtrade/user_data"
|
- "./user_data:/freqtrade/user_data"
|
||||||
# Expose api on port 8080 (localhost only)
|
# Expose api on port 8080 (localhost only)
|
||||||
# Please read the https://www.freqtrade.io/en/stable/rest-api/ documentation
|
# Please read the https://www.freqtrade.io/en/stable/rest-api/ documentation
|
||||||
# for more information.
|
# before enabling this.
|
||||||
ports:
|
ports:
|
||||||
- "127.0.0.1:8080:8080"
|
- "127.0.0.1:8080:8080"
|
||||||
# Default command used when running `docker compose up`
|
# Default command used when running `docker compose up`
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
FROM python:3.11.10-slim-bookworm as base
|
FROM python:3.9.12-slim-bullseye as base
|
||||||
|
|
||||||
# Setup env
|
# Setup env
|
||||||
ENV LANG C.UTF-8
|
ENV LANG C.UTF-8
|
||||||
@@ -11,13 +11,12 @@ ENV FT_APP_ENV="docker"
|
|||||||
# Prepare environment
|
# Prepare environment
|
||||||
RUN mkdir /freqtrade \
|
RUN mkdir /freqtrade \
|
||||||
&& apt-get update \
|
&& apt-get update \
|
||||||
&& apt-get -y install sudo libatlas3-base libopenblas-dev curl sqlite3 libhdf5-dev libutf8proc-dev libsnappy-dev \
|
&& apt-get -y install sudo libatlas3-base curl sqlite3 libhdf5-dev \
|
||||||
&& apt-get clean \
|
&& apt-get clean \
|
||||||
&& useradd -u 1000 -G sudo -U -m ftuser \
|
&& useradd -u 1000 -G sudo -U -m ftuser \
|
||||||
&& chown ftuser:ftuser /freqtrade \
|
&& chown ftuser:ftuser /freqtrade \
|
||||||
# Allow sudoers
|
# Allow sudoers
|
||||||
&& echo "ftuser ALL=(ALL) NOPASSWD: /bin/chown" >> /etc/sudoers \
|
&& echo "ftuser ALL=(ALL) NOPASSWD: /bin/chown" >> /etc/sudoers
|
||||||
&& pip install --upgrade pip
|
|
||||||
|
|
||||||
WORKDIR /freqtrade
|
WORKDIR /freqtrade
|
||||||
|
|
||||||
@@ -26,16 +25,18 @@ FROM base as python-deps
|
|||||||
RUN apt-get update \
|
RUN apt-get update \
|
||||||
&& apt-get -y install build-essential libssl-dev libffi-dev libgfortran5 pkg-config cmake gcc \
|
&& apt-get -y install build-essential libssl-dev libffi-dev libgfortran5 pkg-config cmake gcc \
|
||||||
&& apt-get clean \
|
&& apt-get clean \
|
||||||
|
&& pip install --upgrade pip \
|
||||||
&& echo "[global]\nextra-index-url=https://www.piwheels.org/simple" > /etc/pip.conf
|
&& echo "[global]\nextra-index-url=https://www.piwheels.org/simple" > /etc/pip.conf
|
||||||
|
|
||||||
# Install TA-lib
|
# Install TA-lib
|
||||||
COPY build_helpers/* /tmp/
|
COPY build_helpers/* /tmp/
|
||||||
|
RUN cd /tmp && /tmp/install_ta-lib.sh && rm -r /tmp/*ta-lib*
|
||||||
|
ENV LD_LIBRARY_PATH /usr/local/lib
|
||||||
|
|
||||||
# Install dependencies
|
# Install dependencies
|
||||||
COPY --chown=ftuser:ftuser requirements.txt /freqtrade/
|
COPY --chown=ftuser:ftuser requirements.txt /freqtrade/
|
||||||
USER ftuser
|
USER ftuser
|
||||||
RUN pip install --user --no-cache-dir numpy \
|
RUN pip install --user --no-cache-dir numpy \
|
||||||
&& pip install --user --no-index --find-links /tmp/ pyarrow TA-Lib \
|
|
||||||
&& pip install --user --no-cache-dir -r requirements.txt
|
&& pip install --user --no-cache-dir -r requirements.txt
|
||||||
|
|
||||||
# Copy dependencies to runtime-image
|
# Copy dependencies to runtime-image
|
||||||
|
|||||||
@@ -1,8 +0,0 @@
|
|||||||
ARG sourceimage=freqtradeorg/freqtrade
|
|
||||||
ARG sourcetag=develop_freqai
|
|
||||||
FROM ${sourceimage}:${sourcetag}
|
|
||||||
|
|
||||||
# Install dependencies
|
|
||||||
COPY requirements-freqai.txt requirements-freqai-rl.txt /freqtrade/
|
|
||||||
|
|
||||||
RUN pip install -r requirements-freqai-rl.txt --user --no-cache-dir
|
|
||||||
@@ -1,8 +1,8 @@
|
|||||||
FROM freqtradeorg/freqtrade:develop_plot
|
FROM freqtradeorg/freqtrade:develop_plot
|
||||||
|
|
||||||
|
|
||||||
# Pin prompt-toolkit to avoid questionary version conflict
|
# Pin jupyter-client to avoid tornado version conflict
|
||||||
RUN pip install jupyterlab "prompt-toolkit<=3.0.36" jupyter-client --user --no-cache-dir
|
RUN pip install jupyterlab jupyter-client==7.3.4 --user --no-cache-dir
|
||||||
|
|
||||||
# Empty the ENTRYPOINT to allow all commands
|
# Empty the ENTRYPOINT to allow all commands
|
||||||
ENTRYPOINT []
|
ENTRYPOINT []
|
||||||
|
|||||||
@@ -1,35 +0,0 @@
|
|||||||
---
|
|
||||||
services:
|
|
||||||
freqtrade:
|
|
||||||
image: freqtradeorg/freqtrade:stable_freqaitorch
|
|
||||||
# # Enable GPU Image and GPU Resources
|
|
||||||
# # Make sure to uncomment the whole deploy section
|
|
||||||
# deploy:
|
|
||||||
# resources:
|
|
||||||
# reservations:
|
|
||||||
# devices:
|
|
||||||
# - driver: nvidia
|
|
||||||
# count: 1
|
|
||||||
# capabilities: [gpu]
|
|
||||||
|
|
||||||
# Build step - only needed when additional dependencies are needed
|
|
||||||
# build:
|
|
||||||
# context: .
|
|
||||||
# dockerfile: "./docker/Dockerfile.custom"
|
|
||||||
restart: unless-stopped
|
|
||||||
container_name: freqtrade
|
|
||||||
volumes:
|
|
||||||
- "./user_data:/freqtrade/user_data"
|
|
||||||
# Expose api on port 8080 (localhost only)
|
|
||||||
# Please read the https://www.freqtrade.io/en/stable/rest-api/ documentation
|
|
||||||
# for more information.
|
|
||||||
ports:
|
|
||||||
- "127.0.0.1:8080:8080"
|
|
||||||
# Default command used when running `docker compose up`
|
|
||||||
command: >
|
|
||||||
trade
|
|
||||||
--logfile /freqtrade/user_data/logs/freqtrade.log
|
|
||||||
--db-url sqlite:////freqtrade/user_data/tradesv3.sqlite
|
|
||||||
--config /freqtrade/user_data/config.json
|
|
||||||
--freqaimodel XGBoostRegressor
|
|
||||||
--strategy FreqaiExampleStrategy
|
|
||||||
@@ -1,11 +1,12 @@
|
|||||||
---
|
---
|
||||||
|
version: '3'
|
||||||
services:
|
services:
|
||||||
ft_jupyterlab:
|
ft_jupyterlab:
|
||||||
build:
|
build:
|
||||||
context: ..
|
context: ..
|
||||||
dockerfile: docker/Dockerfile.jupyter
|
dockerfile: docker/Dockerfile.jupyter
|
||||||
restart: unless-stopped
|
restart: unless-stopped
|
||||||
# container_name: freqtrade
|
container_name: freqtrade
|
||||||
ports:
|
ports:
|
||||||
- "127.0.0.1:8888:8888"
|
- "127.0.0.1:8888:8888"
|
||||||
volumes:
|
volumes:
|
||||||
|
|||||||
BIN
docs/JOSS_paper/assets/freqai_algo.jpg
Normal file
|
After Width: | Height: | Size: 345 KiB |
BIN
docs/JOSS_paper/assets/freqai_algorithm-diagram.jpg
Normal file
|
After Width: | Height: | Size: 490 KiB |
15
docs/JOSS_paper/note_to_editors.txt
Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
Dear Editors,
|
||||||
|
We present a paper for ``FreqAI`` a machine learning sandbox for researchers and citizen scientists alike.
|
||||||
|
There are a large number of authors, however all have contributed in a significant way to this paper.
|
||||||
|
For clarity the contribution of each author is outlined:
|
||||||
|
|
||||||
|
- Robert Caulk : Conception and software development
|
||||||
|
- Elin Tornquist : Theoretical brainstorming, data analysis, tool dev
|
||||||
|
- Matthias Voppichler : Software architecture and code review
|
||||||
|
- Andrew R. Lawless : Extensive testing, feature brainstorming
|
||||||
|
- Ryan McMullan : Extensive testing, feature brainstorming
|
||||||
|
- Wagner Costa Santos : Major backtesting developments, extensive testing
|
||||||
|
- Pascal Schmidt : Extensive testing, feature brainstorming
|
||||||
|
- Timothy C. Pogue : Webhooks forecast sharing
|
||||||
|
- Stefan P. Gehring : Extensive testing, feature brainstorming
|
||||||
|
- Johan van der Vlugt : Extensive testing, feature brainstorming
|
||||||
207
docs/JOSS_paper/paper.bib
Normal file
@@ -0,0 +1,207 @@
|
|||||||
|
@article{scikit-learn,
|
||||||
|
title={Scikit-learn: Machine Learning in {P}ython},
|
||||||
|
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
|
||||||
|
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
|
||||||
|
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
|
||||||
|
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
|
||||||
|
journal={Journal of Machine Learning Research},
|
||||||
|
volume={12},
|
||||||
|
pages={2825--2830},
|
||||||
|
year={2011}
|
||||||
|
}
|
||||||
|
|
||||||
|
@inproceedings{catboost,
|
||||||
|
author = {Prokhorenkova, Liudmila and Gusev, Gleb and Vorobev, Aleksandr and Dorogush, Anna Veronika and Gulin, Andrey},
|
||||||
|
title = {CatBoost: Unbiased Boosting with Categorical Features},
|
||||||
|
year = {2018},
|
||||||
|
publisher = {Curran Associates Inc.},
|
||||||
|
address = {Red Hook, NY, USA},
|
||||||
|
abstract = {This paper presents the key algorithmic techniques behind CatBoost, a new gradient boosting toolkit. Their combination leads to CatBoost outperforming other publicly available boosting implementations in terms of quality on a variety of datasets. Two critical algorithmic advances introduced in CatBoost are the implementation of ordered boosting, a permutation-driven alternative to the classic algorithm, and an innovative algorithm for processing categorical features. Both techniques were created to fight a prediction shift caused by a special kind of target leakage present in all currently existing implementations of gradient boosting algorithms. In this paper, we provide a detailed analysis of this problem and demonstrate that proposed algorithms solve it effectively, leading to excellent empirical results.},
|
||||||
|
booktitle = {Proceedings of the 32nd International Conference on Neural Information Processing Systems},
|
||||||
|
pages = {6639–6649},
|
||||||
|
numpages = {11},
|
||||||
|
location = {Montr\'{e}al, Canada},
|
||||||
|
series = {NIPS'18}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@article{lightgbm,
|
||||||
|
title={Lightgbm: A highly efficient gradient boosting decision tree},
|
||||||
|
author={Ke, Guolin and Meng, Qi and Finley, Thomas and Wang, Taifeng and Chen, Wei and Ma, Weidong and Ye, Qiwei and Liu, Tie-Yan},
|
||||||
|
journal={Advances in neural information processing systems},
|
||||||
|
volume={30},
|
||||||
|
pages={3146--3154},
|
||||||
|
year={2017}
|
||||||
|
}
|
||||||
|
|
||||||
|
@inproceedings{xgboost,
|
||||||
|
author = {Chen, Tianqi and Guestrin, Carlos},
|
||||||
|
title = {{XGBoost}: A Scalable Tree Boosting System},
|
||||||
|
booktitle = {Proceedings of the 22nd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining},
|
||||||
|
series = {KDD '16},
|
||||||
|
year = {2016},
|
||||||
|
isbn = {978-1-4503-4232-2},
|
||||||
|
location = {San Francisco, California, USA},
|
||||||
|
pages = {785--794},
|
||||||
|
numpages = {10},
|
||||||
|
url = {http://doi.acm.org/10.1145/2939672.2939785},
|
||||||
|
doi = {10.1145/2939672.2939785},
|
||||||
|
acmid = {2939785},
|
||||||
|
publisher = {ACM},
|
||||||
|
address = {New York, NY, USA},
|
||||||
|
keywords = {large-scale machine learning},
|
||||||
|
}
|
||||||
|
|
||||||
|
@article{stable-baselines3,
|
||||||
|
author = {Antonin Raffin and Ashley Hill and Adam Gleave and Anssi Kanervisto and Maximilian Ernestus and Noah Dormann},
|
||||||
|
title = {Stable-Baselines3: Reliable Reinforcement Learning Implementations},
|
||||||
|
journal = {Journal of Machine Learning Research},
|
||||||
|
year = {2021},
|
||||||
|
volume = {22},
|
||||||
|
number = {268},
|
||||||
|
pages = {1-8},
|
||||||
|
url = {http://jmlr.org/papers/v22/20-1364.html}
|
||||||
|
}
|
||||||
|
|
||||||
|
@misc{openai,
|
||||||
|
title={OpenAI Gym},
|
||||||
|
author={Greg Brockman and Vicki Cheung and Ludwig Pettersson and Jonas Schneider and John Schulman and Jie Tang and Wojciech Zaremba},
|
||||||
|
year={2016},
|
||||||
|
eprint={1606.01540},
|
||||||
|
archivePrefix={arXiv},
|
||||||
|
primaryClass={cs.LG}
|
||||||
|
}
|
||||||
|
|
||||||
|
@misc{tensorflow,
|
||||||
|
title={ {TensorFlow}: Large-Scale Machine Learning on Heterogeneous Systems},
|
||||||
|
url={https://www.tensorflow.org/},
|
||||||
|
note={Software available from tensorflow.org},
|
||||||
|
author={
|
||||||
|
Mart\'{i}n~Abadi and
|
||||||
|
Ashish~Agarwal and
|
||||||
|
Paul~Barham and
|
||||||
|
Eugene~Brevdo and
|
||||||
|
Zhifeng~Chen and
|
||||||
|
Craig~Citro and
|
||||||
|
Greg~S.~Corrado and
|
||||||
|
Andy~Davis and
|
||||||
|
Jeffrey~Dean and
|
||||||
|
Matthieu~Devin and
|
||||||
|
Sanjay~Ghemawat and
|
||||||
|
Ian~Goodfellow and
|
||||||
|
Andrew~Harp and
|
||||||
|
Geoffrey~Irving and
|
||||||
|
Michael~Isard and
|
||||||
|
Yangqing Jia and
|
||||||
|
Rafal~Jozefowicz and
|
||||||
|
Lukasz~Kaiser and
|
||||||
|
Manjunath~Kudlur and
|
||||||
|
Josh~Levenberg and
|
||||||
|
Dandelion~Man\'{e} and
|
||||||
|
Rajat~Monga and
|
||||||
|
Sherry~Moore and
|
||||||
|
Derek~Murray and
|
||||||
|
Chris~Olah and
|
||||||
|
Mike~Schuster and
|
||||||
|
Jonathon~Shlens and
|
||||||
|
Benoit~Steiner and
|
||||||
|
Ilya~Sutskever and
|
||||||
|
Kunal~Talwar and
|
||||||
|
Paul~Tucker and
|
||||||
|
Vincent~Vanhoucke and
|
||||||
|
Vijay~Vasudevan and
|
||||||
|
Fernanda~Vi\'{e}gas and
|
||||||
|
Oriol~Vinyals and
|
||||||
|
Pete~Warden and
|
||||||
|
Martin~Wattenberg and
|
||||||
|
Martin~Wicke and
|
||||||
|
Yuan~Yu and
|
||||||
|
Xiaoqiang~Zheng},
|
||||||
|
year={2015},
|
||||||
|
}
|
||||||
|
|
||||||
|
@incollection{pytorch,
|
||||||
|
title = {PyTorch: An Imperative Style, High-Performance Deep Learning Library},
|
||||||
|
author = {Paszke, Adam and Gross, Sam and Massa, Francisco and Lerer, Adam and Bradbury, James and Chanan, Gregory and Killeen, Trevor and Lin, Zeming and Gimelshein, Natalia and Antiga, Luca and Desmaison, Alban and Kopf, Andreas and Yang, Edward and DeVito, Zachary and Raison, Martin and Tejani, Alykhan and Chilamkurthy, Sasank and Steiner, Benoit and Fang, Lu and Bai, Junjie and Chintala, Soumith},
|
||||||
|
booktitle = {Advances in Neural Information Processing Systems 32},
|
||||||
|
editor = {H. Wallach and H. Larochelle and A. Beygelzimer and F. d\textquotesingle Alch\'{e}-Buc and E. Fox and R. Garnett},
|
||||||
|
pages = {8024--8035},
|
||||||
|
year = {2019},
|
||||||
|
publisher = {Curran Associates, Inc.},
|
||||||
|
url = {http://papers.neurips.cc/paper/9015-pytorch-an-imperative-style-high-performance-deep-learning-library.pdf}
|
||||||
|
}
|
||||||
|
|
||||||
|
@ARTICLE{scipy,
|
||||||
|
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
|
||||||
|
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
|
||||||
|
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
|
||||||
|
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
|
||||||
|
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
|
||||||
|
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
|
||||||
|
Kern, Robert and Larson, Eric and Carey, C J and
|
||||||
|
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
|
||||||
|
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
|
||||||
|
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
|
||||||
|
Harris, Charles R. and Archibald, Anne M. and
|
||||||
|
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
|
||||||
|
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
|
||||||
|
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
|
||||||
|
Computing in Python}},
|
||||||
|
journal = {Nature Methods},
|
||||||
|
year = {2020},
|
||||||
|
volume = {17},
|
||||||
|
pages = {261--272},
|
||||||
|
adsurl = {https://rdcu.be/b08Wh},
|
||||||
|
doi = {10.1038/s41592-019-0686-2},
|
||||||
|
}
|
||||||
|
|
||||||
|
@Article{numpy,
|
||||||
|
title = {Array programming with {NumPy}},
|
||||||
|
author = {Charles R. Harris and K. Jarrod Millman and St{\'{e}}fan J.
|
||||||
|
van der Walt and Ralf Gommers and Pauli Virtanen and David
|
||||||
|
Cournapeau and Eric Wieser and Julian Taylor and Sebastian
|
||||||
|
Berg and Nathaniel J. Smith and Robert Kern and Matti Picus
|
||||||
|
and Stephan Hoyer and Marten H. van Kerkwijk and Matthew
|
||||||
|
Brett and Allan Haldane and Jaime Fern{\'{a}}ndez del
|
||||||
|
R{\'{i}}o and Mark Wiebe and Pearu Peterson and Pierre
|
||||||
|
G{\'{e}}rard-Marchant and Kevin Sheppard and Tyler Reddy and
|
||||||
|
Warren Weckesser and Hameer Abbasi and Christoph Gohlke and
|
||||||
|
Travis E. Oliphant},
|
||||||
|
year = {2020},
|
||||||
|
month = sep,
|
||||||
|
journal = {Nature},
|
||||||
|
volume = {585},
|
||||||
|
number = {7825},
|
||||||
|
pages = {357--362},
|
||||||
|
doi = {10.1038/s41586-020-2649-2},
|
||||||
|
publisher = {Springer Science and Business Media {LLC}},
|
||||||
|
url = {https://doi.org/10.1038/s41586-020-2649-2}
|
||||||
|
}
|
||||||
|
|
||||||
|
@inproceedings{pandas,
|
||||||
|
title={Data structures for statistical computing in python},
|
||||||
|
author={McKinney, Wes and others},
|
||||||
|
booktitle={Proceedings of the 9th Python in Science Conference},
|
||||||
|
volume={445},
|
||||||
|
pages={51--56},
|
||||||
|
year={2010},
|
||||||
|
organization={Austin, TX},
|
||||||
|
doi={10.25080/Majora-92bf1922-00a}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
@online{finrl,
|
||||||
|
title = {AI4Finance-Foundation},
|
||||||
|
year = 2022,
|
||||||
|
url = {https://github.com/AI4Finance-Foundation/FinRL},
|
||||||
|
urldate = {2022-09-30}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@online{tensortrade,
|
||||||
|
title = {tensortrade},
|
||||||
|
year = 2022,
|
||||||
|
url = {https://tensortradex.readthedocs.io/en/latest/L},
|
||||||
|
urldate = {2022-09-30}
|
||||||
|
}
|
||||||
941
docs/JOSS_paper/paper.jats
Normal file
@@ -0,0 +1,941 @@
|
|||||||
|
<?xml version="1.0" encoding="utf-8" ?>
|
||||||
|
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.2 20190208//EN"
|
||||||
|
"JATS-publishing1.dtd">
|
||||||
|
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="1.2" article-type="other">
|
||||||
|
<front>
|
||||||
|
<journal-meta>
|
||||||
|
<journal-id></journal-id>
|
||||||
|
<journal-title-group>
|
||||||
|
<journal-title>Journal of Open Source Software</journal-title>
|
||||||
|
<abbrev-journal-title>JOSS</abbrev-journal-title>
|
||||||
|
</journal-title-group>
|
||||||
|
<issn publication-format="electronic">2475-9066</issn>
|
||||||
|
<publisher>
|
||||||
|
<publisher-name>Open Journals</publisher-name>
|
||||||
|
</publisher>
|
||||||
|
</journal-meta>
|
||||||
|
<article-meta>
|
||||||
|
<article-id pub-id-type="publisher-id">0</article-id>
|
||||||
|
<article-id pub-id-type="doi">N/A</article-id>
|
||||||
|
<title-group>
|
||||||
|
<article-title><monospace>FreqAI</monospace>: generalizing adaptive
|
||||||
|
modeling for chaotic time-series market forecasts</article-title>
|
||||||
|
</title-group>
|
||||||
|
<contrib-group>
|
||||||
|
<contrib contrib-type="author">
|
||||||
|
<contrib-id contrib-id-type="orcid">0000-0001-5618-8629</contrib-id>
|
||||||
|
<name>
|
||||||
|
<surname>Ph.D</surname>
|
||||||
|
<given-names>Robert A. Caulk</given-names>
|
||||||
|
</name>
|
||||||
|
<xref ref-type="aff" rid="aff-1"/>
|
||||||
|
<xref ref-type="aff" rid="aff-2"/>
|
||||||
|
</contrib>
|
||||||
|
<contrib contrib-type="author">
|
||||||
|
<contrib-id contrib-id-type="orcid">0000-0003-3289-8604</contrib-id>
|
||||||
|
<name>
|
||||||
|
<surname>Ph.D</surname>
|
||||||
|
<given-names>Elin Törnquist</given-names>
|
||||||
|
</name>
|
||||||
|
<xref ref-type="aff" rid="aff-1"/>
|
||||||
|
<xref ref-type="aff" rid="aff-2"/>
|
||||||
|
</contrib>
|
||||||
|
<contrib contrib-type="author">
|
||||||
|
<name>
|
||||||
|
<surname>Voppichler</surname>
|
||||||
|
<given-names>Matthias</given-names>
|
||||||
|
</name>
|
||||||
|
<xref ref-type="aff" rid="aff-2"/>
|
||||||
|
</contrib>
|
||||||
|
<contrib contrib-type="author">
|
||||||
|
<name>
|
||||||
|
<surname>Lawless</surname>
|
||||||
|
<given-names>Andrew R.</given-names>
|
||||||
|
</name>
|
||||||
|
<xref ref-type="aff" rid="aff-2"/>
|
||||||
|
</contrib>
|
||||||
|
<contrib contrib-type="author">
|
||||||
|
<name>
|
||||||
|
<surname>McMullan</surname>
|
||||||
|
<given-names>Ryan</given-names>
|
||||||
|
</name>
|
||||||
|
<xref ref-type="aff" rid="aff-2"/>
|
||||||
|
</contrib>
|
||||||
|
<contrib contrib-type="author">
|
||||||
|
<name>
|
||||||
|
<surname>Santos</surname>
|
||||||
|
<given-names>Wagner Costa</given-names>
|
||||||
|
</name>
|
||||||
|
<xref ref-type="aff" rid="aff-1"/>
|
||||||
|
<xref ref-type="aff" rid="aff-2"/>
|
||||||
|
</contrib>
|
||||||
|
<contrib contrib-type="author">
|
||||||
|
<name>
|
||||||
|
<surname>Pogue</surname>
|
||||||
|
<given-names>Timothy C.</given-names>
|
||||||
|
</name>
|
||||||
|
<xref ref-type="aff" rid="aff-1"/>
|
||||||
|
<xref ref-type="aff" rid="aff-2"/>
|
||||||
|
</contrib>
|
||||||
|
<contrib contrib-type="author">
|
||||||
|
<name>
|
||||||
|
<surname>van der Vlugt</surname>
|
||||||
|
<given-names>Johan</given-names>
|
||||||
|
</name>
|
||||||
|
<xref ref-type="aff" rid="aff-2"/>
|
||||||
|
</contrib>
|
||||||
|
<contrib contrib-type="author">
|
||||||
|
<name>
|
||||||
|
<surname>Gehring</surname>
|
||||||
|
<given-names>Stefan P.</given-names>
|
||||||
|
</name>
|
||||||
|
<xref ref-type="aff" rid="aff-2"/>
|
||||||
|
</contrib>
|
||||||
|
<contrib contrib-type="author">
|
||||||
|
<name>
|
||||||
|
<surname>Schmidt</surname>
|
||||||
|
<given-names>Pascal</given-names>
|
||||||
|
</name>
|
||||||
|
<xref ref-type="aff" rid="aff-2"/>
|
||||||
|
</contrib>
|
||||||
|
<aff id="aff-1">
|
||||||
|
<institution-wrap>
|
||||||
|
<institution>Emergent Methods LLC, Arvada Colorado, 80005,
|
||||||
|
USA</institution>
|
||||||
|
</institution-wrap>
|
||||||
|
</aff>
|
||||||
|
<aff id="aff-2">
|
||||||
|
<institution-wrap>
|
||||||
|
<institution>Freqtrade open source project</institution>
|
||||||
|
</institution-wrap>
|
||||||
|
</aff>
|
||||||
|
</contrib-group>
|
||||||
|
<volume>¿VOL?</volume>
|
||||||
|
<issue>¿ISSUE?</issue>
|
||||||
|
<fpage>¿PAGE?</fpage>
|
||||||
|
<permissions>
|
||||||
|
<copyright-statement>Authors of papers retain copyright and release the
|
||||||
|
work under a Creative Commons Attribution 4.0 International License (CC
|
||||||
|
BY 4.0)</copyright-statement>
|
||||||
|
<copyright-year>2022</copyright-year>
|
||||||
|
<copyright-holder>The article authors</copyright-holder>
|
||||||
|
<license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
|
||||||
|
<license-p>Authors of papers retain copyright and release the work under
|
||||||
|
a Creative Commons Attribution 4.0 International License (CC BY
|
||||||
|
4.0)</license-p>
|
||||||
|
</license>
|
||||||
|
</permissions>
|
||||||
|
<kwd-group kwd-group-type="author">
|
||||||
|
<kwd>Python</kwd>
|
||||||
|
<kwd>Machine Learning</kwd>
|
||||||
|
<kwd>adaptive modeling</kwd>
|
||||||
|
<kwd>chaotic systems</kwd>
|
||||||
|
<kwd>time-series forecasting</kwd>
|
||||||
|
</kwd-group>
|
||||||
|
</article-meta>
|
||||||
|
</front>
|
||||||
|
<body>
|
||||||
|
<sec id="statement-of-need">
|
||||||
|
<title>Statement of need</title>
|
||||||
|
<p>Forecasting chaotic time-series based systems, such as
|
||||||
|
equity/cryptocurrency markets, requires a broad set of tools geared
|
||||||
|
toward testing a wide range of hypotheses. Fortunately, a recent
|
||||||
|
maturation of robust machine learning libraries
|
||||||
|
(e.g. <monospace>scikit-learn</monospace>), has opened up a wide range
|
||||||
|
of research possibilities. Scientists from a diverse range of fields
|
||||||
|
can now easily prototype their studies on an abundance of established
|
||||||
|
machine learning algorithms. Similarly, these user-friendly libraries
|
||||||
|
enable “citzen scientists” to use their basic Python skills for
|
||||||
|
data-exploration. However, leveraging these machine learning libraries
|
||||||
|
on historical and live chaotic data sources can be logistically
|
||||||
|
difficult and expensive. Additionally, robust data-collection,
|
||||||
|
storage, and handling presents a disparate challenge.
|
||||||
|
<ext-link ext-link-type="uri" xlink:href="https://www.freqtrade.io/en/latest/freqai/"><monospace>FreqAI</monospace></ext-link>
|
||||||
|
aims to provide a generalized and extensible open-sourced framework
|
||||||
|
geared toward live deployments of adaptive modeling for market
|
||||||
|
forecasting. The <monospace>FreqAI</monospace> framework is
|
||||||
|
effectively a sandbox for the rich world of open-source machine
|
||||||
|
learning libraries. Inside the <monospace>FreqAI</monospace> sandbox,
|
||||||
|
users find they can combine a wide variety of third-party libraries to
|
||||||
|
test creative hypotheses on a free live 24/7 chaotic data source -
|
||||||
|
cryptocurrency exchange data.</p>
|
||||||
|
</sec>
|
||||||
|
<sec id="summary">
|
||||||
|
<title>Summary</title>
|
||||||
|
<p><ext-link ext-link-type="uri" xlink:href="https://www.freqtrade.io/en/latest/freqai/"><monospace>FreqAI</monospace></ext-link>
|
||||||
|
evolved from a desire to test and compare a range of adaptive
|
||||||
|
time-series forecasting methods on chaotic data. Cryptocurrency
|
||||||
|
markets provide a unique data source since they are operational 24/7
|
||||||
|
and the data is freely available. Luckily, an existing open-source
|
||||||
|
software,
|
||||||
|
<ext-link ext-link-type="uri" xlink:href="https://www.freqtrade.io/en/stable/"><monospace>Freqtrade</monospace></ext-link>,
|
||||||
|
had already matured under a range of talented developers to support
|
||||||
|
robust data collection/storage, as well as robust live environmental
|
||||||
|
interactions for standard algorithmic trading.
|
||||||
|
<monospace>Freqtrade</monospace> also provides a set of data
|
||||||
|
analysis/visualization tools for the evaluation of historical
|
||||||
|
performance as well as live environmental feedback.
|
||||||
|
<monospace>FreqAI</monospace> builds on top of
|
||||||
|
<monospace>Freqtrade</monospace> to include a user-friendly well
|
||||||
|
tested interface for integrating external machine learning libraries
|
||||||
|
for adaptive time-series forecasting. Beyond enabling the integration
|
||||||
|
of existing libraries, <monospace>FreqAI</monospace> hosts a range of
|
||||||
|
custom algorithms and methodologies aimed at improving computational
|
||||||
|
and predictive performances. Thus, <monospace>FreqAI</monospace>
|
||||||
|
contains a range of unique features which can be easily tested in
|
||||||
|
combination with all the existing Python-accessible machine learning
|
||||||
|
libraries to generate novel research on live and historical data.</p>
|
||||||
|
<p>The high-level overview of the software is depicted in Figure
|
||||||
|
1.</p>
|
||||||
|
<p><named-content content-type="image">freqai-algo</named-content>
|
||||||
|
<italic>Abstracted overview of FreqAI algorithm</italic></p>
|
||||||
|
<sec id="connecting-machine-learning-libraries">
|
||||||
|
<title>Connecting machine learning libraries</title>
|
||||||
|
<p>Although the <monospace>FreqAI</monospace> framework is designed
|
||||||
|
to accommodate any Python library in the “Model training” and
|
||||||
|
“Feature set engineering” portions of the software (Figure 1), it
|
||||||
|
already boasts a wide range of well documented examples based on
|
||||||
|
various combinations of:</p>
|
||||||
|
<list list-type="bullet">
|
||||||
|
<list-item>
|
||||||
|
<p>scikit-learn
|
||||||
|
(<xref alt="Pedregosa et al., 2011" rid="ref-scikit-learn" ref-type="bibr">Pedregosa
|
||||||
|
et al., 2011</xref>), Catboost
|
||||||
|
(<xref alt="Prokhorenkova et al., 2018" rid="ref-catboost" ref-type="bibr">Prokhorenkova
|
||||||
|
et al., 2018</xref>), LightGBM
|
||||||
|
(<xref alt="Ke et al., 2017" rid="ref-lightgbm" ref-type="bibr">Ke
|
||||||
|
et al., 2017</xref>), XGBoost
|
||||||
|
(<xref alt="Chen & Guestrin, 2016" rid="ref-xgboost" ref-type="bibr">Chen
|
||||||
|
& Guestrin, 2016</xref>), stable_baselines3
|
||||||
|
(<xref alt="Raffin et al., 2021" rid="ref-stable-baselines3" ref-type="bibr">Raffin
|
||||||
|
et al., 2021</xref>), openai gym
|
||||||
|
(<xref alt="Brockman et al., 2016" rid="ref-openai" ref-type="bibr">Brockman
|
||||||
|
et al., 2016</xref>), tensorflow
|
||||||
|
(<xref alt="Abadi et al., 2015" rid="ref-tensorflow" ref-type="bibr">Abadi
|
||||||
|
et al., 2015</xref>), pytorch
|
||||||
|
(<xref alt="Paszke et al., 2019" rid="ref-pytorch" ref-type="bibr">Paszke
|
||||||
|
et al., 2019</xref>), Scipy
|
||||||
|
(<xref alt="Virtanen et al., 2020" rid="ref-scipy" ref-type="bibr">Virtanen
|
||||||
|
et al., 2020</xref>), Numpy
|
||||||
|
(<xref alt="Harris et al., 2020" rid="ref-numpy" ref-type="bibr">Harris
|
||||||
|
et al., 2020</xref>), and pandas
|
||||||
|
(<xref alt="McKinney & others, 2010" rid="ref-pandas" ref-type="bibr">McKinney
|
||||||
|
& others, 2010</xref>).</p>
|
||||||
|
</list-item>
|
||||||
|
</list>
|
||||||
|
<p>These mature projects contain a wide range of peer-reviewed and
|
||||||
|
industry standard methods, including:</p>
|
||||||
|
<list list-type="bullet">
|
||||||
|
<list-item>
|
||||||
|
<p>Regression, Classification, Neural Networks, Reinforcement
|
||||||
|
Learning, Support Vector Machines, Principal Component Analysis,
|
||||||
|
point clustering, and much more.</p>
|
||||||
|
</list-item>
|
||||||
|
</list>
|
||||||
|
<p>which are all leveraged in <monospace>FreqAI</monospace> for
|
||||||
|
users to use as templates or extend with their own methods.</p>
|
||||||
|
</sec>
|
||||||
|
<sec id="furnishing-novel-methods-and-features">
|
||||||
|
<title>Furnishing novel methods and features</title>
|
||||||
|
<p>Beyond the industry standard methods available through external
|
||||||
|
libraries - <monospace>FreqAI</monospace> includes novel methods
|
||||||
|
which are not available anywhere else in the open-source (or
|
||||||
|
scientific) world. For example, <monospace>FreqAI</monospace>
|
||||||
|
provides :</p>
|
||||||
|
<list list-type="bullet">
|
||||||
|
<list-item>
|
||||||
|
<p>a custom algorithm/methodology for adaptive modeling</p>
|
||||||
|
</list-item>
|
||||||
|
<list-item>
|
||||||
|
<p>rapid and self-monitored feature engineering tools</p>
|
||||||
|
</list-item>
|
||||||
|
<list-item>
|
||||||
|
<p>unique model features/indicators</p>
|
||||||
|
</list-item>
|
||||||
|
<list-item>
|
||||||
|
<p>optimized data collection algorithms</p>
|
||||||
|
</list-item>
|
||||||
|
<list-item>
|
||||||
|
<p>safely integrated outlier detection methods</p>
|
||||||
|
</list-item>
|
||||||
|
<list-item>
|
||||||
|
<p>websocket communicated forecasts</p>
|
||||||
|
</list-item>
|
||||||
|
</list>
|
||||||
|
<p>Of particular interest for researchers,
|
||||||
|
<monospace>FreqAI</monospace> provides the option of large scale
|
||||||
|
experimentation via an optimized websocket communications
|
||||||
|
interface.</p>
|
||||||
|
</sec>
|
||||||
|
<sec id="optimizing-the-back-end">
|
||||||
|
<title>Optimizing the back-end</title>
|
||||||
|
<p><monospace>FreqAI</monospace> aims to make it simple for users to
|
||||||
|
combine all the above tools to run studies based in two distinct
|
||||||
|
modules:</p>
|
||||||
|
<list list-type="bullet">
|
||||||
|
<list-item>
|
||||||
|
<p>backtesting studies</p>
|
||||||
|
</list-item>
|
||||||
|
<list-item>
|
||||||
|
<p>live-deployments</p>
|
||||||
|
</list-item>
|
||||||
|
</list>
|
||||||
|
<p>Both of these modules and their respective data management
|
||||||
|
systems are built on top of
|
||||||
|
<ext-link ext-link-type="uri" xlink:href="https://www.freqtrade.io/en/latest/"><monospace>Freqtrade</monospace></ext-link>,
|
||||||
|
a mature and actively developed cryptocurrency trading software.
|
||||||
|
This means that <monospace>FreqAI</monospace> benefits from a wide
|
||||||
|
range of tangential/disparate feature developments such as:</p>
|
||||||
|
<list list-type="bullet">
|
||||||
|
<list-item>
|
||||||
|
<p>FreqUI, a graphical interface for backtesting and live
|
||||||
|
monitoring</p>
|
||||||
|
</list-item>
|
||||||
|
<list-item>
|
||||||
|
<p>telegram control</p>
|
||||||
|
</list-item>
|
||||||
|
<list-item>
|
||||||
|
<p>robust database handling</p>
|
||||||
|
</list-item>
|
||||||
|
<list-item>
|
||||||
|
<p>futures/leverage trading</p>
|
||||||
|
</list-item>
|
||||||
|
<list-item>
|
||||||
|
<p>dollar cost averaging</p>
|
||||||
|
</list-item>
|
||||||
|
<list-item>
|
||||||
|
<p>trading strategy handling</p>
|
||||||
|
</list-item>
|
||||||
|
<list-item>
|
||||||
|
<p>a variety of free data sources via CCXT (FTX, Binance, Kucoin
|
||||||
|
etc.)</p>
|
||||||
|
</list-item>
|
||||||
|
</list>
|
||||||
|
<p>These features derive from a strong external developer community
|
||||||
|
that shares in the benefit and stability of a communal CI
|
||||||
|
(Continuous Integration) system. Beyond the developer community,
|
||||||
|
<monospace>FreqAI</monospace> benefits strongly from the userbase of
|
||||||
|
<monospace>Freqtrade</monospace>, where most
|
||||||
|
<monospace>FreqAI</monospace> beta-testers/developers originated.
|
||||||
|
This symbiotic relationship between <monospace>Freqtrade</monospace>
|
||||||
|
and <monospace>FreqAI</monospace> ignited a thoroughly tested
|
||||||
|
<ext-link ext-link-type="uri" xlink:href="https://github.com/freqtrade/freqtrade/pull/6832"><monospace>beta</monospace></ext-link>,
|
||||||
|
which demanded a four-month beta and
|
||||||
|
<ext-link ext-link-type="uri" xlink:href="https://www.freqtrade.io/en/latest/freqai/">comprehensive
|
||||||
|
documentation</ext-link> containing:</p>
|
||||||
|
<list list-type="bullet">
|
||||||
|
<list-item>
|
||||||
|
<p>numerous example scripts</p>
|
||||||
|
</list-item>
|
||||||
|
<list-item>
|
||||||
|
<p>a full parameter table</p>
|
||||||
|
</list-item>
|
||||||
|
<list-item>
|
||||||
|
<p>methodological descriptions</p>
|
||||||
|
</list-item>
|
||||||
|
<list-item>
|
||||||
|
<p>high-resolution diagrams/figures</p>
|
||||||
|
</list-item>
|
||||||
|
<list-item>
|
||||||
|
<p>detailed parameter setting recommendations</p>
|
||||||
|
</list-item>
|
||||||
|
</list>
|
||||||
|
</sec>
|
||||||
|
<sec id="providing-a-reproducible-foundation-for-researchers">
|
||||||
|
<title>Providing a reproducible foundation for researchers</title>
|
||||||
|
<p><monospace>FreqAI</monospace> provides an extensible, robust,
|
||||||
|
framework for researchers and citizen data scientists. The
|
||||||
|
<monospace>FreqAI</monospace> sandbox enables rapid conception and
|
||||||
|
testing of exotic hypotheses. From a research perspective,
|
||||||
|
<monospace>FreqAI</monospace> handles the multitude of logistics
|
||||||
|
associated with live deployments, historical backtesting, and
|
||||||
|
feature engineering. With <monospace>FreqAI</monospace>, researchers
|
||||||
|
can focus on their primary interests of feature engineering and
|
||||||
|
hypothesis testing rather than figuring out how to collect and
|
||||||
|
handle data. Further - the well maintained and easily installed
|
||||||
|
open-source framework of <monospace>FreqAI</monospace> enables
|
||||||
|
reproducible scientific studies. This reproducibility component is
|
||||||
|
essential to general scientific advancement in time-series
|
||||||
|
forecasting for chaotic systems.</p>
|
||||||
|
</sec>
|
||||||
|
</sec>
|
||||||
|
<sec id="technical-details">
|
||||||
|
<title>Technical details</title>
|
||||||
|
<p>Typical users configure <monospace>FreqAI</monospace> via two
|
||||||
|
files:</p>
|
||||||
|
<list list-type="order">
|
||||||
|
<list-item>
|
||||||
|
<p>A <monospace>configuration</monospace> file
|
||||||
|
(<monospace>--config</monospace>) which provides access to the
|
||||||
|
full parameter list available
|
||||||
|
<ext-link ext-link-type="uri" xlink:href="https://www.freqtrade.io/en/latest/freqai/">here</ext-link>:</p>
|
||||||
|
</list-item>
|
||||||
|
</list>
|
||||||
|
<list list-type="bullet">
|
||||||
|
<list-item>
|
||||||
|
<p>control high-level feature engineering</p>
|
||||||
|
</list-item>
|
||||||
|
<list-item>
|
||||||
|
<p>customize adaptive modeling techniques</p>
|
||||||
|
</list-item>
|
||||||
|
<list-item>
|
||||||
|
<p>set any model training parameters available in third-party
|
||||||
|
libraries</p>
|
||||||
|
</list-item>
|
||||||
|
<list-item>
|
||||||
|
<p>manage adaptive modeling parameters (retrain frequency,
|
||||||
|
training window size, continual learning, etc.)</p>
|
||||||
|
</list-item>
|
||||||
|
</list>
|
||||||
|
<list list-type="order">
|
||||||
|
<list-item>
|
||||||
|
<label>2.</label>
|
||||||
|
<p>A strategy file (<monospace>--strategy</monospace>) where
|
||||||
|
users:</p>
|
||||||
|
</list-item>
|
||||||
|
</list>
|
||||||
|
<list list-type="bullet">
|
||||||
|
<list-item>
|
||||||
|
<p>list the base training features</p>
|
||||||
|
</list-item>
|
||||||
|
<list-item>
|
||||||
|
<p>set standard technical-analysis strategies</p>
|
||||||
|
</list-item>
|
||||||
|
<list-item>
|
||||||
|
<p>control trade entry/exit criteria</p>
|
||||||
|
</list-item>
|
||||||
|
</list>
|
||||||
|
<p>With these two files, most users can exploit a wide range of
|
||||||
|
pre-existing integrations in <monospace>Catboost</monospace> and 7
|
||||||
|
other libraries with a simple command:</p>
|
||||||
|
<preformat>freqtrade trade --config config_freqai.example.json --strategy FreqaiExampleStrategy --freqaimodel CatboostRegressor</preformat>
|
||||||
|
<p>Advanced users will edit one of the existing
|
||||||
|
<monospace>--freqaimodel</monospace> files, which are simply
|
||||||
|
children of the <monospace>IFreqaiModel</monospace> (details below).
|
||||||
|
Within these files, advanced users can customize training procedures,
|
||||||
|
prediction procedures, outlier detection methods, data preparation,
|
||||||
|
data saving methods, etc. This is all configured in a way where they
|
||||||
|
can customize as little or as much as they want. This flexible
|
||||||
|
customization is owed to the foundational architecture in
|
||||||
|
<monospace>FreqAI</monospace>, which is comprised of three distinct
|
||||||
|
Python objects:</p>
|
||||||
|
<list list-type="bullet">
|
||||||
|
<list-item>
|
||||||
|
<p><monospace>IFreqaiModel</monospace></p>
|
||||||
|
<list list-type="bullet">
|
||||||
|
<list-item>
|
||||||
|
<p>A singular long-lived object containing all the necessary
|
||||||
|
logic to collect data, store data, process data, engineer
|
||||||
|
features, run training, and run inference on models.</p>
|
||||||
|
</list-item>
|
||||||
|
</list>
|
||||||
|
</list-item>
|
||||||
|
<list-item>
|
||||||
|
<p><monospace>FreqaiDataKitchen</monospace></p>
|
||||||
|
<list list-type="bullet">
|
||||||
|
<list-item>
|
||||||
|
<p>A short-lived object which is uniquely created for each
|
||||||
|
asset/model. Beyond metadata, it also contains a variety of
|
||||||
|
data processing tools.</p>
|
||||||
|
</list-item>
|
||||||
|
</list>
|
||||||
|
</list-item>
|
||||||
|
<list-item>
|
||||||
|
<p><monospace>FreqaiDataDrawer</monospace></p>
|
||||||
|
<list list-type="bullet">
|
||||||
|
<list-item>
|
||||||
|
<p>Singular long-lived object containing all the historical
|
||||||
|
predictions, models, and save/load methods.</p>
|
||||||
|
</list-item>
|
||||||
|
</list>
|
||||||
|
</list-item>
|
||||||
|
</list>
|
||||||
|
<p>These objects interact with one another with one goal in mind - to
|
||||||
|
provide a clean data set to machine learning experts/enthusiasts at
|
||||||
|
the user endpoint. These power-users interact with an inherited
|
||||||
|
<monospace>IFreqaiModel</monospace> that allows them to dig as deep or
|
||||||
|
as shallow as they wish into the inheritance tree. Typical power-users
|
||||||
|
focus their efforts on customizing training procedures and testing
|
||||||
|
exotic functionalities available in third-party libraries. Thus,
|
||||||
|
power-users are freed from the algorithmic weight associated with data
|
||||||
|
management, and can instead focus their energy on testing creative
|
||||||
|
hypotheses. Meanwhile, some users choose to override deeper
|
||||||
|
functionalities within <monospace>IFreqaiModel</monospace> to help
|
||||||
|
them craft unique data structures and training procedures.</p>
|
||||||
|
<p>The class structure and algorithmic details are depicted in the
|
||||||
|
following diagram:</p>
|
||||||
|
<p><named-content content-type="image">image</named-content>
|
||||||
|
<italic>Class diagram summarizing object interactions in
|
||||||
|
FreqAI</italic></p>
|
||||||
|
</sec>
|
||||||
|
<sec id="online-documentation">
|
||||||
|
<title>Online documentation</title>
|
||||||
|
<p>The documentation for
|
||||||
|
<ext-link ext-link-type="uri" xlink:href="https://www.freqtrade.io/en/latest/freqai/"><monospace>FreqAI</monospace></ext-link>
|
||||||
|
is available online at
|
||||||
|
<ext-link ext-link-type="uri" xlink:href="https://www.freqtrade.io/en/latest/freqai/">https://www.freqtrade.io/en/latest/freqai/</ext-link>
|
||||||
|
and covers a wide range of materials:</p>
|
||||||
|
<list list-type="bullet">
|
||||||
|
<list-item>
|
||||||
|
<p>Quick-start with a single command and example files -
|
||||||
|
(beginners)</p>
|
||||||
|
</list-item>
|
||||||
|
<list-item>
|
||||||
|
<p>Introduction to the feature engineering interface and basic
|
||||||
|
configurations - (intermediate users)</p>
|
||||||
|
</list-item>
|
||||||
|
<list-item>
|
||||||
|
<p>Parameter table with in-depth descriptions and default parameter
|
||||||
|
setting recommendations - (intermediate users)</p>
|
||||||
|
</list-item>
|
||||||
|
<list-item>
|
||||||
|
<p>Data analysis and post-processing - (advanced users)</p>
|
||||||
|
</list-item>
|
||||||
|
<list-item>
|
||||||
|
<p>Methodological considerations complemented by high resolution
|
||||||
|
figures - (advanced users)</p>
|
||||||
|
</list-item>
|
||||||
|
<list-item>
|
||||||
|
<p>Instructions for integrating third party machine learning
|
||||||
|
libraries into custom prediction models - (advanced users)</p>
|
||||||
|
</list-item>
|
||||||
|
<list-item>
|
||||||
|
<p>Software architectural description with class diagram -
|
||||||
|
(developers)</p>
|
||||||
|
</list-item>
|
||||||
|
<list-item>
|
||||||
|
<p>File structure descriptions - (developers)</p>
|
||||||
|
</list-item>
|
||||||
|
</list>
|
||||||
|
<p>The docs direct users to a variety of pre-made examples which
|
||||||
|
integrate <monospace>Catboost</monospace>,
|
||||||
|
<monospace>LightGBM</monospace>, <monospace>XGBoost</monospace>,
|
||||||
|
<monospace>Sklearn</monospace>,
|
||||||
|
<monospace>stable_baselines3</monospace>,
|
||||||
|
<monospace>torch</monospace>, <monospace>tensorflow</monospace>.
|
||||||
|
Meanwhile, developers will also find thorough docstrings and type
|
||||||
|
hinting throughout the source code to aid in code readability and
|
||||||
|
customization.</p>
|
||||||
|
<p><monospace>FreqAI</monospace> also benefits from a strong support
|
||||||
|
network of users and developers on the
|
||||||
|
<ext-link ext-link-type="uri" xlink:href="https://discord.gg/w6nDM6cM4y"><monospace>Freqtrade</monospace>
|
||||||
|
discord</ext-link> as well as on the
|
||||||
|
<ext-link ext-link-type="uri" xlink:href="https://discord.gg/xE4RMg4QYw"><monospace>FreqAI</monospace>
|
||||||
|
discord</ext-link>. Within the <monospace>FreqAI</monospace> discord,
|
||||||
|
users will find a deep and easily searched knowledge base containing
|
||||||
|
common errors. But more importantly, users in the
|
||||||
|
<monospace>FreqAI</monospace> discord share anecdotal and
|
||||||
|
quantitative observations which compare performance between various
|
||||||
|
third-party libraries and methods.</p>
|
||||||
|
</sec>
|
||||||
|
<sec id="state-of-the-field">
|
||||||
|
<title>State of the field</title>
|
||||||
|
<p>There are two other open-source tools which are geared toward
|
||||||
|
helping users build models for time-series forecasts on market based
|
||||||
|
data. However, each of these tools suffers from a non-generalized
|
||||||
|
framework that does not permit comparison of methods and libraries.
|
||||||
|
Additionally, they do not permit easy live-deployments or
|
||||||
|
adaptive-modeling methods. For example, two open-sourced projects
|
||||||
|
called
|
||||||
|
<ext-link ext-link-type="uri" xlink:href="https://tensortradex.readthedocs.io/en/latest/"><monospace>tensortrade</monospace></ext-link>
|
||||||
|
(<xref alt="Tensortrade, 2022" rid="ref-tensortrade" ref-type="bibr"><italic>Tensortrade</italic>,
|
||||||
|
2022</xref>) and
|
||||||
|
<ext-link ext-link-type="uri" xlink:href="https://github.com/AI4Finance-Foundation/FinRL"><monospace>FinRL</monospace></ext-link>
|
||||||
|
(<xref alt="AI4Finance-Foundation, 2022" rid="ref-finrl" ref-type="bibr"><italic>AI4Finance-Foundation</italic>,
|
||||||
|
2022</xref>) limit users to the exploration of reinforcement learning
|
||||||
|
on historical data. These software packages also do not provide robust live
|
||||||
|
deployments, they do not furnish novel feature engineering algorithms,
|
||||||
|
and they do not provide custom data analysis tools.
|
||||||
|
<monospace>FreqAI</monospace> fills the gap.</p>
|
||||||
|
</sec>
|
||||||
|
<sec id="on-going-research">
|
||||||
|
<title>On-going research</title>
|
||||||
|
<p>Emergent Methods, based in Arvada CO, is actively using
|
||||||
|
<monospace>FreqAI</monospace> to perform large scale experiments aimed
|
||||||
|
at comparing machine learning libraries in live and historical
|
||||||
|
environments. Past projects include backtesting parametric sweeps,
|
||||||
|
while active projects include a 3 week live deployment comparison
|
||||||
|
between <monospace>CatboostRegressor</monospace>,
|
||||||
|
<monospace>LightGBMRegressor</monospace>, and
|
||||||
|
<monospace>XGBoostRegressor</monospace>. Results from these studies
|
||||||
|
are on track for publication in scientific journals as well as more
|
||||||
|
general data science blogs (e.g. Medium).</p>
|
||||||
|
</sec>
|
||||||
|
<sec id="installing-and-running-freqai">
|
||||||
|
<title>Installing and running <monospace>FreqAI</monospace></title>
|
||||||
|
<p><monospace>FreqAI</monospace> is automatically installed with
|
||||||
|
<monospace>Freqtrade</monospace> using the following commands on linux
|
||||||
|
systems:</p>
|
||||||
|
<preformat>git clone git@github.com:freqtrade/freqtrade.git
|
||||||
|
cd freqtrade
|
||||||
|
./setup.sh -i</preformat>
|
||||||
|
<p>However, <monospace>FreqAI</monospace> also benefits from
|
||||||
|
<monospace>Freqtrade</monospace> docker distributions, and can be run
|
||||||
|
with docker by pulling the stable or develop images from
|
||||||
|
<monospace>Freqtrade</monospace> distributions.</p>
|
||||||
|
</sec>
|
||||||
|
<sec id="funding-sources">
|
||||||
|
<title>Funding sources</title>
|
||||||
|
<p><ext-link ext-link-type="uri" xlink:href="https://www.freqtrade.io/en/latest/freqai/"><monospace>FreqAI</monospace></ext-link>
|
||||||
|
has had no official sponsors, and is entirely grass roots. All
|
||||||
|
donations into the project (e.g. the GitHub sponsor system) are kept
|
||||||
|
inside the project to help support development of open-sourced and
|
||||||
|
communally beneficial features.</p>
|
||||||
|
</sec>
|
||||||
|
<sec id="acknowledgements">
|
||||||
|
<title>Acknowledgements</title>
|
||||||
|
<p>We would like to acknowledge various beta testers of
|
||||||
|
<monospace>FreqAI</monospace>:</p>
|
||||||
|
<list list-type="bullet">
|
||||||
|
<list-item>
|
||||||
|
<p>Richárd Józsa</p>
|
||||||
|
</list-item>
|
||||||
|
<list-item>
|
||||||
|
<p>Juha Nykänen</p>
|
||||||
|
</list-item>
|
||||||
|
<list-item>
|
||||||
|
<p>Salah Lamkadem</p>
|
||||||
|
</list-item>
|
||||||
|
</list>
|
||||||
|
<p>As well as various <monospace>Freqtrade</monospace>
|
||||||
|
<ext-link ext-link-type="uri" xlink:href="https://github.com/freqtrade/freqtrade/graphs/contributors">developers</ext-link>
|
||||||
|
maintaining tangential, yet essential, modules.</p>
|
||||||
|
</sec>
|
||||||
|
</body>
|
||||||
|
<back>
|
||||||
|
<ref-list>
|
||||||
|
<ref id="ref-scikit-learn">
|
||||||
|
<element-citation publication-type="article-journal">
|
||||||
|
<person-group person-group-type="author">
|
||||||
|
<name><surname>Pedregosa</surname><given-names>F.</given-names></name>
|
||||||
|
<name><surname>Varoquaux</surname><given-names>G.</given-names></name>
|
||||||
|
<name><surname>Gramfort</surname><given-names>A.</given-names></name>
|
||||||
|
<name><surname>Michel</surname><given-names>V.</given-names></name>
|
||||||
|
<name><surname>Thirion</surname><given-names>B.</given-names></name>
|
||||||
|
<name><surname>Grisel</surname><given-names>O.</given-names></name>
|
||||||
|
<name><surname>Blondel</surname><given-names>M.</given-names></name>
|
||||||
|
<name><surname>Prettenhofer</surname><given-names>P.</given-names></name>
|
||||||
|
<name><surname>Weiss</surname><given-names>R.</given-names></name>
|
||||||
|
<name><surname>Dubourg</surname><given-names>V.</given-names></name>
|
||||||
|
<name><surname>Vanderplas</surname><given-names>J.</given-names></name>
|
||||||
|
<name><surname>Passos</surname><given-names>A.</given-names></name>
|
||||||
|
<name><surname>Cournapeau</surname><given-names>D.</given-names></name>
|
||||||
|
<name><surname>Brucher</surname><given-names>M.</given-names></name>
|
||||||
|
<name><surname>Perrot</surname><given-names>M.</given-names></name>
|
||||||
|
<name><surname>Duchesnay</surname><given-names>E.</given-names></name>
|
||||||
|
</person-group>
|
||||||
|
<article-title>Scikit-learn: Machine learning in Python</article-title>
|
||||||
|
<source>Journal of Machine Learning Research</source>
|
||||||
|
<year iso-8601-date="2011">2011</year>
|
||||||
|
<volume>12</volume>
|
||||||
|
<fpage>2825</fpage>
|
||||||
|
<lpage>2830</lpage>
|
||||||
|
</element-citation>
|
||||||
|
</ref>
|
||||||
|
<ref id="ref-catboost">
|
||||||
|
<element-citation publication-type="paper-conference">
|
||||||
|
<person-group person-group-type="author">
|
||||||
|
<name><surname>Prokhorenkova</surname><given-names>Liudmila</given-names></name>
|
||||||
|
<name><surname>Gusev</surname><given-names>Gleb</given-names></name>
|
||||||
|
<name><surname>Vorobev</surname><given-names>Aleksandr</given-names></name>
|
||||||
|
<name><surname>Dorogush</surname><given-names>Anna Veronika</given-names></name>
|
||||||
|
<name><surname>Gulin</surname><given-names>Andrey</given-names></name>
|
||||||
|
</person-group>
|
||||||
|
<article-title>CatBoost: Unbiased boosting with categorical features</article-title>
|
||||||
|
<source>Proceedings of the 32nd international conference on neural information processing systems</source>
|
||||||
|
<publisher-name>Curran Associates Inc.</publisher-name>
|
||||||
|
<publisher-loc>Red Hook, NY, USA</publisher-loc>
|
||||||
|
<year iso-8601-date="2018">2018</year>
|
||||||
|
<fpage>6639</fpage>
|
||||||
|
<lpage>6649</lpage>
|
||||||
|
</element-citation>
|
||||||
|
</ref>
|
||||||
|
<ref id="ref-lightgbm">
|
||||||
|
<element-citation publication-type="article-journal">
|
||||||
|
<person-group person-group-type="author">
|
||||||
|
<name><surname>Ke</surname><given-names>Guolin</given-names></name>
|
||||||
|
<name><surname>Meng</surname><given-names>Qi</given-names></name>
|
||||||
|
<name><surname>Finley</surname><given-names>Thomas</given-names></name>
|
||||||
|
<name><surname>Wang</surname><given-names>Taifeng</given-names></name>
|
||||||
|
<name><surname>Chen</surname><given-names>Wei</given-names></name>
|
||||||
|
<name><surname>Ma</surname><given-names>Weidong</given-names></name>
|
||||||
|
<name><surname>Ye</surname><given-names>Qiwei</given-names></name>
|
||||||
|
<name><surname>Liu</surname><given-names>Tie-Yan</given-names></name>
|
||||||
|
</person-group>
|
||||||
|
<article-title>Lightgbm: A highly efficient gradient boosting decision tree</article-title>
|
||||||
|
<source>Advances in neural information processing systems</source>
|
||||||
|
<year iso-8601-date="2017">2017</year>
|
||||||
|
<volume>30</volume>
|
||||||
|
<fpage>3146</fpage>
|
||||||
|
<lpage>3154</lpage>
|
||||||
|
</element-citation>
|
||||||
|
</ref>
|
||||||
|
<ref id="ref-xgboost">
|
||||||
|
<element-citation publication-type="paper-conference">
|
||||||
|
<person-group person-group-type="author">
|
||||||
|
<name><surname>Chen</surname><given-names>Tianqi</given-names></name>
|
||||||
|
<name><surname>Guestrin</surname><given-names>Carlos</given-names></name>
|
||||||
|
</person-group>
|
||||||
|
<article-title>XGBoost: A scalable tree boosting system</article-title>
|
||||||
|
<source>Proceedings of the 22nd ACM SIGKDD international conference on knowledge discovery and data mining</source>
|
||||||
|
<publisher-name>ACM</publisher-name>
|
||||||
|
<publisher-loc>New York, NY, USA</publisher-loc>
|
||||||
|
<year iso-8601-date="2016">2016</year>
|
||||||
|
<isbn>978-1-4503-4232-2</isbn>
|
||||||
|
<uri>http://doi.acm.org/10.1145/2939672.2939785</uri>
|
||||||
|
<pub-id pub-id-type="doi">10.1145/2939672.2939785</pub-id>
|
||||||
|
<fpage>785</fpage>
|
||||||
|
<lpage>794</lpage>
|
||||||
|
</element-citation>
|
||||||
|
</ref>
|
||||||
|
<ref id="ref-stable-baselines3">
|
||||||
|
<element-citation publication-type="article-journal">
|
||||||
|
<person-group person-group-type="author">
|
||||||
|
<name><surname>Raffin</surname><given-names>Antonin</given-names></name>
|
||||||
|
<name><surname>Hill</surname><given-names>Ashley</given-names></name>
|
||||||
|
<name><surname>Gleave</surname><given-names>Adam</given-names></name>
|
||||||
|
<name><surname>Kanervisto</surname><given-names>Anssi</given-names></name>
|
||||||
|
<name><surname>Ernestus</surname><given-names>Maximilian</given-names></name>
|
||||||
|
<name><surname>Dormann</surname><given-names>Noah</given-names></name>
|
||||||
|
</person-group>
|
||||||
|
<article-title>Stable-Baselines3: Reliable reinforcement learning implementations</article-title>
|
||||||
|
<source>Journal of Machine Learning Research</source>
|
||||||
|
<year iso-8601-date="2021">2021</year>
|
||||||
|
<volume>22</volume>
|
||||||
|
<issue>268</issue>
|
||||||
|
<uri>http://jmlr.org/papers/v22/20-1364.html</uri>
|
||||||
|
<fpage>1</fpage>
|
||||||
|
<lpage>8</lpage>
|
||||||
|
</element-citation>
|
||||||
|
</ref>
|
||||||
|
<ref id="ref-openai">
|
||||||
|
<element-citation>
|
||||||
|
<person-group person-group-type="author">
|
||||||
|
<name><surname>Brockman</surname><given-names>Greg</given-names></name>
|
||||||
|
<name><surname>Cheung</surname><given-names>Vicki</given-names></name>
|
||||||
|
<name><surname>Pettersson</surname><given-names>Ludwig</given-names></name>
|
||||||
|
<name><surname>Schneider</surname><given-names>Jonas</given-names></name>
|
||||||
|
<name><surname>Schulman</surname><given-names>John</given-names></name>
|
||||||
|
<name><surname>Tang</surname><given-names>Jie</given-names></name>
|
||||||
|
<name><surname>Zaremba</surname><given-names>Wojciech</given-names></name>
|
||||||
|
</person-group>
|
||||||
|
<article-title>OpenAI gym</article-title>
|
||||||
|
<year iso-8601-date="2016">2016</year>
|
||||||
|
<uri>https://arxiv.org/abs/1606.01540</uri>
|
||||||
|
</element-citation>
|
||||||
|
</ref>
|
||||||
|
<ref id="ref-tensorflow">
|
||||||
|
<element-citation>
|
||||||
|
<person-group person-group-type="author">
|
||||||
|
<name><surname>Abadi</surname><given-names>Martín</given-names></name>
|
||||||
|
<name><surname>Agarwal</surname><given-names>Ashish</given-names></name>
|
||||||
|
<name><surname>Barham</surname><given-names>Paul</given-names></name>
|
||||||
|
<name><surname>Brevdo</surname><given-names>Eugene</given-names></name>
|
||||||
|
<name><surname>Chen</surname><given-names>Zhifeng</given-names></name>
|
||||||
|
<name><surname>Citro</surname><given-names>Craig</given-names></name>
|
||||||
|
<name><surname>Corrado</surname><given-names>Greg S.</given-names></name>
|
||||||
|
<name><surname>Davis</surname><given-names>Andy</given-names></name>
|
||||||
|
<name><surname>Dean</surname><given-names>Jeffrey</given-names></name>
|
||||||
|
<name><surname>Devin</surname><given-names>Matthieu</given-names></name>
|
||||||
|
<name><surname>Ghemawat</surname><given-names>Sanjay</given-names></name>
|
||||||
|
<name><surname>Goodfellow</surname><given-names>Ian</given-names></name>
|
||||||
|
<name><surname>Harp</surname><given-names>Andrew</given-names></name>
|
||||||
|
<name><surname>Irving</surname><given-names>Geoffrey</given-names></name>
|
||||||
|
<name><surname>Isard</surname><given-names>Michael</given-names></name>
|
||||||
|
<name><surname>Jia</surname><given-names>Yangqing</given-names></name>
|
||||||
|
<name><surname>Jozefowicz</surname><given-names>Rafal</given-names></name>
|
||||||
|
<name><surname>Kaiser</surname><given-names>Lukasz</given-names></name>
|
||||||
|
<name><surname>Kudlur</surname><given-names>Manjunath</given-names></name>
|
||||||
|
<name><surname>Levenberg</surname><given-names>Josh</given-names></name>
|
||||||
|
<name><surname>Mané</surname><given-names>Dandelion</given-names></name>
|
||||||
|
<name><surname>Monga</surname><given-names>Rajat</given-names></name>
|
||||||
|
<name><surname>Moore</surname><given-names>Sherry</given-names></name>
|
||||||
|
<name><surname>Murray</surname><given-names>Derek</given-names></name>
|
||||||
|
<name><surname>Olah</surname><given-names>Chris</given-names></name>
|
||||||
|
<name><surname>Schuster</surname><given-names>Mike</given-names></name>
|
||||||
|
<name><surname>Shlens</surname><given-names>Jonathon</given-names></name>
|
||||||
|
<name><surname>Steiner</surname><given-names>Benoit</given-names></name>
|
||||||
|
<name><surname>Sutskever</surname><given-names>Ilya</given-names></name>
|
||||||
|
<name><surname>Talwar</surname><given-names>Kunal</given-names></name>
|
||||||
|
<name><surname>Tucker</surname><given-names>Paul</given-names></name>
|
||||||
|
<name><surname>Vanhoucke</surname><given-names>Vincent</given-names></name>
|
||||||
|
<name><surname>Vasudevan</surname><given-names>Vijay</given-names></name>
|
||||||
|
<name><surname>Viégas</surname><given-names>Fernanda</given-names></name>
|
||||||
|
<name><surname>Vinyals</surname><given-names>Oriol</given-names></name>
|
||||||
|
<name><surname>Warden</surname><given-names>Pete</given-names></name>
|
||||||
|
<name><surname>Wattenberg</surname><given-names>Martin</given-names></name>
|
||||||
|
<name><surname>Wicke</surname><given-names>Martin</given-names></name>
|
||||||
|
<name><surname>Yu</surname><given-names>Yuan</given-names></name>
|
||||||
|
<name><surname>Zheng</surname><given-names>Xiaoqiang</given-names></name>
|
||||||
|
</person-group>
|
||||||
|
<article-title>TensorFlow: Large-scale machine learning on heterogeneous systems</article-title>
|
||||||
|
<year iso-8601-date="2015">2015</year>
|
||||||
|
<uri>https://www.tensorflow.org/</uri>
|
||||||
|
</element-citation>
|
||||||
|
</ref>
|
||||||
|
<ref id="ref-pytorch">
|
||||||
|
<element-citation publication-type="chapter">
|
||||||
|
<person-group person-group-type="author">
|
||||||
|
<name><surname>Paszke</surname><given-names>Adam</given-names></name>
|
||||||
|
<name><surname>Gross</surname><given-names>Sam</given-names></name>
|
||||||
|
<name><surname>Massa</surname><given-names>Francisco</given-names></name>
|
||||||
|
<name><surname>Lerer</surname><given-names>Adam</given-names></name>
|
||||||
|
<name><surname>Bradbury</surname><given-names>James</given-names></name>
|
||||||
|
<name><surname>Chanan</surname><given-names>Gregory</given-names></name>
|
||||||
|
<name><surname>Killeen</surname><given-names>Trevor</given-names></name>
|
||||||
|
<name><surname>Lin</surname><given-names>Zeming</given-names></name>
|
||||||
|
<name><surname>Gimelshein</surname><given-names>Natalia</given-names></name>
|
||||||
|
<name><surname>Antiga</surname><given-names>Luca</given-names></name>
|
||||||
|
<name><surname>Desmaison</surname><given-names>Alban</given-names></name>
|
||||||
|
<name><surname>Kopf</surname><given-names>Andreas</given-names></name>
|
||||||
|
<name><surname>Yang</surname><given-names>Edward</given-names></name>
|
||||||
|
<name><surname>DeVito</surname><given-names>Zachary</given-names></name>
|
||||||
|
<name><surname>Raison</surname><given-names>Martin</given-names></name>
|
||||||
|
<name><surname>Tejani</surname><given-names>Alykhan</given-names></name>
|
||||||
|
<name><surname>Chilamkurthy</surname><given-names>Sasank</given-names></name>
|
||||||
|
<name><surname>Steiner</surname><given-names>Benoit</given-names></name>
|
||||||
|
<name><surname>Fang</surname><given-names>Lu</given-names></name>
|
||||||
|
<name><surname>Bai</surname><given-names>Junjie</given-names></name>
|
||||||
|
<name><surname>Chintala</surname><given-names>Soumith</given-names></name>
|
||||||
|
</person-group>
|
||||||
|
<article-title>PyTorch: An imperative style, high-performance deep learning library</article-title>
|
||||||
|
<source>Advances in neural information processing systems 32</source>
|
||||||
|
<person-group person-group-type="editor">
|
||||||
|
<name><surname>Wallach</surname><given-names>H.</given-names></name>
|
||||||
|
<name><surname>Larochelle</surname><given-names>H.</given-names></name>
|
||||||
|
<name><surname>Beygelzimer</surname><given-names>A.</given-names></name>
|
||||||
|
<name><surname>dAlché-Buc</surname><given-names>F.</given-names></name>
|
||||||
|
<name><surname>Fox</surname><given-names>E.</given-names></name>
|
||||||
|
<name><surname>Garnett</surname><given-names>R.</given-names></name>
|
||||||
|
</person-group>
|
||||||
|
<publisher-name>Curran Associates, Inc.</publisher-name>
|
||||||
|
<year iso-8601-date="2019">2019</year>
|
||||||
|
<uri>http://papers.neurips.cc/paper/9015-pytorch-an-imperative-style-high-performance-deep-learning-library.pdf</uri>
|
||||||
|
<fpage>8024</fpage>
|
||||||
|
<lpage>8035</lpage>
|
||||||
|
</element-citation>
|
||||||
|
</ref>
|
||||||
|
<ref id="ref-scipy">
|
||||||
|
<element-citation publication-type="article-journal">
|
||||||
|
<person-group person-group-type="author">
|
||||||
|
<name><surname>Virtanen</surname><given-names>Pauli</given-names></name>
|
||||||
|
<name><surname>Gommers</surname><given-names>Ralf</given-names></name>
|
||||||
|
<name><surname>Oliphant</surname><given-names>Travis E.</given-names></name>
|
||||||
|
<name><surname>Haberland</surname><given-names>Matt</given-names></name>
|
||||||
|
<name><surname>Reddy</surname><given-names>Tyler</given-names></name>
|
||||||
|
<name><surname>Cournapeau</surname><given-names>David</given-names></name>
|
||||||
|
<name><surname>Burovski</surname><given-names>Evgeni</given-names></name>
|
||||||
|
<name><surname>Peterson</surname><given-names>Pearu</given-names></name>
|
||||||
|
<name><surname>Weckesser</surname><given-names>Warren</given-names></name>
|
||||||
|
<name><surname>Bright</surname><given-names>Jonathan</given-names></name>
|
||||||
|
<name><surname>van der Walt</surname><given-names>Stéfan J.</given-names></name>
|
||||||
|
<name><surname>Brett</surname><given-names>Matthew</given-names></name>
|
||||||
|
<name><surname>Wilson</surname><given-names>Joshua</given-names></name>
|
||||||
|
<name><surname>Millman</surname><given-names>K. Jarrod</given-names></name>
|
||||||
|
<name><surname>Mayorov</surname><given-names>Nikolay</given-names></name>
|
||||||
|
<name><surname>Nelson</surname><given-names>Andrew R. J.</given-names></name>
|
||||||
|
<name><surname>Jones</surname><given-names>Eric</given-names></name>
|
||||||
|
<name><surname>Kern</surname><given-names>Robert</given-names></name>
|
||||||
|
<name><surname>Larson</surname><given-names>Eric</given-names></name>
|
||||||
|
<name><surname>Carey</surname><given-names>C J</given-names></name>
|
||||||
|
<name><surname>Polat</surname><given-names>İlhan</given-names></name>
|
||||||
|
<name><surname>Feng</surname><given-names>Yu</given-names></name>
|
||||||
|
<name><surname>Moore</surname><given-names>Eric W.</given-names></name>
|
||||||
|
<name><surname>VanderPlas</surname><given-names>Jake</given-names></name>
|
||||||
|
<name><surname>Laxalde</surname><given-names>Denis</given-names></name>
|
||||||
|
<name><surname>Perktold</surname><given-names>Josef</given-names></name>
|
||||||
|
<name><surname>Cimrman</surname><given-names>Robert</given-names></name>
|
||||||
|
<name><surname>Henriksen</surname><given-names>Ian</given-names></name>
|
||||||
|
<name><surname>Quintero</surname><given-names>E. A.</given-names></name>
|
||||||
|
<name><surname>Harris</surname><given-names>Charles R.</given-names></name>
|
||||||
|
<name><surname>Archibald</surname><given-names>Anne M.</given-names></name>
|
||||||
|
<name><surname>Ribeiro</surname><given-names>Antônio H.</given-names></name>
|
||||||
|
<name><surname>Pedregosa</surname><given-names>Fabian</given-names></name>
|
||||||
|
<name><surname>van Mulbregt</surname><given-names>Paul</given-names></name>
|
||||||
|
<string-name>SciPy 1.0 Contributors</string-name>
|
||||||
|
</person-group>
|
||||||
|
<article-title>SciPy 1.0: Fundamental Algorithms for Scientific Computing in Python</article-title>
|
||||||
|
<source>Nature Methods</source>
|
||||||
|
<year iso-8601-date="2020">2020</year>
|
||||||
|
<volume>17</volume>
|
||||||
|
<pub-id pub-id-type="doi">10.1038/s41592-019-0686-2</pub-id>
|
||||||
|
<fpage>261</fpage>
|
||||||
|
<lpage>272</lpage>
|
||||||
|
</element-citation>
|
||||||
|
</ref>
|
||||||
|
<ref id="ref-numpy">
|
||||||
|
<element-citation publication-type="article-journal">
|
||||||
|
<person-group person-group-type="author">
|
||||||
|
<name><surname>Harris</surname><given-names>Charles R.</given-names></name>
|
||||||
|
<name><surname>Millman</surname><given-names>K. Jarrod</given-names></name>
|
||||||
|
<name><surname>Walt</surname><given-names>Stéfan J. van der</given-names></name>
|
||||||
|
<name><surname>Gommers</surname><given-names>Ralf</given-names></name>
|
||||||
|
<name><surname>Virtanen</surname><given-names>Pauli</given-names></name>
|
||||||
|
<name><surname>Cournapeau</surname><given-names>David</given-names></name>
|
||||||
|
<name><surname>Wieser</surname><given-names>Eric</given-names></name>
|
||||||
|
<name><surname>Taylor</surname><given-names>Julian</given-names></name>
|
||||||
|
<name><surname>Berg</surname><given-names>Sebastian</given-names></name>
|
||||||
|
<name><surname>Smith</surname><given-names>Nathaniel J.</given-names></name>
|
||||||
|
<name><surname>Kern</surname><given-names>Robert</given-names></name>
|
||||||
|
<name><surname>Picus</surname><given-names>Matti</given-names></name>
|
||||||
|
<name><surname>Hoyer</surname><given-names>Stephan</given-names></name>
|
||||||
|
<name><surname>Kerkwijk</surname><given-names>Marten H. van</given-names></name>
|
||||||
|
<name><surname>Brett</surname><given-names>Matthew</given-names></name>
|
||||||
|
<name><surname>Haldane</surname><given-names>Allan</given-names></name>
|
||||||
|
<name><surname>Río</surname><given-names>Jaime Fernández del</given-names></name>
|
||||||
|
<name><surname>Wiebe</surname><given-names>Mark</given-names></name>
|
||||||
|
<name><surname>Peterson</surname><given-names>Pearu</given-names></name>
|
||||||
|
<name><surname>Gérard-Marchant</surname><given-names>Pierre</given-names></name>
|
||||||
|
<name><surname>Sheppard</surname><given-names>Kevin</given-names></name>
|
||||||
|
<name><surname>Reddy</surname><given-names>Tyler</given-names></name>
|
||||||
|
<name><surname>Weckesser</surname><given-names>Warren</given-names></name>
|
||||||
|
<name><surname>Abbasi</surname><given-names>Hameer</given-names></name>
|
||||||
|
<name><surname>Gohlke</surname><given-names>Christoph</given-names></name>
|
||||||
|
<name><surname>Oliphant</surname><given-names>Travis E.</given-names></name>
|
||||||
|
</person-group>
|
||||||
|
<article-title>Array programming with NumPy</article-title>
|
||||||
|
<source>Nature</source>
|
||||||
|
<publisher-name>Springer Science; Business Media LLC</publisher-name>
|
||||||
|
<year iso-8601-date="2020-09">2020</year><month>09</month>
|
||||||
|
<volume>585</volume>
|
||||||
|
<issue>7825</issue>
|
||||||
|
<uri>https://doi.org/10.1038/s41586-020-2649-2</uri>
|
||||||
|
<pub-id pub-id-type="doi">10.1038/s41586-020-2649-2</pub-id>
|
||||||
|
<fpage>357</fpage>
|
||||||
|
<lpage>362</lpage>
|
||||||
|
</element-citation>
|
||||||
|
</ref>
|
||||||
|
<ref id="ref-pandas">
|
||||||
|
<element-citation publication-type="paper-conference">
|
||||||
|
<person-group person-group-type="author">
|
||||||
|
<name><surname>McKinney</surname><given-names>Wes</given-names></name>
|
||||||
|
<name><surname>others</surname></name>
|
||||||
|
</person-group>
|
||||||
|
<article-title>Data structures for statistical computing in python</article-title>
|
||||||
|
<source>Proceedings of the 9th python in science conference</source>
|
||||||
|
<publisher-name>Austin, TX</publisher-name>
|
||||||
|
<year iso-8601-date="2010">2010</year>
|
||||||
|
<volume>445</volume>
|
||||||
|
<fpage>51</fpage>
|
||||||
|
<lpage>56</lpage>
|
||||||
|
</element-citation>
|
||||||
|
</ref>
|
||||||
|
<ref id="ref-finrl">
|
||||||
|
<element-citation publication-type="webpage">
|
||||||
|
<article-title>AI4Finance-foundation</article-title>
|
||||||
|
<year iso-8601-date="2022">2022</year>
|
||||||
|
<date-in-citation content-type="access-date"><year iso-8601-date="2022-09-30">2022</year><month>09</month><day>30</day></date-in-citation>
|
||||||
|
<uri>https://github.com/AI4Finance-Foundation/FinRL</uri>
|
||||||
|
</element-citation>
|
||||||
|
</ref>
|
||||||
|
<ref id="ref-tensortrade">
|
||||||
|
<element-citation publication-type="webpage">
|
||||||
|
<article-title>Tensortrade</article-title>
|
||||||
|
<year iso-8601-date="2022">2022</year>
|
||||||
|
<date-in-citation content-type="access-date"><year iso-8601-date="2022-09-30">2022</year><month>09</month><day>30</day></date-in-citation>
|
||||||
|
<uri>https://tensortradex.readthedocs.io/en/latest/</uri>
|
||||||
|
</element-citation>
|
||||||
|
</ref>
|
||||||
|
</ref-list>
|
||||||
|
</back>
|
||||||
|
</article>
|
||||||
212
docs/JOSS_paper/paper.md
Normal file
@@ -0,0 +1,212 @@
|
|||||||
|
---
|
||||||
|
title: '`FreqAI`: generalizing adaptive modeling for chaotic time-series market forecasts'
|
||||||
|
tags:
|
||||||
|
- Python
|
||||||
|
- Machine Learning
|
||||||
|
- adaptive modeling
|
||||||
|
- chaotic systems
|
||||||
|
- time-series forecasting
|
||||||
|
authors:
|
||||||
|
- name: Robert A. Caulk Ph.D
|
||||||
|
orcid: 0000-0001-5618-8629
|
||||||
|
affiliation: 1, 2
|
||||||
|
- name: Elin Törnquist Ph.D
|
||||||
|
orcid: 0000-0003-3289-8604
|
||||||
|
affiliation: 1, 2
|
||||||
|
- name: Matthias Voppichler
|
||||||
|
orcid:
|
||||||
|
affiliation: 2
|
||||||
|
- name: Andrew R. Lawless
|
||||||
|
orcid:
|
||||||
|
affiliation: 2
|
||||||
|
- name: Ryan McMullan
|
||||||
|
orcid:
|
||||||
|
affiliation: 2
|
||||||
|
- name: Wagner Costa Santos
|
||||||
|
orcid:
|
||||||
|
affiliation: 1, 2
|
||||||
|
- name: Timothy C. Pogue
|
||||||
|
orcid:
|
||||||
|
affiliation: 1, 2
|
||||||
|
- name: Johan van der Vlugt
|
||||||
|
orcid:
|
||||||
|
affiliation: 2
|
||||||
|
- name: Stefan P. Gehring
|
||||||
|
orcid:
|
||||||
|
affiliation: 2
|
||||||
|
- name: Pascal Schmidt
|
||||||
|
orcid: 0000-0001-9328-4345
|
||||||
|
affiliation: 2
|
||||||
|
|
||||||
|
<!-- affiliation: "1, 2" # (Multiple affiliations must be quoted) -->
|
||||||
|
affiliations:
|
||||||
|
- name: Emergent Methods LLC, Arvada Colorado, 80005, USA
|
||||||
|
index: 1
|
||||||
|
- name: Freqtrade open source project
|
||||||
|
index: 2
|
||||||
|
date: October 2022
|
||||||
|
bibliography: paper.bib
|
||||||
|
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
|
||||||
|
# Statement of need
|
||||||
|
|
||||||
|
Forecasting chaotic time-series based systems, such as equity/cryptocurrency markets, requires a broad set of tools geared toward testing a wide range of hypotheses. Fortunately, a recent maturation of robust machine learning libraries (e.g. `scikit-learn`), has opened up a wide range of research possibilities. Scientists from a diverse range of fields can now easily prototype their studies on an abundance of established machine learning algorithms. Similarly, these user-friendly libraries enable "citizen scientists" to use their basic Python skills for data-exploration. However, leveraging these machine learning libraries on historical and live chaotic data sources can be logistically difficult and expensive. Additionally, robust data-collection, storage, and handling presents a disparate challenge. [`FreqAI`](https://www.freqtrade.io/en/latest/freqai/) aims to provide a generalized and extensible open-sourced framework geared toward live deployments of adaptive modeling for market forecasting. The `FreqAI` framework is effectively a sandbox for the rich world of open-source machine learning libraries. Inside the `FreqAI` sandbox, users find they can combine a wide variety of third-party libraries to test creative hypotheses on a free live 24/7 chaotic data source - cryptocurrency exchange data.
|
||||||
|
|
||||||
|
|
||||||
|
# Summary
|
||||||
|
|
||||||
|
[`FreqAI`](https://www.freqtrade.io/en/latest/freqai/) evolved from a desire to test and compare a range of adaptive time-series forecasting methods on chaotic data. Cryptocurrency markets provide a unique data source since they are operational 24/7 and the data is freely available via a variety of open-sourced [exchange APIs](https://docs.ccxt.com/en/latest/manual.html#exchange-structure). Luckily, an existing open-source software, [`Freqtrade`](https://www.freqtrade.io/en/stable/), had already matured under a range of talented developers to support robust data collection/storage, as well as robust live environmental interactions for standard algorithmic trading. `Freqtrade` also provides a set of data analysis/visualization tools for the evaluation of historical performance as well as live environmental feedback. `FreqAI` builds on top of `Freqtrade` to include a user-friendly well tested interface for integrating external machine learning libraries for adaptive time-series forecasting. Beyond enabling the integration of existing libraries, `FreqAI` hosts a range of custom algorithms and methodologies aimed at improving computational and predictive performances. Thus, `FreqAI` contains a range of unique features which can be easily tested in combination with all the existing Python-accessible machine learning libraries to generate novel research on live and historical data.
|
||||||
|
|
||||||
|
The high-level overview of the software is depicted in Figure 1.
|
||||||
|
|
||||||
|

|
||||||
|
*Abstracted overview of FreqAI algorithm*
|
||||||
|
|
||||||
|
## Connecting machine learning libraries
|
||||||
|
|
||||||
|
Although the `FreqAI` framework is designed to accommodate any Python library in the "Model training" and "Feature set engineering" portions of the software (Figure 1), it already boasts a wide range of well documented examples based on various combinations of:
|
||||||
|
|
||||||
|
* scikit-learn [@scikit-learn], Catboost [@catboost], LightGBM [@lightgbm], XGBoost [@xgboost], stable_baselines3 [@stable-baselines3], openai gym [@openai], tensorflow [@tensorflow], pytorch [@pytorch], Scipy [@scipy], Numpy [@numpy], and pandas [@pandas].
|
||||||
|
|
||||||
|
These mature projects contain a wide range of peer-reviewed and industry standard methods, including:
|
||||||
|
|
||||||
|
* Regression, Classification, Neural Networks, Reinforcement Learning, Support Vector Machines, Principal Component Analysis, point clustering, and much more.
|
||||||
|
|
||||||
|
which are all leveraged in `FreqAI` for users to use as templates or extend with their own methods.
|
||||||
|
|
||||||
|
## Furnishing novel methods and features
|
||||||
|
|
||||||
|
Beyond the industry standard methods available through external libraries - `FreqAI` includes novel methods which are not available anywhere else in the open-source (or scientific) world. For example, `FreqAI` provides :
|
||||||
|
|
||||||
|
* a custom algorithm/methodology for adaptive modeling details [here](https://www.freqtrade.io/en/stable/freqai/#general-approach) and [here](https://www.freqtrade.io/en/stable/freqai-developers/#project-architecture)
|
||||||
|
* rapid and self-monitored feature engineering tools, details [here](https://www.freqtrade.io/en/stable/freqai-feature-engineering/#feature-engineering)
|
||||||
|
* unique model features/indicators, such as the [inlier metric](https://www.freqtrade.io/en/stable/freqai-feature-engineering/#inlier-metric)
|
||||||
|
* optimized data collection/storage algorithms, all code shown [here](https://github.com/freqtrade/freqtrade/blob/develop/freqtrade/freqai/data_drawer.py)
|
||||||
|
* safely integrated outlier detection methods, details [here](https://www.freqtrade.io/en/stable/freqai-feature-engineering/#outlier-detection)
|
||||||
|
* websocket communicated forecasts, details [here](https://www.freqtrade.io/en/stable/producer-consumer/)
|
||||||
|
|
||||||
|
Of particular interest for researchers, `FreqAI` provides the option of large scale experimentation via an optimized [websocket communications interface](https://www.freqtrade.io/en/stable/producer-consumer/).
|
||||||
|
|
||||||
|
## Optimizing the back-end
|
||||||
|
|
||||||
|
`FreqAI` aims to make it simple for users to combine all the above tools to run studies based in two distinct modules:
|
||||||
|
|
||||||
|
* backtesting studies
|
||||||
|
* live-deployments
|
||||||
|
|
||||||
|
Both of these modules and their respective data management systems are built on top of [`Freqtrade`](https://www.freqtrade.io/en/latest/), a mature and actively developed cryptocurrency trading software. This means that `FreqAI` benefits from a wide range of tangential/disparate feature developments such as:
|
||||||
|
|
||||||
|
* FreqUI, a graphical interface for backtesting and live monitoring
|
||||||
|
* telegram control
|
||||||
|
* robust database handling
|
||||||
|
* futures/leverage trading
|
||||||
|
* dollar cost averaging
|
||||||
|
* trading strategy handling
|
||||||
|
* a variety of free data sources via [CCXT](https://docs.ccxt.com/en/latest/manual.html#exchange-structure) (FTX, Binance, Kucoin etc.)
|
||||||
|
|
||||||
|
These features derive from a strong external developer community that shares in the benefit and stability of a communal CI (Continuous Integration) system. Beyond the developer community, `FreqAI` benefits strongly from the userbase of `Freqtrade`, where most `FreqAI` beta-testers/developers originated. This symbiotic relationship between `Freqtrade` and `FreqAI` ignited a thoroughly tested [`beta`](https://github.com/freqtrade/freqtrade/pull/6832), which demanded a four month beta and [comprehensive documentation](https://www.freqtrade.io/en/latest/freqai/) containing:
|
||||||
|
|
||||||
|
* numerous example scripts
|
||||||
|
* a full parameter table
|
||||||
|
* methodological descriptions
|
||||||
|
* high-resolution diagrams/figures
|
||||||
|
* detailed parameter setting recommendations
|
||||||
|
|
||||||
|
## Providing a reproducible foundation for researchers
|
||||||
|
|
||||||
|
`FreqAI` provides an extensible, robust, framework for researchers and citizen data scientists. The `FreqAI` sandbox enables rapid conception and testing of exotic hypotheses. From a research perspective, `FreqAI` handles the multitude of logistics associated with live deployments, historical backtesting, and feature engineering. With `FreqAI`, researchers can focus on their primary interests of feature engineering and hypothesis testing rather than figuring out how to collect and handle data. Further - the well maintained and easily installed open-source framework of `FreqAI` enables reproducible scientific studies. This reproducibility component is essential to general scientific advancement in time-series forecasting for chaotic systems.
|
||||||
|
|
||||||
|
# Technical details
|
||||||
|
|
||||||
|
Typical users configure `FreqAI` via two files:
|
||||||
|
|
||||||
|
1. A `configuration` file (`--config`) which provides access to the full parameter list available [here](https://www.freqtrade.io/en/latest/freqai/):
|
||||||
|
* control high-level feature engineering
|
||||||
|
* customize adaptive modeling techniques
|
||||||
|
* set any model training parameters available in third-party libraries
|
||||||
|
* manage adaptive modeling parameters (retrain frequency, training window size, continual learning, etc.)
|
||||||
|
|
||||||
|
2. A strategy file (`--strategy`) where users:
|
||||||
|
* list the base training features
|
||||||
|
* set standard technical-analysis strategies
|
||||||
|
* control trade entry/exit criteria
|
||||||
|
|
||||||
|
With these two files, most users can exploit a wide range of pre-existing integrations in `Catboost` and 7 other libraries with a simple command:
|
||||||
|
|
||||||
|
```
|
||||||
|
freqtrade trade --config config_freqai.example.json --strategy FreqaiExampleStrategy --freqaimodel CatboostRegressor
|
||||||
|
```
|
||||||
|
|
||||||
|
Advanced users will edit one of the existing `--freqaimodel` files, which are simply children of the `IFreqaiModel` class (details below). Within these files, advanced users can customize training procedures, prediction procedures, outlier detection methods, data preparation, data saving methods, etc. This is all configured in a way where they can customize as little or as much as they want. This flexible customization is owed to the foundational architecture in `FreqAI`, which is composed of three distinct Python objects:
|
||||||
|
|
||||||
|
* `IFreqaiModel`
|
||||||
|
* A singular long-lived object containing all the necessary logic to collect data, store data, process data, engineer features, run training, and inference models.
|
||||||
|
* `FreqaiDataKitchen`
|
||||||
|
* A short-lived object which is uniquely created for each asset/model. Beyond metadata, it also contains a variety of data processing tools.
|
||||||
|
* `FreqaiDataDrawer`
|
||||||
|
* Singular long-lived object containing all the historical predictions, models, and save/load methods.
|
||||||
|
|
||||||
|
These objects interact with one another with one goal in mind - to provide a clean data set to machine learning experts/enthusiasts at the user endpoint. These power-users interact with an inherited `IFreqaiModel` that allows them to dig as deep or as shallow as they wish into the inheritance tree. Typical power-users focus their efforts on customizing training procedures and testing exotic functionalities available in third-party libraries. Thus, power-users are freed from the algorithmic weight associated with data management, and can instead focus their energy on testing creative hypotheses. Meanwhile, some users choose to override deeper functionalities within `IFreqaiModel` to help them craft unique data structures and training procedures.
|
||||||
|
|
||||||
|
The class structure and algorithmic details are depicted in the following diagram:
|
||||||
|
|
||||||
|

|
||||||
|
*Class diagram summarizing object interactions in FreqAI*
|
||||||
|
|
||||||
|
# Online documentation
|
||||||
|
|
||||||
|
The documentation for [`FreqAI`](https://www.freqtrade.io/en/latest/freqai/) is available online at [https://www.freqtrade.io/en/latest/freqai/](https://www.freqtrade.io/en/latest/freqai/) and covers a wide range of materials:
|
||||||
|
|
||||||
|
* Quick-start with a single command and example files - (beginners)
|
||||||
|
* Introduction to the feature engineering interface and basic configurations - (intermediate users)
|
||||||
|
* Parameter table with indepth descriptions and default parameter setting recommendations - (intermediate users)
|
||||||
|
* Data analysis and post-processing - (advanced users)
|
||||||
|
* Methodological considerations complemented by high resolution figures - (advanced users)
|
||||||
|
* Instructions for integrating third party machine learning libraries into custom prediction models - (advanced users)
|
||||||
|
* Software architectural description with class diagram - (developers)
|
||||||
|
* File structure descriptions - (developers)
|
||||||
|
|
||||||
|
The docs direct users to a variety of pre-made examples which integrate `Catboost`, `LightGBM`, `XGBoost`, `Sklearn`, `stable_baselines3`, `torch`, `tensorflow`. Meanwhile, developers will also find thorough docstrings and type hinting throughout the source code to aid in code readability and customization.
|
||||||
|
|
||||||
|
`FreqAI` also benefits from a strong support network of users and developers on the [`Freqtrade` discord](https://discord.gg/w6nDM6cM4y) as well as on the [`FreqAI` discord](https://discord.gg/xE4RMg4QYw). Within the `FreqAI` discord, users will find a deep and easily searched knowledge base containing common errors. But more importantly, users in the `FreqAI` discord share anecdotal and quantitative observations which compare performance between various third-party libraries and methods.
|
||||||
|
|
||||||
|
# State of the field
|
||||||
|
|
||||||
|
There are two other open-source tools which are geared toward helping users build models for time-series forecasts on market based data. However, each of these tools suffers from a non-generalized framework that does not permit comparison of methods and libraries. Additionally, they do not permit easy live-deployments or adaptive-modeling methods. For example, two open-sourced projects called [`tensortrade`](https://tensortradex.readthedocs.io/en/latest/) [@tensortrade] and [`FinRL`](https://github.com/AI4Finance-Foundation/FinRL) [@finrl] limit users to the exploration of reinforcement learning on historical data. These softwares also do not provide robust live deployments, they do not furnish novel feature engineering algorithms, and they do not provide custom data analysis tools. `FreqAI` fills the gap.
|
||||||
|
|
||||||
|
# On-going research
|
||||||
|
|
||||||
|
Emergent Methods, based in Arvada CO, is actively using `FreqAI` to perform large scale experiments aimed at comparing machine learning libraries in live and historical environments. Past projects include backtesting parametric sweeps, while active projects include a 3 week live deployment comparison between `CatboostRegressor`, `LightGBMRegressor`, and `XGBoostRegressor`. Results from these studies are planned for submission to scientific journals as well as more general data science blogs (e.g. Medium).
|
||||||
|
|
||||||
|
# Installing and running `FreqAI`
|
||||||
|
|
||||||
|
`FreqAI` is automatically installed with `Freqtrade` using the following commands on linux systems:
|
||||||
|
|
||||||
|
```
|
||||||
|
git clone git@github.com:freqtrade/freqtrade.git
|
||||||
|
cd freqtrade
|
||||||
|
./setup.sh -i
|
||||||
|
```
|
||||||
|
|
||||||
|
However, `FreqAI` also benefits from `Freqtrade` docker distributions, and can be run with docker by pulling the stable or develop images from `Freqtrade` distributions.
|
||||||
|
|
||||||
|
# Funding sources
|
||||||
|
|
||||||
|
[`FreqAI`](https://www.freqtrade.io/en/latest/freqai/) has had no official sponsors, and is entirely grass roots. All donations into the project (e.g. the GitHub sponsor system) are kept inside the project to help support development of open-sourced and communally beneficial features.
|
||||||
|
|
||||||
|
# Acknowledgements
|
||||||
|
|
||||||
|
We would like to acknowledge various beta testers of `FreqAI`:
|
||||||
|
|
||||||
|
- Longlong Yu (@lolongcovas)
|
||||||
|
- Richárd Józsa (@richardjozsa)
|
||||||
|
- Juha Nykänen (@suikula)
|
||||||
|
- Emre Suzen (@aemr3)
|
||||||
|
- Salah Lamkadem (@ikonx)
|
||||||
|
|
||||||
|
As well as various `Freqtrade` [developers](https://github.com/freqtrade/freqtrade/graphs/contributors) maintaining tangential, yet essential, modules.
|
||||||
|
|
||||||
|
# References
|
||||||
BIN
docs/JOSS_paper/paper.pdf
Normal file
@@ -18,31 +18,31 @@ freqtrade backtesting -c <config.json> --timeframe <tf> --strategy <strategy_nam
|
|||||||
```
|
```
|
||||||
|
|
||||||
This will tell freqtrade to output a pickled dictionary of strategy, pairs and corresponding
|
This will tell freqtrade to output a pickled dictionary of strategy, pairs and corresponding
|
||||||
DataFrame of the candles that resulted in entry and exit signals.
|
DataFrame of the candles that resulted in buy signals. Depending on how many buys your strategy
|
||||||
Depending on how many entries your strategy makes, this file may get quite large, so periodically check your `user_data/backtest_results` folder to delete old exports.
|
makes, this file may get quite large, so periodically check your `user_data/backtest_results`
|
||||||
|
folder to delete old exports.
|
||||||
|
|
||||||
Before running your next backtest, make sure you either delete your old backtest results or run
|
Before running your next backtest, make sure you either delete your old backtest results or run
|
||||||
backtesting with the `--cache none` option to make sure no cached results are used.
|
backtesting with the `--cache none` option to make sure no cached results are used.
|
||||||
|
|
||||||
If all goes well, you should now see a `backtest-result-{timestamp}_signals.pkl` and `backtest-result-{timestamp}_exited.pkl` files in the `user_data/backtest_results` folder.
|
If all goes well, you should now see a `backtest-result-{timestamp}_signals.pkl` file in the
|
||||||
|
`user_data/backtest_results` folder.
|
||||||
|
|
||||||
To analyze the entry/exit tags, we now need to use the `freqtrade backtesting-analysis` command
|
To analyze the entry/exit tags, we now need to use the `freqtrade backtesting-analysis` command
|
||||||
with `--analysis-groups` option provided with space-separated arguments:
|
with `--analysis-groups` option provided with space-separated arguments (default `0 1 2`):
|
||||||
|
|
||||||
``` bash
|
``` bash
|
||||||
freqtrade backtesting-analysis -c <config.json> --analysis-groups 0 1 2 3 4 5
|
freqtrade backtesting-analysis -c <config.json> --analysis-groups 0 1 2 3 4
|
||||||
```
|
```
|
||||||
|
|
||||||
This command will read from the last backtesting results. The `--analysis-groups` option is
|
This command will read from the last backtesting results. The `--analysis-groups` option is
|
||||||
used to specify the various tabular outputs showing the profit of each group or trade,
|
used to specify the various tabular outputs showing the profit fo each group or trade,
|
||||||
ranging from the simplest (0) to the most detailed per pair, per buy and per sell tag (4):
|
ranging from the simplest (0) to the most detailed per pair, per buy and per sell tag (4):
|
||||||
|
|
||||||
* 0: overall winrate and profit summary by enter_tag
|
|
||||||
* 1: profit summaries grouped by enter_tag
|
* 1: profit summaries grouped by enter_tag
|
||||||
* 2: profit summaries grouped by enter_tag and exit_tag
|
* 2: profit summaries grouped by enter_tag and exit_tag
|
||||||
* 3: profit summaries grouped by pair and enter_tag
|
* 3: profit summaries grouped by pair and enter_tag
|
||||||
* 4: profit summaries grouped by pair, enter_ and exit_tag (this can get quite large)
|
* 4: profit summaries grouped by pair, enter_ and exit_tag (this can get quite large)
|
||||||
* 5: profit summaries grouped by exit_tag
|
|
||||||
|
|
||||||
More options are available by running with the `-h` option.
|
More options are available by running with the `-h` option.
|
||||||
|
|
||||||
@@ -100,119 +100,3 @@ freqtrade backtesting-analysis -c <config.json> --analysis-groups 0 2 --enter-re
|
|||||||
The indicators have to be present in your strategy's main DataFrame (either for your main
|
The indicators have to be present in your strategy's main DataFrame (either for your main
|
||||||
timeframe or for informative timeframes) otherwise they will simply be ignored in the script
|
timeframe or for informative timeframes) otherwise they will simply be ignored in the script
|
||||||
output.
|
output.
|
||||||
|
|
||||||
!!! Note "Indicator List"
|
|
||||||
The indicator values will be displayed for both entry and exit points. If `--indicator-list all` is specified,
|
|
||||||
only the indicators at the entry point will be shown to avoid excessively large lists, which could occur depending on the strategy.
|
|
||||||
|
|
||||||
There are a range of candle and trade-related fields that are included in the analysis so are
|
|
||||||
automatically accessible by including them on the indicator-list, and these include:
|
|
||||||
|
|
||||||
- **open_date :** trade open datetime
|
|
||||||
- **close_date :** trade close datetime
|
|
||||||
- **min_rate :** minimum price seen throughout the position
|
|
||||||
- **max_rate :** maximum price seen throughout the position
|
|
||||||
- **open :** signal candle open price
|
|
||||||
- **close :** signal candle close price
|
|
||||||
- **high :** signal candle high price
|
|
||||||
- **low :** signal candle low price
|
|
||||||
- **volume :** signal candle volume
|
|
||||||
- **profit_ratio :** trade profit ratio
|
|
||||||
- **profit_abs :** absolute profit return of the trade
|
|
||||||
|
|
||||||
#### Sample Output for Indicator Values
|
|
||||||
|
|
||||||
```bash
|
|
||||||
freqtrade backtesting-analysis -c user_data/config.json --analysis-groups 0 --indicator-list chikou_span tenkan_sen
|
|
||||||
```
|
|
||||||
|
|
||||||
In this example,
|
|
||||||
we aim to display the `chikou_span` and `tenkan_sen` indicator values at both the entry and exit points of trades.
|
|
||||||
|
|
||||||
A sample output for indicators might look like this:
|
|
||||||
|
|
||||||
| pair | open_date | enter_reason | exit_reason | chikou_span (entry) | tenkan_sen (entry) | chikou_span (exit) | tenkan_sen (exit) |
|
|
||||||
|-----------|---------------------------|--------------|-------------|---------------------|--------------------|--------------------|-------------------|
|
|
||||||
| DOGE/USDT | 2024-07-06 00:35:00+00:00 | | exit_signal | 0.105 | 0.106 | 0.105 | 0.107 |
|
|
||||||
| BTC/USDT | 2024-08-05 14:20:00+00:00 | | roi | 54643.440 | 51696.400 | 54386.000 | 52072.010 |
|
|
||||||
|
|
||||||
As shown in the table, `chikou_span (entry)` represents the indicator value at the time of trade entry,
|
|
||||||
while `chikou_span (exit)` reflects its value at the time of exit.
|
|
||||||
This detailed view of indicator values enhances the analysis.
|
|
||||||
|
|
||||||
The `(entry)` and `(exit)` suffixes are added to indicators
|
|
||||||
to distinguish the values at the entry and exit points of the trade.
|
|
||||||
|
|
||||||
!!! Note "Trade-wide Indicators"
|
|
||||||
Certain trade-wide indicators do not have the `(entry)` or `(exit)` suffix. These indicators include: `pair`, `stake_amount`,
|
|
||||||
`max_stake_amount`, `amount`, `open_date`, `close_date`, `open_rate`, `close_rate`, `fee_open`, `fee_close`, `trade_duration`,
|
|
||||||
`profit_ratio`, `profit_abs`, `exit_reason`,`initial_stop_loss_abs`, `initial_stop_loss_ratio`, `stop_loss_abs`, `stop_loss_ratio`,
|
|
||||||
`min_rate`, `max_rate`, `is_open`, `enter_tag`, `leverage`, `is_short`, `open_timestamp`, `close_timestamp` and `orders`
|
|
||||||
|
|
||||||
#### Filtering Indicators Based on Entry or Exit Signals
|
|
||||||
|
|
||||||
The `--indicator-list` option, by default, displays indicator values for both entry and exit signals. To filter the indicator values exclusively for entry signals, you can use the `--entry-only` argument. Similarly, to display indicator values only at exit signals, use the `--exit-only` argument.
|
|
||||||
|
|
||||||
Example: Display indicator values at entry signals:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
freqtrade backtesting-analysis -c user_data/config.json --analysis-groups 0 --indicator-list chikou_span tenkan_sen --entry-only
|
|
||||||
```
|
|
||||||
|
|
||||||
Example: Display indicator values at exit signals:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
freqtrade backtesting-analysis -c user_data/config.json --analysis-groups 0 --indicator-list chikou_span tenkan_sen --exit-only
|
|
||||||
```
|
|
||||||
|
|
||||||
!!! note
|
|
||||||
When using these filters, the indicator names will not be suffixed with `(entry)` or `(exit)`.
|
|
||||||
|
|
||||||
### Filtering the trade output by date
|
|
||||||
|
|
||||||
To show only trades between dates within your backtested timerange, supply the usual `timerange` option in `YYYYMMDD-[YYYYMMDD]` format:
|
|
||||||
|
|
||||||
```
|
|
||||||
--timerange : Timerange to filter output trades, start date inclusive, end date exclusive. e.g. 20220101-20221231
|
|
||||||
```
|
|
||||||
|
|
||||||
For example, if your backtest timerange was `20220101-20221231` but you only want to output trades in January:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
freqtrade backtesting-analysis -c <config.json> --timerange 20220101-20220201
|
|
||||||
```
|
|
||||||
|
|
||||||
### Printing out rejected signals
|
|
||||||
|
|
||||||
Use the `--rejected-signals` option to print out rejected signals.
|
|
||||||
|
|
||||||
```bash
|
|
||||||
freqtrade backtesting-analysis -c <config.json> --rejected-signals
|
|
||||||
```
|
|
||||||
|
|
||||||
### Writing tables to CSV
|
|
||||||
|
|
||||||
Some of the tabular outputs can become large, so printing them out to the terminal is not preferable.
|
|
||||||
Use the `--analysis-to-csv` option to disable printing out of tables to standard out and write them to CSV files.
|
|
||||||
|
|
||||||
```bash
|
|
||||||
freqtrade backtesting-analysis -c <config.json> --analysis-to-csv
|
|
||||||
```
|
|
||||||
|
|
||||||
By default this will write one file per output table you specified in the `backtesting-analysis` command, e.g.
|
|
||||||
|
|
||||||
```bash
|
|
||||||
freqtrade backtesting-analysis -c <config.json> --analysis-to-csv --rejected-signals --analysis-groups 0 1
|
|
||||||
```
|
|
||||||
|
|
||||||
This will write to `user_data/backtest_results`:
|
|
||||||
|
|
||||||
* rejected_signals.csv
|
|
||||||
* group_0.csv
|
|
||||||
* group_1.csv
|
|
||||||
|
|
||||||
To override where the files will be written, also specify the `--analysis-csv-path` option.
|
|
||||||
|
|
||||||
```bash
|
|
||||||
freqtrade backtesting-analysis -c <config.json> --analysis-to-csv --analysis-csv-path another/data/path/
|
|
||||||
```
|
|
||||||
|
|||||||
@@ -30,18 +30,11 @@ class SuperDuperHyperOptLoss(IHyperOptLoss):
|
|||||||
"""
|
"""
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def hyperopt_loss_function(
|
def hyperopt_loss_function(results: DataFrame, trade_count: int,
|
||||||
*,
|
min_date: datetime, max_date: datetime,
|
||||||
results: DataFrame,
|
config: Config, processed: Dict[str, DataFrame],
|
||||||
trade_count: int,
|
backtest_stats: Dict[str, Any],
|
||||||
min_date: datetime,
|
*args, **kwargs) -> float:
|
||||||
max_date: datetime,
|
|
||||||
config: Config,
|
|
||||||
processed: dict[str, DataFrame],
|
|
||||||
backtest_stats: dict[str, Any],
|
|
||||||
starting_balance: float,
|
|
||||||
**kwargs,
|
|
||||||
) -> float:
|
|
||||||
"""
|
"""
|
||||||
Objective function, returns smaller number for better results
|
Objective function, returns smaller number for better results
|
||||||
This is the legacy algorithm (used until now in freqtrade).
|
This is the legacy algorithm (used until now in freqtrade).
|
||||||
@@ -71,7 +64,6 @@ Currently, the arguments are:
|
|||||||
* `config`: Config object used (Note: Not all strategy-related parameters will be updated here if they are part of a hyperopt space).
|
* `config`: Config object used (Note: Not all strategy-related parameters will be updated here if they are part of a hyperopt space).
|
||||||
* `processed`: Dict of Dataframes with the pair as keys containing the data used for backtesting.
|
* `processed`: Dict of Dataframes with the pair as keys containing the data used for backtesting.
|
||||||
* `backtest_stats`: Backtesting statistics using the same format as the backtesting file "strategy" substructure. Available fields can be seen in `generate_strategy_stats()` in `optimize_reports.py`.
|
* `backtest_stats`: Backtesting statistics using the same format as the backtesting file "strategy" substructure. Available fields can be seen in `generate_strategy_stats()` in `optimize_reports.py`.
|
||||||
* `starting_balance`: Starting balance used for backtesting.
|
|
||||||
|
|
||||||
This function needs to return a floating point number (`float`). Smaller numbers will be interpreted as better results. The parameters and balancing for this is up to you.
|
This function needs to return a floating point number (`float`). Smaller numbers will be interpreted as better results. The parameters and balancing for this is up to you.
|
||||||
|
|
||||||
@@ -83,11 +75,9 @@ This function needs to return a floating point number (`float`). Smaller numbers
|
|||||||
|
|
||||||
## Overriding pre-defined spaces
|
## Overriding pre-defined spaces
|
||||||
|
|
||||||
To override a pre-defined space (`roi_space`, `generate_roi_table`, `stoploss_space`, `trailing_space`, `max_open_trades_space`), define a nested class called Hyperopt and define the required spaces as follows:
|
To override a pre-defined space (`roi_space`, `generate_roi_table`, `stoploss_space`, `trailing_space`), define a nested class called Hyperopt and define the required spaces as follows:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
from freqtrade.optimize.space import Categorical, Dimension, Integer, SKDecimal
|
|
||||||
|
|
||||||
class MyAwesomeStrategy(IStrategy):
|
class MyAwesomeStrategy(IStrategy):
|
||||||
class HyperOpt:
|
class HyperOpt:
|
||||||
# Define a custom stoploss space.
|
# Define a custom stoploss space.
|
||||||
@@ -104,39 +94,6 @@ class MyAwesomeStrategy(IStrategy):
|
|||||||
SKDecimal(0.01, 0.07, decimals=3, name='roi_p2'),
|
SKDecimal(0.01, 0.07, decimals=3, name='roi_p2'),
|
||||||
SKDecimal(0.01, 0.20, decimals=3, name='roi_p3'),
|
SKDecimal(0.01, 0.20, decimals=3, name='roi_p3'),
|
||||||
]
|
]
|
||||||
|
|
||||||
def generate_roi_table(params: Dict) -> dict[int, float]:
|
|
||||||
|
|
||||||
roi_table = {}
|
|
||||||
roi_table[0] = params['roi_p1'] + params['roi_p2'] + params['roi_p3']
|
|
||||||
roi_table[params['roi_t3']] = params['roi_p1'] + params['roi_p2']
|
|
||||||
roi_table[params['roi_t3'] + params['roi_t2']] = params['roi_p1']
|
|
||||||
roi_table[params['roi_t3'] + params['roi_t2'] + params['roi_t1']] = 0
|
|
||||||
|
|
||||||
return roi_table
|
|
||||||
|
|
||||||
def trailing_space() -> List[Dimension]:
|
|
||||||
# All parameters here are mandatory, you can only modify their type or the range.
|
|
||||||
return [
|
|
||||||
# Fixed to true, if optimizing trailing_stop we assume to use trailing stop at all times.
|
|
||||||
Categorical([True], name='trailing_stop'),
|
|
||||||
|
|
||||||
SKDecimal(0.01, 0.35, decimals=3, name='trailing_stop_positive'),
|
|
||||||
# 'trailing_stop_positive_offset' should be greater than 'trailing_stop_positive',
|
|
||||||
# so this intermediate parameter is used as the value of the difference between
|
|
||||||
# them. The value of the 'trailing_stop_positive_offset' is constructed in the
|
|
||||||
# generate_trailing_params() method.
|
|
||||||
# This is similar to the hyperspace dimensions used for constructing the ROI tables.
|
|
||||||
SKDecimal(0.001, 0.1, decimals=3, name='trailing_stop_positive_offset_p1'),
|
|
||||||
|
|
||||||
Categorical([True, False], name='trailing_only_offset_is_reached'),
|
|
||||||
]
|
|
||||||
|
|
||||||
# Define a custom max_open_trades space
|
|
||||||
def max_open_trades_space(self) -> List[Dimension]:
|
|
||||||
return [
|
|
||||||
Integer(-1, 10, name='max_open_trades'),
|
|
||||||
]
|
|
||||||
```
|
```
|
||||||
|
|
||||||
!!! Note
|
!!! Note
|
||||||
@@ -144,7 +101,7 @@ class MyAwesomeStrategy(IStrategy):
|
|||||||
|
|
||||||
### Dynamic parameters
|
### Dynamic parameters
|
||||||
|
|
||||||
Parameters can also be defined dynamically, but must be available to the instance once the [`bot_start()` callback](strategy-callbacks.md#bot-start) has been called.
|
Parameters can also be defined dynamically, but must be available to the instance once the * [`bot_start()` callback](strategy-callbacks.md#bot-start) has been called.
|
||||||
|
|
||||||
``` python
|
``` python
|
||||||
|
|
||||||
|
|||||||
@@ -1,153 +0,0 @@
|
|||||||
# Orderflow data
|
|
||||||
|
|
||||||
This guide walks you through utilizing public trade data for advanced orderflow analysis in Freqtrade.
|
|
||||||
|
|
||||||
!!! Warning "Experimental Feature"
|
|
||||||
The orderflow feature is currently in beta and may be subject to changes in future releases. Please report any issues or feedback on the [Freqtrade GitHub repository](https://github.com/freqtrade/freqtrade/issues).
|
|
||||||
It's also currently not been tested with freqAI - and combining these two features is considered out of scope at this point.
|
|
||||||
|
|
||||||
!!! Warning "Performance"
|
|
||||||
Orderflow requires raw trades data. This data is rather large, and can cause a slow initial startup, when freqtrade needs to download the trades data for the last X candles. Additionally, enabling this feature will cause increased memory usage. Please ensure to have sufficient resources available.
|
|
||||||
|
|
||||||
## Getting Started
|
|
||||||
|
|
||||||
### Enable Public Trades
|
|
||||||
|
|
||||||
In your `config.json` file, set the `use_public_trades` option to true under the `exchange` section.
|
|
||||||
|
|
||||||
```json
|
|
||||||
"exchange": {
|
|
||||||
...
|
|
||||||
"use_public_trades": true,
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### Configure Orderflow Processing
|
|
||||||
|
|
||||||
Define your desired settings for orderflow processing within the orderflow section of config.json. Here, you can adjust factors like:
|
|
||||||
|
|
||||||
- `cache_size`: How many previous orderflow candles are saved into cache instead of calculated every new candle
|
|
||||||
- `max_candles`: Filter how many candles would you like to get trades data for.
|
|
||||||
- `scale`: This controls the price bin size for the footprint chart.
|
|
||||||
- `stacked_imbalance_range`: Defines the minimum consecutive imbalanced price levels required for consideration.
|
|
||||||
- `imbalance_volume`: Filters out imbalances with volume below this threshold.
|
|
||||||
- `imbalance_ratio`: Filters out imbalances with a ratio (difference between ask and bid volume) lower than this value.
|
|
||||||
|
|
||||||
```json
|
|
||||||
"orderflow": {
|
|
||||||
"cache_size": 1000,
|
|
||||||
"max_candles": 1500,
|
|
||||||
"scale": 0.5,
|
|
||||||
"stacked_imbalance_range": 3, // needs at least this amount of imbalance next to each other
|
|
||||||
"imbalance_volume": 1, // filters out below
|
|
||||||
"imbalance_ratio": 3 // filters out ratio lower than
|
|
||||||
},
|
|
||||||
```
|
|
||||||
|
|
||||||
## Downloading Trade Data for Backtesting
|
|
||||||
|
|
||||||
To download historical trade data for backtesting, use the --dl-trades flag with the freqtrade download-data command.
|
|
||||||
|
|
||||||
```bash
|
|
||||||
freqtrade download-data -p BTC/USDT:USDT --timerange 20230101- --trading-mode futures --timeframes 5m --dl-trades
|
|
||||||
```
|
|
||||||
|
|
||||||
!!! Warning "Data availability"
|
|
||||||
Not all exchanges provide public trade data. For supported exchanges, freqtrade will warn you if public trade data is not available if you start downloading data with the `--dl-trades` flag.
|
|
||||||
|
|
||||||
## Accessing Orderflow Data
|
|
||||||
|
|
||||||
Once activated, several new columns become available in your dataframe:
|
|
||||||
|
|
||||||
``` python
|
|
||||||
|
|
||||||
dataframe["trades"] # Contains information about each individual trade.
|
|
||||||
dataframe["orderflow"] # Represents a footprint chart dict (see below)
|
|
||||||
dataframe["imbalances"] # Contains information about imbalances in the order flow.
|
|
||||||
dataframe["bid"] # Total bid volume
|
|
||||||
dataframe["ask"] # Total ask volume
|
|
||||||
dataframe["delta"] # Difference between ask and bid volume.
|
|
||||||
dataframe["min_delta"] # Minimum delta within the candle
|
|
||||||
dataframe["max_delta"] # Maximum delta within the candle
|
|
||||||
dataframe["total_trades"] # Total number of trades
|
|
||||||
dataframe["stacked_imbalances_bid"] # Price level of stacked bid imbalance
|
|
||||||
dataframe["stacked_imbalances_ask"] # Price level of stacked ask imbalance
|
|
||||||
```
|
|
||||||
|
|
||||||
You can access these columns in your strategy code for further analysis. Here's an example:
|
|
||||||
|
|
||||||
``` python
|
|
||||||
def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
|
|
||||||
# Calculating cumulative delta
|
|
||||||
dataframe["cum_delta"] = cumulative_delta(dataframe["delta"])
|
|
||||||
# Accessing total trades
|
|
||||||
total_trades = dataframe["total_trades"]
|
|
||||||
...
|
|
||||||
|
|
||||||
def cumulative_delta(delta: Series):
|
|
||||||
cumdelta = delta.cumsum()
|
|
||||||
return cumdelta
|
|
||||||
|
|
||||||
```
|
|
||||||
|
|
||||||
### Footprint chart (`dataframe["orderflow"]`)
|
|
||||||
|
|
||||||
This column provides a detailed breakdown of buy and sell orders at different price levels, offering valuable insights into order flow dynamics. The `scale` parameter in your configuration determines the price bin size for this representation
|
|
||||||
|
|
||||||
The `orderflow` column contains a dict with the following structure:
|
|
||||||
|
|
||||||
``` output
|
|
||||||
{
|
|
||||||
"price": {
|
|
||||||
"bid_amount": 0.0,
|
|
||||||
"ask_amount": 0.0,
|
|
||||||
"bid": 0,
|
|
||||||
"ask": 0,
|
|
||||||
"delta": 0.0,
|
|
||||||
"total_volume": 0.0,
|
|
||||||
"total_trades": 0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Orderflow column explanation
|
|
||||||
|
|
||||||
- key: Price bin - binned at `scale` intervals
|
|
||||||
- `bid_amount`: Total volume bought at each price level.
|
|
||||||
- `ask_amount`: Total volume sold at each price level.
|
|
||||||
- `bid`: Number of buy orders at each price level.
|
|
||||||
- `ask`: Number of sell orders at each price level.
|
|
||||||
- `delta`: Difference between ask and bid volume at each price level.
|
|
||||||
- `total_volume`: Total volume (ask amount + bid amount) at each price level.
|
|
||||||
- `total_trades`: Total number of trades (ask + bid) at each price level.
|
|
||||||
|
|
||||||
By leveraging these features, you can gain valuable insights into market sentiment and potential trading opportunities based on order flow analysis.
|
|
||||||
|
|
||||||
### Raw trades data (`dataframe["trades"]`)
|
|
||||||
|
|
||||||
List with the individual trades that occurred during the candle. This data can be used for more granular analysis of order flow dynamics.
|
|
||||||
|
|
||||||
Each individual entry contains a dict with the following keys:
|
|
||||||
|
|
||||||
- `timestamp`: Timestamp of the trade.
|
|
||||||
- `date`: Date of the trade.
|
|
||||||
- `price`: Price of the trade.
|
|
||||||
- `amount`: Volume of the trade.
|
|
||||||
- `side`: Buy or sell.
|
|
||||||
- `id`: Unique identifier for the trade.
|
|
||||||
- `cost`: Total cost of the trade (price * amount).
|
|
||||||
|
|
||||||
### Imbalances (`dataframe["imbalances"]`)
|
|
||||||
|
|
||||||
This column provides a dict with information about imbalances in the order flow. An imbalance occurs when there is a significant difference between the ask and bid volume at a given price level.
|
|
||||||
|
|
||||||
Each row looks as follows - with price as index, and the corresponding bid and ask imbalance values as columns
|
|
||||||
|
|
||||||
``` output
|
|
||||||
{
|
|
||||||
"price": {
|
|
||||||
"bid_imbalance": False,
|
|
||||||
"ask_imbalance": False
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
@@ -114,46 +114,8 @@ services:
|
|||||||
--strategy SampleStrategy
|
--strategy SampleStrategy
|
||||||
|
|
||||||
```
|
```
|
||||||
|
|
||||||
You can use whatever naming convention you want, freqtrade1 and 2 are arbitrary. Note, that you will need to use different database files, port mappings and telegram configurations for each instance, as mentioned above.
|
You can use whatever naming convention you want, freqtrade1 and 2 are arbitrary. Note, that you will need to use different database files, port mappings and telegram configurations for each instance, as mentioned above.
|
||||||
|
|
||||||
## Use a different database system
|
|
||||||
|
|
||||||
Freqtrade is using SQLAlchemy, which supports multiple different database systems. As such, a multitude of database systems should be supported.
|
|
||||||
Freqtrade does not depend or install any additional database driver. Please refer to the [SQLAlchemy docs](https://docs.sqlalchemy.org/en/14/core/engines.html#database-urls) on installation instructions for the respective database systems.
|
|
||||||
|
|
||||||
The following systems have been tested and are known to work with freqtrade:
|
|
||||||
|
|
||||||
* sqlite (default)
|
|
||||||
* PostgreSQL
|
|
||||||
* MariaDB
|
|
||||||
|
|
||||||
!!! Warning
|
|
||||||
By using one of the below database systems, you acknowledge that you know how to manage such a system. The freqtrade team will not provide any support with setup or maintenance (or backups) of the below database systems.
|
|
||||||
|
|
||||||
### PostgreSQL
|
|
||||||
|
|
||||||
Installation:
|
|
||||||
`pip install psycopg2-binary`
|
|
||||||
|
|
||||||
Usage:
|
|
||||||
`... --db-url postgresql+psycopg2://<username>:<password>@localhost:5432/<database>`
|
|
||||||
|
|
||||||
Freqtrade will automatically create the tables necessary upon startup.
|
|
||||||
|
|
||||||
If you're running different instances of Freqtrade, you must either setup one database per Instance or use different users / schemas for your connections.
|
|
||||||
|
|
||||||
### MariaDB / MySQL
|
|
||||||
|
|
||||||
Freqtrade supports MariaDB by using SQLAlchemy, which supports multiple different database systems.
|
|
||||||
|
|
||||||
Installation:
|
|
||||||
`pip install pymysql`
|
|
||||||
|
|
||||||
Usage:
|
|
||||||
`... --db-url mysql+pymysql://<username>:<password>@localhost:3306/<database>`
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
## Configure the bot running as a systemd service
|
## Configure the bot running as a systemd service
|
||||||
|
|
||||||
@@ -230,7 +192,7 @@ $RepeatedMsgReduction on
|
|||||||
|
|
||||||
### Logging to journald
|
### Logging to journald
|
||||||
|
|
||||||
This needs the `cysystemd` python package installed as dependency (`pip install cysystemd`), which is not available on Windows. Hence, the whole journald logging functionality is not available for a bot running on Windows.
|
This needs the `systemd` python package installed as the dependency, which is not available on Windows. Hence, the whole journald logging functionality is not available for a bot running on Windows.
|
||||||
|
|
||||||
To send Freqtrade log messages to `journald` system service use the `--logfile` command line option with the value in the following format:
|
To send Freqtrade log messages to `journald` system service use the `--logfile` command line option with the value in the following format:
|
||||||
|
|
||||||
|
|||||||
|
Before Width: | Height: | Size: 94 KiB |
|
Before Width: | Height: | Size: 91 KiB |
|
Before Width: | Height: | Size: 133 KiB |
|
Before Width: | Height: | Size: 135 KiB |
|
Before Width: | Height: | Size: 242 KiB |
|
Before Width: | Height: | Size: 241 KiB |
|
Before Width: | Height: | Size: 209 KiB |
|
Before Width: | Height: | Size: 18 KiB |
|
Before Width: | Height: | Size: 51 KiB |
|
Before Width: | Height: | Size: 50 KiB |
|
Before Width: | Height: | Size: 80 KiB |
|
Before Width: | Height: | Size: 79 KiB |
|
Before Width: | Height: | Size: 48 KiB |
|
Before Width: | Height: | Size: 29 KiB |
|
Before Width: | Height: | Size: 12 KiB After Width: | Height: | Size: 18 KiB |
@@ -10,14 +10,12 @@ To learn how to get data for the pairs and exchange you're interested in, head o
|
|||||||
```
|
```
|
||||||
usage: freqtrade backtesting [-h] [-v] [--logfile FILE] [-V] [-c PATH]
|
usage: freqtrade backtesting [-h] [-v] [--logfile FILE] [-V] [-c PATH]
|
||||||
[-d PATH] [--userdir PATH] [-s NAME]
|
[-d PATH] [--userdir PATH] [-s NAME]
|
||||||
[--strategy-path PATH]
|
[--strategy-path PATH] [-i TIMEFRAME]
|
||||||
[--recursive-strategy-search]
|
[--timerange TIMERANGE]
|
||||||
[--freqaimodel NAME] [--freqaimodel-path PATH]
|
[--data-format-ohlcv {json,jsongz,hdf5}]
|
||||||
[-i TIMEFRAME] [--timerange TIMERANGE]
|
|
||||||
[--data-format-ohlcv {json,jsongz,hdf5,feather,parquet}]
|
|
||||||
[--max-open-trades INT]
|
[--max-open-trades INT]
|
||||||
[--stake-amount STAKE_AMOUNT] [--fee FLOAT]
|
[--stake-amount STAKE_AMOUNT] [--fee FLOAT]
|
||||||
[-p PAIRS [PAIRS ...]] [--eps]
|
[-p PAIRS [PAIRS ...]] [--eps] [--dmmp]
|
||||||
[--enable-protections]
|
[--enable-protections]
|
||||||
[--dry-run-wallet DRY_RUN_WALLET]
|
[--dry-run-wallet DRY_RUN_WALLET]
|
||||||
[--timeframe-detail TIMEFRAME_DETAIL]
|
[--timeframe-detail TIMEFRAME_DETAIL]
|
||||||
@@ -26,17 +24,16 @@ usage: freqtrade backtesting [-h] [-v] [--logfile FILE] [-V] [-c PATH]
|
|||||||
[--export-filename PATH]
|
[--export-filename PATH]
|
||||||
[--breakdown {day,week,month} [{day,week,month} ...]]
|
[--breakdown {day,week,month} [{day,week,month} ...]]
|
||||||
[--cache {none,day,week,month}]
|
[--cache {none,day,week,month}]
|
||||||
[--freqai-backtest-live-models]
|
|
||||||
|
|
||||||
options:
|
optional arguments:
|
||||||
-h, --help show this help message and exit
|
-h, --help show this help message and exit
|
||||||
-i TIMEFRAME, --timeframe TIMEFRAME
|
-i TIMEFRAME, --timeframe TIMEFRAME
|
||||||
Specify timeframe (`1m`, `5m`, `30m`, `1h`, `1d`).
|
Specify timeframe (`1m`, `5m`, `30m`, `1h`, `1d`).
|
||||||
--timerange TIMERANGE
|
--timerange TIMERANGE
|
||||||
Specify what timerange of data to use.
|
Specify what timerange of data to use.
|
||||||
--data-format-ohlcv {json,jsongz,hdf5,feather,parquet}
|
--data-format-ohlcv {json,jsongz,hdf5}
|
||||||
Storage format for downloaded candle (OHLCV) data.
|
Storage format for downloaded candle (OHLCV) data.
|
||||||
(default: `feather`).
|
(default: `json`).
|
||||||
--max-open-trades INT
|
--max-open-trades INT
|
||||||
Override the value of the `max_open_trades`
|
Override the value of the `max_open_trades`
|
||||||
configuration setting.
|
configuration setting.
|
||||||
@@ -51,6 +48,10 @@ options:
|
|||||||
--eps, --enable-position-stacking
|
--eps, --enable-position-stacking
|
||||||
Allow buying the same pair multiple times (position
|
Allow buying the same pair multiple times (position
|
||||||
stacking).
|
stacking).
|
||||||
|
--dmmp, --disable-max-market-positions
|
||||||
|
Disable applying `max_open_trades` during backtest
|
||||||
|
(same as setting `max_open_trades` to a very high
|
||||||
|
number).
|
||||||
--enable-protections, --enableprotections
|
--enable-protections, --enableprotections
|
||||||
Enable protections for backtesting.Will slow
|
Enable protections for backtesting.Will slow
|
||||||
backtesting down by a considerable amount, but will
|
backtesting down by a considerable amount, but will
|
||||||
@@ -79,13 +80,10 @@ options:
|
|||||||
--cache {none,day,week,month}
|
--cache {none,day,week,month}
|
||||||
Load a cached backtest result no older than specified
|
Load a cached backtest result no older than specified
|
||||||
age (default: day).
|
age (default: day).
|
||||||
--freqai-backtest-live-models
|
|
||||||
Run backtest with ready models.
|
|
||||||
|
|
||||||
Common arguments:
|
Common arguments:
|
||||||
-v, --verbose Verbose mode (-vv for more, -vvv to get all messages).
|
-v, --verbose Verbose mode (-vv for more, -vvv to get all messages).
|
||||||
--logfile FILE, --log-file FILE
|
--logfile FILE Log to the file specified. Special values are:
|
||||||
Log to the file specified. Special values are:
|
|
||||||
'syslog', 'journald'. See the documentation for more
|
'syslog', 'journald'. See the documentation for more
|
||||||
details.
|
details.
|
||||||
-V, --version show program's version number and exit
|
-V, --version show program's version number and exit
|
||||||
@@ -94,7 +92,7 @@ Common arguments:
|
|||||||
`userdir/config.json` or `config.json` whichever
|
`userdir/config.json` or `config.json` whichever
|
||||||
exists). Multiple --config options may be used. Can be
|
exists). Multiple --config options may be used. Can be
|
||||||
set to `-` to read config from stdin.
|
set to `-` to read config from stdin.
|
||||||
-d PATH, --datadir PATH, --data-dir PATH
|
-d PATH, --datadir PATH
|
||||||
Path to directory with historical backtesting data.
|
Path to directory with historical backtesting data.
|
||||||
--userdir PATH, --user-data-dir PATH
|
--userdir PATH, --user-data-dir PATH
|
||||||
Path to userdata directory.
|
Path to userdata directory.
|
||||||
@@ -104,12 +102,6 @@ Strategy arguments:
|
|||||||
Specify strategy class name which will be used by the
|
Specify strategy class name which will be used by the
|
||||||
bot.
|
bot.
|
||||||
--strategy-path PATH Specify additional strategy lookup path.
|
--strategy-path PATH Specify additional strategy lookup path.
|
||||||
--recursive-strategy-search
|
|
||||||
Recursively search for a strategy in the strategies
|
|
||||||
folder.
|
|
||||||
--freqaimodel NAME Specify a custom freqaimodels.
|
|
||||||
--freqaimodel-path PATH
|
|
||||||
Specify additional lookup path for freqaimodels.
|
|
||||||
|
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -178,11 +170,11 @@ freqtrade backtesting --strategy AwesomeStrategy --dry-run-wallet 1000
|
|||||||
|
|
||||||
Using a different on-disk historical candle (OHLCV) data source
|
Using a different on-disk historical candle (OHLCV) data source
|
||||||
|
|
||||||
Assume you downloaded the history data from the Binance exchange and kept it in the `user_data/data/binance-20180101` directory.
|
Assume you downloaded the history data from the Bittrex exchange and kept it in the `user_data/data/bittrex-20180101` directory.
|
||||||
You can then use this data for backtesting as follows:
|
You can then use this data for backtesting as follows:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
freqtrade backtesting --strategy AwesomeStrategy --datadir user_data/data/binance-20180101
|
freqtrade backtesting --strategy AwesomeStrategy --datadir user_data/data/bittrex-20180101
|
||||||
```
|
```
|
||||||
|
|
||||||
---
|
---
|
||||||
@@ -260,48 +252,46 @@ The most important in the backtesting is to understand the result.
|
|||||||
A backtesting result will look like that:
|
A backtesting result will look like that:
|
||||||
|
|
||||||
```
|
```
|
||||||
================================================ BACKTESTING REPORT =================================================
|
========================================================= BACKTESTING REPORT =========================================================
|
||||||
| Pair | Trades | Avg Profit % | Tot Profit BTC | Tot Profit % | Avg Duration | Wins Draws Loss Win% |
|
| Pair | Entries | Avg Profit % | Cum Profit % | Tot Profit BTC | Tot Profit % | Avg Duration | Wins Draws Loss Win% |
|
||||||
|----------+--------+----------------+------------------+----------------+--------------+--------------------------|
|
|:---------|--------:|---------------:|---------------:|-----------------:|---------------:|:-------------|-------------------------:|
|
||||||
| ADA/BTC | 35 | -0.11 | -0.00019428 | -1.94 | 4:35:00 | 14 0 21 40.0 |
|
| ADA/BTC | 35 | -0.11 | -3.88 | -0.00019428 | -1.94 | 4:35:00 | 14 0 21 40.0 |
|
||||||
| ARK/BTC | 11 | -0.41 | -0.00022647 | -2.26 | 2:03:00 | 3 0 8 27.3 |
|
| ARK/BTC | 11 | -0.41 | -4.52 | -0.00022647 | -2.26 | 2:03:00 | 3 0 8 27.3 |
|
||||||
| BTS/BTC | 32 | 0.31 | 0.00048938 | 4.89 | 5:05:00 | 18 0 14 56.2 |
|
| BTS/BTC | 32 | 0.31 | 9.78 | 0.00048938 | 4.89 | 5:05:00 | 18 0 14 56.2 |
|
||||||
| DASH/BTC | 13 | -0.08 | -0.00005343 | -0.53 | 4:39:00 | 6 0 7 46.2 |
|
| DASH/BTC | 13 | -0.08 | -1.07 | -0.00005343 | -0.53 | 4:39:00 | 6 0 7 46.2 |
|
||||||
| ENG/BTC | 18 | 1.36 | 0.00122807 | 12.27 | 2:50:00 | 8 0 10 44.4 |
|
| ENG/BTC | 18 | 1.36 | 24.54 | 0.00122807 | 12.27 | 2:50:00 | 8 0 10 44.4 |
|
||||||
| EOS/BTC | 36 | 0.08 | 0.00015304 | 1.53 | 3:34:00 | 16 0 20 44.4 |
|
| EOS/BTC | 36 | 0.08 | 3.06 | 0.00015304 | 1.53 | 3:34:00 | 16 0 20 44.4 |
|
||||||
| ETC/BTC | 26 | 0.37 | 0.00047576 | 4.75 | 6:14:00 | 11 0 15 42.3 |
|
| ETC/BTC | 26 | 0.37 | 9.51 | 0.00047576 | 4.75 | 6:14:00 | 11 0 15 42.3 |
|
||||||
| ETH/BTC | 33 | 0.30 | 0.00049856 | 4.98 | 7:31:00 | 16 0 17 48.5 |
|
| ETH/BTC | 33 | 0.30 | 9.96 | 0.00049856 | 4.98 | 7:31:00 | 16 0 17 48.5 |
|
||||||
| IOTA/BTC | 32 | 0.03 | 0.00005444 | 0.54 | 3:12:00 | 14 0 18 43.8 |
|
| IOTA/BTC | 32 | 0.03 | 1.09 | 0.00005444 | 0.54 | 3:12:00 | 14 0 18 43.8 |
|
||||||
| LSK/BTC | 15 | 1.75 | 0.00131413 | 13.13 | 2:58:00 | 6 0 9 40.0 |
|
| LSK/BTC | 15 | 1.75 | 26.26 | 0.00131413 | 13.13 | 2:58:00 | 6 0 9 40.0 |
|
||||||
| LTC/BTC | 32 | -0.04 | -0.00006886 | -0.69 | 4:49:00 | 11 0 21 34.4 |
|
| LTC/BTC | 32 | -0.04 | -1.38 | -0.00006886 | -0.69 | 4:49:00 | 11 0 21 34.4 |
|
||||||
| NANO/BTC | 17 | 1.26 | 0.00107058 | 10.70 | 1:55:00 | 10 0 7 58.5 |
|
| NANO/BTC | 17 | 1.26 | 21.39 | 0.00107058 | 10.70 | 1:55:00 | 10 0 7 58.5 |
|
||||||
| NEO/BTC | 23 | 0.82 | 0.00094936 | 9.48 | 2:59:00 | 10 0 13 43.5 |
|
| NEO/BTC | 23 | 0.82 | 18.97 | 0.00094936 | 9.48 | 2:59:00 | 10 0 13 43.5 |
|
||||||
| REQ/BTC | 9 | 1.17 | 0.00052734 | 5.27 | 3:47:00 | 4 0 5 44.4 |
|
| REQ/BTC | 9 | 1.17 | 10.54 | 0.00052734 | 5.27 | 3:47:00 | 4 0 5 44.4 |
|
||||||
| XLM/BTC | 16 | 1.22 | 0.00097800 | 9.77 | 3:15:00 | 7 0 9 43.8 |
|
| XLM/BTC | 16 | 1.22 | 19.54 | 0.00097800 | 9.77 | 3:15:00 | 7 0 9 43.8 |
|
||||||
| XMR/BTC | 23 | -0.18 | -0.00020696 | -2.07 | 5:30:00 | 12 0 11 52.2 |
|
| XMR/BTC | 23 | -0.18 | -4.13 | -0.00020696 | -2.07 | 5:30:00 | 12 0 11 52.2 |
|
||||||
| XRP/BTC | 35 | 0.66 | 0.00114897 | 11.48 | 3:49:00 | 12 0 23 34.3 |
|
| XRP/BTC | 35 | 0.66 | 22.96 | 0.00114897 | 11.48 | 3:49:00 | 12 0 23 34.3 |
|
||||||
| ZEC/BTC | 22 | -0.46 | -0.00050971 | -5.09 | 2:22:00 | 7 0 15 31.8 |
|
| ZEC/BTC | 22 | -0.46 | -10.18 | -0.00050971 | -5.09 | 2:22:00 | 7 0 15 31.8 |
|
||||||
| TOTAL | 429 | 0.36 | 0.00762792 | 76.20 | 4:12:00 | 186 0 243 43.4 |
|
| TOTAL | 429 | 0.36 | 152.41 | 0.00762792 | 76.20 | 4:12:00 | 186 0 243 43.4 |
|
||||||
============================================= LEFT OPEN TRADES REPORT =============================================
|
========================================================= EXIT REASON STATS ==========================================================
|
||||||
| Pair | Trades | Avg Profit % | Tot Profit BTC | Tot Profit % | Avg Duration | Win Draw Loss Win% |
|
|
||||||
|----------+---------+----------------+------------------+----------------+----------------+---------------------|
|
|
||||||
| ADA/BTC | 1 | 0.89 | 0.00004434 | 0.44 | 6:00:00 | 1 0 0 100 |
|
|
||||||
| LTC/BTC | 1 | 0.68 | 0.00003421 | 0.34 | 2:00:00 | 1 0 0 100 |
|
|
||||||
| TOTAL | 2 | 0.78 | 0.00007855 | 0.78 | 4:00:00 | 2 0 0 100 |
|
|
||||||
==================== EXIT REASON STATS ====================
|
|
||||||
| Exit Reason | Exits | Wins | Draws | Losses |
|
| Exit Reason | Exits | Wins | Draws | Losses |
|
||||||
|--------------------+---------+-------+--------+---------|
|
|:-------------------|--------:|------:|-------:|--------:|
|
||||||
| trailing_stop_loss | 205 | 150 | 0 | 55 |
|
| trailing_stop_loss | 205 | 150 | 0 | 55 |
|
||||||
| stop_loss | 166 | 0 | 0 | 166 |
|
| stop_loss | 166 | 0 | 0 | 166 |
|
||||||
| exit_signal | 56 | 36 | 0 | 20 |
|
| exit_signal | 56 | 36 | 0 | 20 |
|
||||||
| force_exit | 2 | 0 | 0 | 2 |
|
| force_exit | 2 | 0 | 0 | 2 |
|
||||||
|
====================================================== LEFT OPEN TRADES REPORT ======================================================
|
||||||
|
| Pair | Entries | Avg Profit % | Cum Profit % | Tot Profit BTC | Tot Profit % | Avg Duration | Win Draw Loss Win% |
|
||||||
|
|:---------|---------:|---------------:|---------------:|-----------------:|---------------:|:---------------|--------------------:|
|
||||||
|
| ADA/BTC | 1 | 0.89 | 0.89 | 0.00004434 | 0.44 | 6:00:00 | 1 0 0 100 |
|
||||||
|
| LTC/BTC | 1 | 0.68 | 0.68 | 0.00003421 | 0.34 | 2:00:00 | 1 0 0 100 |
|
||||||
|
| TOTAL | 2 | 0.78 | 1.57 | 0.00007855 | 0.78 | 4:00:00 | 2 0 0 100 |
|
||||||
================== SUMMARY METRICS ==================
|
================== SUMMARY METRICS ==================
|
||||||
| Metric | Value |
|
| Metric | Value |
|
||||||
|-----------------------------+---------------------|
|
|-----------------------------+---------------------|
|
||||||
| Backtesting from | 2019-01-01 00:00:00 |
|
| Backtesting from | 2019-01-01 00:00:00 |
|
||||||
| Backtesting to | 2019-05-01 00:00:00 |
|
| Backtesting to | 2019-05-01 00:00:00 |
|
||||||
| Trading Mode | Spot |
|
|
||||||
| Max open trades | 3 |
|
| Max open trades | 3 |
|
||||||
| | |
|
| | |
|
||||||
| Total/Daily Avg Trades | 429 / 3.575 |
|
| Total/Daily Avg Trades | 429 / 3.575 |
|
||||||
@@ -310,11 +300,7 @@ A backtesting result will look like that:
|
|||||||
| Absolute profit | 0.00762792 BTC |
|
| Absolute profit | 0.00762792 BTC |
|
||||||
| Total profit % | 76.2% |
|
| Total profit % | 76.2% |
|
||||||
| CAGR % | 460.87% |
|
| CAGR % | 460.87% |
|
||||||
| Sortino | 1.88 |
|
|
||||||
| Sharpe | 2.97 |
|
|
||||||
| Calmar | 6.29 |
|
|
||||||
| Profit factor | 1.11 |
|
| Profit factor | 1.11 |
|
||||||
| Expectancy (Ratio) | -0.15 (-0.05) |
|
|
||||||
| Avg. stake amount | 0.001 BTC |
|
| Avg. stake amount | 0.001 BTC |
|
||||||
| Total trade volume | 0.429 BTC |
|
| Total trade volume | 0.429 BTC |
|
||||||
| | |
|
| | |
|
||||||
@@ -333,7 +319,6 @@ A backtesting result will look like that:
|
|||||||
| Days win/draw/lose | 12 / 82 / 25 |
|
| Days win/draw/lose | 12 / 82 / 25 |
|
||||||
| Avg. Duration Winners | 4:23:00 |
|
| Avg. Duration Winners | 4:23:00 |
|
||||||
| Avg. Duration Loser | 6:55:00 |
|
| Avg. Duration Loser | 6:55:00 |
|
||||||
| Max Consecutive Wins / Loss | 3 / 4 |
|
|
||||||
| Rejected Entry signals | 3089 |
|
| Rejected Entry signals | 3089 |
|
||||||
| Entry/Exit Timeouts | 0 / 0 |
|
| Entry/Exit Timeouts | 0 / 0 |
|
||||||
| Canceled Trade Entries | 34 |
|
| Canceled Trade Entries | 34 |
|
||||||
@@ -367,7 +352,7 @@ here:
|
|||||||
The bot has made `429` trades for an average duration of `4:12:00`, with a performance of `76.20%` (profit), that means it has
|
The bot has made `429` trades for an average duration of `4:12:00`, with a performance of `76.20%` (profit), that means it has
|
||||||
earned a total of `0.00762792 BTC` starting with a capital of 0.01 BTC.
|
earned a total of `0.00762792 BTC` starting with a capital of 0.01 BTC.
|
||||||
|
|
||||||
The column `Avg Profit %` shows the average profit for all trades made.
|
The column `Avg Profit %` shows the average profit for all trades made while the column `Cum Profit %` sums up all the profits/losses.
|
||||||
The column `Tot Profit %` shows instead the total profit % in relation to the starting balance.
|
The column `Tot Profit %` shows instead the total profit % in relation to the starting balance.
|
||||||
In the above results, we have a starting balance of 0.01 BTC and an absolute profit of 0.00762792 BTC - so the `Tot Profit %` will be `(0.00762792 / 0.01) * 100 ~= 76.2%`.
|
In the above results, we have a starting balance of 0.01 BTC and an absolute profit of 0.00762792 BTC - so the `Tot Profit %` will be `(0.00762792 / 0.01) * 100 ~= 76.2%`.
|
||||||
|
|
||||||
@@ -407,7 +392,6 @@ It contains some useful key metrics about performance of your strategy on backte
|
|||||||
|-----------------------------+---------------------|
|
|-----------------------------+---------------------|
|
||||||
| Backtesting from | 2019-01-01 00:00:00 |
|
| Backtesting from | 2019-01-01 00:00:00 |
|
||||||
| Backtesting to | 2019-05-01 00:00:00 |
|
| Backtesting to | 2019-05-01 00:00:00 |
|
||||||
| Trading Mode | Spot |
|
|
||||||
| Max open trades | 3 |
|
| Max open trades | 3 |
|
||||||
| | |
|
| | |
|
||||||
| Total/Daily Avg Trades | 429 / 3.575 |
|
| Total/Daily Avg Trades | 429 / 3.575 |
|
||||||
@@ -416,11 +400,7 @@ It contains some useful key metrics about performance of your strategy on backte
|
|||||||
| Absolute profit | 0.00762792 BTC |
|
| Absolute profit | 0.00762792 BTC |
|
||||||
| Total profit % | 76.2% |
|
| Total profit % | 76.2% |
|
||||||
| CAGR % | 460.87% |
|
| CAGR % | 460.87% |
|
||||||
| Sortino | 1.88 |
|
|
||||||
| Sharpe | 2.97 |
|
|
||||||
| Calmar | 6.29 |
|
|
||||||
| Profit factor | 1.11 |
|
| Profit factor | 1.11 |
|
||||||
| Expectancy (Ratio) | -0.15 (-0.05) |
|
|
||||||
| Avg. stake amount | 0.001 BTC |
|
| Avg. stake amount | 0.001 BTC |
|
||||||
| Total trade volume | 0.429 BTC |
|
| Total trade volume | 0.429 BTC |
|
||||||
| | |
|
| | |
|
||||||
@@ -439,7 +419,6 @@ It contains some useful key metrics about performance of your strategy on backte
|
|||||||
| Days win/draw/lose | 12 / 82 / 25 |
|
| Days win/draw/lose | 12 / 82 / 25 |
|
||||||
| Avg. Duration Winners | 4:23:00 |
|
| Avg. Duration Winners | 4:23:00 |
|
||||||
| Avg. Duration Loser | 6:55:00 |
|
| Avg. Duration Loser | 6:55:00 |
|
||||||
| Max Consecutive Wins / Loss | 3 / 4 |
|
|
||||||
| Rejected Entry signals | 3089 |
|
| Rejected Entry signals | 3089 |
|
||||||
| Entry/Exit Timeouts | 0 / 0 |
|
| Entry/Exit Timeouts | 0 / 0 |
|
||||||
| Canceled Trade Entries | 34 |
|
| Canceled Trade Entries | 34 |
|
||||||
@@ -462,25 +441,20 @@ It contains some useful key metrics about performance of your strategy on backte
|
|||||||
|
|
||||||
- `Backtesting from` / `Backtesting to`: Backtesting range (usually defined with the `--timerange` option).
|
- `Backtesting from` / `Backtesting to`: Backtesting range (usually defined with the `--timerange` option).
|
||||||
- `Max open trades`: Setting of `max_open_trades` (or `--max-open-trades`) - or number of pairs in the pairlist (whatever is lower).
|
- `Max open trades`: Setting of `max_open_trades` (or `--max-open-trades`) - or number of pairs in the pairlist (whatever is lower).
|
||||||
- `Trading Mode`: Spot or Futures trading.
|
|
||||||
- `Total/Daily Avg Trades`: Identical to the total trades of the backtest output table / Total trades divided by the backtesting duration in days (this will give you information about how many trades to expect from the strategy).
|
- `Total/Daily Avg Trades`: Identical to the total trades of the backtest output table / Total trades divided by the backtesting duration in days (this will give you information about how many trades to expect from the strategy).
|
||||||
- `Starting balance`: Start balance - as given by dry-run-wallet (config or command line).
|
- `Starting balance`: Start balance - as given by dry-run-wallet (config or command line).
|
||||||
- `Final balance`: Final balance - starting balance + absolute profit.
|
- `Final balance`: Final balance - starting balance + absolute profit.
|
||||||
- `Absolute profit`: Profit made in stake currency.
|
- `Absolute profit`: Profit made in stake currency.
|
||||||
- `Total profit %`: Total profit. Aligned to the `TOTAL` row's `Tot Profit %` from the first table. Calculated as `(End capital − Starting capital) / Starting capital`.
|
- `Total profit %`: Total profit. Aligned to the `TOTAL` row's `Tot Profit %` from the first table. Calculated as `(End capital − Starting capital) / Starting capital`.
|
||||||
- `CAGR %`: Compound annual growth rate.
|
- `CAGR %`: Compound annual growth rate.
|
||||||
- `Sortino`: Annualized Sortino ratio.
|
|
||||||
- `Sharpe`: Annualized Sharpe ratio.
|
|
||||||
- `Calmar`: Annualized Calmar ratio.
|
|
||||||
- `Profit factor`: profit / loss.
|
- `Profit factor`: profit / loss.
|
||||||
- `Avg. stake amount`: Average stake amount, either `stake_amount` or the average when using dynamic stake amount.
|
- `Avg. stake amount`: Average stake amount, either `stake_amount` or the average when using dynamic stake amount.
|
||||||
- `Total trade volume`: Volume generated on the exchange to reach the above profit.
|
- `Total trade volume`: Volume generated on the exchange to reach the above profit.
|
||||||
- `Best Pair` / `Worst Pair`: Best and worst performing pair, and it's corresponding `Tot Profit %`.
|
- `Best Pair` / `Worst Pair`: Best and worst performing pair, and it's corresponding `Cum Profit %`.
|
||||||
- `Best Trade` / `Worst Trade`: Biggest single winning trade and biggest single losing trade.
|
- `Best Trade` / `Worst Trade`: Biggest single winning trade and biggest single losing trade.
|
||||||
- `Best day` / `Worst day`: Best and worst day based on daily profit.
|
- `Best day` / `Worst day`: Best and worst day based on daily profit.
|
||||||
- `Days win/draw/lose`: Winning / Losing days (draws are usually days without closed trade).
|
- `Days win/draw/lose`: Winning / Losing days (draws are usually days without closed trade).
|
||||||
- `Avg. Duration Winners` / `Avg. Duration Loser`: Average durations for winning and losing trades.
|
- `Avg. Duration Winners` / `Avg. Duration Loser`: Average durations for winning and losing trades.
|
||||||
- `Max Consecutive Wins / Loss`: Maximum consecutive wins/losses in a row.
|
|
||||||
- `Rejected Entry signals`: Trade entry signals that could not be acted upon due to `max_open_trades` being reached.
|
- `Rejected Entry signals`: Trade entry signals that could not be acted upon due to `max_open_trades` being reached.
|
||||||
- `Entry/Exit Timeouts`: Entry/exit orders which did not fill (only applicable if custom pricing is used).
|
- `Entry/Exit Timeouts`: Entry/exit orders which did not fill (only applicable if custom pricing is used).
|
||||||
- `Canceled Trade Entries`: Number of trades that have been canceled by user request via `adjust_entry_price`.
|
- `Canceled Trade Entries`: Number of trades that have been canceled by user request via `adjust_entry_price`.
|
||||||
@@ -533,30 +507,28 @@ To save time, by default backtest will reuse a cached result from within the las
|
|||||||
|
|
||||||
### Further backtest-result analysis
|
### Further backtest-result analysis
|
||||||
|
|
||||||
To further analyze your backtest results, freqtrade will export the trades to file by default.
|
To further analyze your backtest results, you can [export the trades](#exporting-trades-to-file).
|
||||||
You can then load the trades to perform further analysis as shown in the [data analysis](strategy_analysis_example.md#load-backtest-results-to-pandas-dataframe) backtesting section.
|
You can then load the trades to perform further analysis as shown in the [data analysis](data-analysis.md#backtesting) backtesting section.
|
||||||
|
|
||||||
## Assumptions made by backtesting
|
## Assumptions made by backtesting
|
||||||
|
|
||||||
Since backtesting lacks some detailed information about what happens within a candle, it needs to take a few assumptions:
|
Since backtesting lacks some detailed information about what happens within a candle, it needs to take a few assumptions:
|
||||||
|
|
||||||
- Exchange [trading limits](#trading-limits-in-backtesting) are respected
|
- Exchange [trading limits](#trading-limits-in-backtesting) are respected
|
||||||
- Entries happen at open-price unless a custom price logic has been specified
|
- Entries happen at open-price
|
||||||
- All orders are filled at the requested price (no slippage) as long as the price is within the candle's high/low range
|
- All orders are filled at the requested price (no slippage, no unfilled orders)
|
||||||
- Exit-signal exits happen at open-price of the consecutive candle
|
- Exit-signal exits happen at open-price of the consecutive candle
|
||||||
- Exits free their trade slot for a new trade with a different pair
|
|
||||||
- Exit-signal is favored over Stoploss, because exit-signals are assumed to trigger on candle's open
|
- Exit-signal is favored over Stoploss, because exit-signals are assumed to trigger on candle's open
|
||||||
- ROI
|
- ROI
|
||||||
- Exits are compared to high - but the ROI value is used (e.g. ROI = 2%, high=5% - so the exit will be at 2%)
|
- exits are compared to high - but the ROI value is used (e.g. ROI = 2%, high=5% - so the exit will be at 2%)
|
||||||
- Exits are never "below the candle", so a ROI of 2% may result in a exit at 2.4% if low was at 2.4% profit
|
- exits are never "below the candle", so a ROI of 2% may result in a exit at 2.4% if low was at 2.4% profit
|
||||||
- ROI entries which came into effect on the triggering candle (e.g. `120: 0.02` for 1h candles, from `60: 0.05`) will use the candle's open as exit rate
|
- Forceexits caused by `<N>=-1` ROI entries use low as exit value, unless N falls on the candle open (e.g. `120: -1` for 1h candles)
|
||||||
- Force-exits caused by `<N>=-1` ROI entries use low as exit value, unless N falls on the candle open (e.g. `120: -1` for 1h candles)
|
|
||||||
- Stoploss exits happen exactly at stoploss price, even if low was lower, but the loss will be `2 * fees` higher than the stoploss price
|
- Stoploss exits happen exactly at stoploss price, even if low was lower, but the loss will be `2 * fees` higher than the stoploss price
|
||||||
- Stoploss is evaluated before ROI within one candle. So you can often see more trades with the `stoploss` exit reason comparing to the results obtained with the same strategy in the Dry Run/Live Trade modes
|
- Stoploss is evaluated before ROI within one candle. So you can often see more trades with the `stoploss` exit reason comparing to the results obtained with the same strategy in the Dry Run/Live Trade modes
|
||||||
- Low happens before high for stoploss, protecting capital first
|
- Low happens before high for stoploss, protecting capital first
|
||||||
- Trailing stoploss
|
- Trailing stoploss
|
||||||
- Trailing Stoploss is only adjusted if it's below the candle's low (otherwise it would be triggered)
|
- Trailing Stoploss is only adjusted if it's below the candle's low (otherwise it would be triggered)
|
||||||
- On trade entry candles that trigger trailing stoploss, the "minimum offset" (`stop_positive_offset`) is assumed (instead of high) - and the stop is calculated from this point. This rule is NOT applicable to custom-stoploss scenarios, since there's no information about the stoploss logic available.
|
- On trade entry candles that trigger trailing stoploss, the "minimum offset" (`stop_positive_offset`) is assumed (instead of high) - and the stop is calculated from this point
|
||||||
- High happens first - adjusting stoploss
|
- High happens first - adjusting stoploss
|
||||||
- Low uses the adjusted stoploss (so exits with large high-low difference are backtested correctly)
|
- Low uses the adjusted stoploss (so exits with large high-low difference are backtested correctly)
|
||||||
- ROI applies before trailing-stop, ensuring profits are "top-capped" at ROI if both ROI and trailing stop applies
|
- ROI applies before trailing-stop, ensuring profits are "top-capped" at ROI if both ROI and trailing stop applies
|
||||||
@@ -566,7 +538,6 @@ Since backtesting lacks some detailed information about what happens within a ca
|
|||||||
- Stoploss
|
- Stoploss
|
||||||
- ROI
|
- ROI
|
||||||
- Trailing stoploss
|
- Trailing stoploss
|
||||||
- Position reversals (futures only) happen if an entry signal in the other direction than the closing trade triggers at the candle the existing trade closes.
|
|
||||||
|
|
||||||
Taking these assumptions, backtesting tries to mirror real trading as closely as possible. However, backtesting will **never** replace running a strategy in dry-run mode.
|
Taking these assumptions, backtesting tries to mirror real trading as closely as possible. However, backtesting will **never** replace running a strategy in dry-run mode.
|
||||||
Also, keep in mind that past results don't guarantee future success.
|
Also, keep in mind that past results don't guarantee future success.
|
||||||
@@ -575,13 +546,13 @@ In addition to the above assumptions, strategy authors should carefully read the
|
|||||||
|
|
||||||
### Trading limits in backtesting
|
### Trading limits in backtesting
|
||||||
|
|
||||||
Exchanges have certain trading limits, like minimum (and maximum) base currency, or minimum/maximum stake (quote) currency.
|
Exchanges have certain trading limits, like minimum base currency, or minimum stake (quote) currency.
|
||||||
These limits are usually listed in the exchange documentation as "trading rules" or similar and can be quite different between different pairs.
|
These limits are usually listed in the exchange documentation as "trading rules" or similar.
|
||||||
|
|
||||||
Backtesting (as well as live and dry-run) does honor these limits, and will ensure that a stoploss can be placed below this value - so the value will be slightly higher than what the exchange specifies.
|
Backtesting (as well as live and dry-run) does honor these limits, and will ensure that a stoploss can be placed below this value - so the value will be slightly higher than what the exchange specifies.
|
||||||
Freqtrade has however no information about historic limits.
|
Freqtrade has however no information about historic limits.
|
||||||
|
|
||||||
This can lead to situations where trading-limits are inflated by using a historic price, resulting in minimum amounts > 50\$.
|
This can lead to situations where trading-limits are inflated by using a historic price, resulting in minimum amounts > 50$.
|
||||||
|
|
||||||
For example:
|
For example:
|
||||||
|
|
||||||
@@ -600,7 +571,7 @@ These precision values are based on current exchange limits (as described in the
|
|||||||
|
|
||||||
## Improved backtest accuracy
|
## Improved backtest accuracy
|
||||||
|
|
||||||
One big limitation of backtesting is it's inability to know how prices moved intra-candle (was high before close, or vice-versa?).
|
One big limitation of backtesting is it's inability to know how prices moved intra-candle (was high before close, or viceversa?).
|
||||||
So assuming you run backtesting with a 1h timeframe, there will be 4 prices for that candle (Open, High, Low, Close).
|
So assuming you run backtesting with a 1h timeframe, there will be 4 prices for that candle (Open, High, Low, Close).
|
||||||
|
|
||||||
While backtesting does take some assumptions (read above) about this - this can never be perfect, and will always be biased in one way or the other.
|
While backtesting does take some assumptions (read above) about this - this can never be perfect, and will always be biased in one way or the other.
|
||||||
@@ -612,8 +583,7 @@ To utilize this, you can append `--timeframe-detail 5m` to your regular backtest
|
|||||||
freqtrade backtesting --strategy AwesomeStrategy --timeframe 1h --timeframe-detail 5m
|
freqtrade backtesting --strategy AwesomeStrategy --timeframe 1h --timeframe-detail 5m
|
||||||
```
|
```
|
||||||
|
|
||||||
This will load 1h data as well as 5m data for the timeframe. The strategy will be analyzed with the 1h timeframe, and Entry orders will only be placed at the main timeframe, however Order fills and exit signals will be evaluated at the 5m candle, simulating intra-candle movements.
|
This will load 1h data as well as 5m data for the timeframe. The strategy will be analyzed with the 1h timeframe - and for every "open trade candle" (candles where a trade is open) the 5m data will be used to simulate intra-candle movements.
|
||||||
|
|
||||||
All callback functions (`custom_exit()`, `custom_stoploss()`, ... ) will be running for each 5m candle once the trade is opened (so 12 times in the above example of 1h timeframe, and 5m detailed timeframe).
|
All callback functions (`custom_exit()`, `custom_stoploss()`, ... ) will be running for each 5m candle once the trade is opened (so 12 times in the above example of 1h timeframe, and 5m detailed timeframe).
|
||||||
|
|
||||||
`--timeframe-detail` must be smaller than the original timeframe, otherwise backtesting will fail to start.
|
`--timeframe-detail` must be smaller than the original timeframe, otherwise backtesting will fail to start.
|
||||||
@@ -631,22 +601,22 @@ To compare multiple strategies, a list of Strategies can be provided to backtest
|
|||||||
This is limited to 1 timeframe value per run. However, data is only loaded once from disk so if you have multiple
|
This is limited to 1 timeframe value per run. However, data is only loaded once from disk so if you have multiple
|
||||||
strategies you'd like to compare, this will give a nice runtime boost.
|
strategies you'd like to compare, this will give a nice runtime boost.
|
||||||
|
|
||||||
All listed Strategies need to be in the same directory, unless also `--recursive-strategy-search` is specified, where sub-directories within the strategy directory are also considered.
|
All listed Strategies need to be in the same directory.
|
||||||
|
|
||||||
``` bash
|
``` bash
|
||||||
freqtrade backtesting --timerange 20180401-20180410 --timeframe 5m --strategy-list Strategy001 Strategy002 --export trades
|
freqtrade backtesting --timerange 20180401-20180410 --timeframe 5m --strategy-list Strategy001 Strategy002 --export trades
|
||||||
```
|
```
|
||||||
|
|
||||||
This will save the results to `user_data/backtest_results/backtest-result-<datetime>.json`, including results for both `Strategy001` and `Strategy002`.
|
This will save the results to `user_data/backtest_results/backtest-result-<strategy>.json`, injecting the strategy-name into the target filename.
|
||||||
There will be an additional table comparing win/losses of the different strategies (identical to the "Total" row in the first table).
|
There will be an additional table comparing win/losses of the different strategies (identical to the "Total" row in the first table).
|
||||||
Detailed output for all strategies one after the other will be available, so make sure to scroll up to see the details per strategy.
|
Detailed output for all strategies one after the other will be available, so make sure to scroll up to see the details per strategy.
|
||||||
|
|
||||||
```
|
```
|
||||||
================================================== STRATEGY SUMMARY ===================================================================
|
=========================================================== STRATEGY SUMMARY ===========================================================================
|
||||||
| Strategy | Trades | Avg Profit % | Tot Profit BTC | Tot Profit % | Avg Duration | Wins | Draws | Losses | Drawdown % |
|
| Strategy | Entries | Avg Profit % | Cum Profit % | Tot Profit BTC | Tot Profit % | Avg Duration | Wins | Draws | Losses | Drawdown % |
|
||||||
|-------------+---------+----------------+------------------+----------------+----------------+-------+--------+--------+------------|
|
|:------------|---------:|---------------:|---------------:|-----------------:|---------------:|:---------------|------:|-------:|-------:|-----------:|
|
||||||
| Strategy1 | 429 | 0.36 | 0.00762792 | 76.20 | 4:12:00 | 186 | 0 | 243 | 45.2 |
|
| Strategy1 | 429 | 0.36 | 152.41 | 0.00762792 | 76.20 | 4:12:00 | 186 | 0 | 243 | 45.2 |
|
||||||
| Strategy2 | 1487 | -0.13 | -0.00988917 | -98.79 | 4:43:00 | 662 | 0 | 825 | 241.68 |
|
| Strategy2 | 1487 | -0.13 | -197.58 | -0.00988917 | -98.79 | 4:43:00 | 662 | 0 | 825 | 241.68 |
|
||||||
```
|
```
|
||||||
|
|
||||||
## Next step
|
## Next step
|
||||||
|
|||||||
@@ -7,32 +7,16 @@ This page provides you some basic concepts on how Freqtrade works and operates.
|
|||||||
* **Strategy**: Your trading strategy, telling the bot what to do.
|
* **Strategy**: Your trading strategy, telling the bot what to do.
|
||||||
* **Trade**: Open position.
|
* **Trade**: Open position.
|
||||||
* **Open Order**: Order which is currently placed on the exchange, and is not yet complete.
|
* **Open Order**: Order which is currently placed on the exchange, and is not yet complete.
|
||||||
* **Pair**: Tradable pair, usually in the format of Base/Quote (e.g. `XRP/USDT` for spot, `XRP/USDT:USDT` for futures).
|
* **Pair**: Tradable pair, usually in the format of Base/Quote (e.g. XRP/USDT).
|
||||||
* **Timeframe**: Candle length to use (e.g. `"5m"`, `"1h"`, ...).
|
* **Timeframe**: Candle length to use (e.g. `"5m"`, `"1h"`, ...).
|
||||||
* **Indicators**: Technical indicators (SMA, EMA, RSI, ...).
|
* **Indicators**: Technical indicators (SMA, EMA, RSI, ...).
|
||||||
* **Limit order**: Limit orders which execute at the defined limit price or better.
|
* **Limit order**: Limit orders which execute at the defined limit price or better.
|
||||||
* **Market order**: Guaranteed to fill, may move price depending on the order size.
|
* **Market order**: Guaranteed to fill, may move price depending on the order size.
|
||||||
* **Current Profit**: Currently pending (unrealized) profit for this trade. This is mainly used throughout the bot and UI.
|
|
||||||
* **Realized Profit**: Already realized profit. Only relevant in combination with [partial exits](strategy-callbacks.md#adjust-trade-position) - which also explains the calculation logic for this.
|
|
||||||
* **Total Profit**: Combined realized and unrealized profit. The relative number (%) is calculated against the total investment in this trade.
|
|
||||||
|
|
||||||
## Fee handling
|
## Fee handling
|
||||||
|
|
||||||
All profit calculations of Freqtrade include fees. For Backtesting / Hyperopt / Dry-run modes, the exchange default fee is used (lowest tier on the exchange). For live operations, fees are used as applied by the exchange (this includes BNB rebates etc.).
|
All profit calculations of Freqtrade include fees. For Backtesting / Hyperopt / Dry-run modes, the exchange default fee is used (lowest tier on the exchange). For live operations, fees are used as applied by the exchange (this includes BNB rebates etc.).
|
||||||
|
|
||||||
## Pair naming
|
|
||||||
|
|
||||||
Freqtrade follows the [ccxt naming convention](https://docs.ccxt.com/#/README?id=consistency-of-base-and-quote-currencies) for currencies.
|
|
||||||
Using the wrong naming convention in the wrong market will usually result in the bot not recognizing the pair, usually resulting in errors like "this pair is not available".
|
|
||||||
|
|
||||||
### Spot pair naming
|
|
||||||
|
|
||||||
For spot pairs, naming will be `base/quote` (e.g. `ETH/USDT`).
|
|
||||||
|
|
||||||
### Futures pair naming
|
|
||||||
|
|
||||||
For futures pairs, naming will be `base/quote:settle` (e.g. `ETH/USDT:USDT`).
|
|
||||||
|
|
||||||
## Bot execution logic
|
## Bot execution logic
|
||||||
|
|
||||||
Starting freqtrade in dry-run or live mode (using `freqtrade trade`) will start the bot and start the bot iteration loop.
|
Starting freqtrade in dry-run or live mode (using `freqtrade trade`) will start the bot and start the bot iteration loop.
|
||||||
@@ -49,8 +33,6 @@ By default, the bot loop runs every few seconds (`internals.process_throttle_sec
|
|||||||
* Call `populate_indicators()`
|
* Call `populate_indicators()`
|
||||||
* Call `populate_entry_trend()`
|
* Call `populate_entry_trend()`
|
||||||
* Call `populate_exit_trend()`
|
* Call `populate_exit_trend()`
|
||||||
* Update trades open order state from exchange.
|
|
||||||
* Call `order_filled()` strategy callback for filled orders.
|
|
||||||
* Check timeouts for open orders.
|
* Check timeouts for open orders.
|
||||||
* Calls `check_entry_timeout()` strategy callback for open entry orders.
|
* Calls `check_entry_timeout()` strategy callback for open entry orders.
|
||||||
* Calls `check_exit_timeout()` strategy callback for open exit orders.
|
* Calls `check_exit_timeout()` strategy callback for open exit orders.
|
||||||
@@ -75,10 +57,10 @@ This loop will be repeated again and again until the bot is stopped.
|
|||||||
|
|
||||||
* Load historic data for configured pairlist.
|
* Load historic data for configured pairlist.
|
||||||
* Calls `bot_start()` once.
|
* Calls `bot_start()` once.
|
||||||
|
* Calls `bot_loop_start()` once.
|
||||||
* Calculate indicators (calls `populate_indicators()` once per pair).
|
* Calculate indicators (calls `populate_indicators()` once per pair).
|
||||||
* Calculate entry / exit signals (calls `populate_entry_trend()` and `populate_exit_trend()` once per pair).
|
* Calculate entry / exit signals (calls `populate_entry_trend()` and `populate_exit_trend()` once per pair).
|
||||||
* Loops per candle simulating entry and exit points.
|
* Loops per candle simulating entry and exit points.
|
||||||
* Calls `bot_loop_start()` strategy callback.
|
|
||||||
* Check for Order timeouts, either via the `unfilledtimeout` configuration, or via `check_entry_timeout()` / `check_exit_timeout()` strategy callbacks.
|
* Check for Order timeouts, either via the `unfilledtimeout` configuration, or via `check_entry_timeout()` / `check_exit_timeout()` strategy callbacks.
|
||||||
* Calls `adjust_entry_price()` strategy callback for open entry orders.
|
* Calls `adjust_entry_price()` strategy callback for open entry orders.
|
||||||
* Check for trade entry signals (`enter_long` / `enter_short` columns).
|
* Check for trade entry signals (`enter_long` / `enter_short` columns).
|
||||||
@@ -87,15 +69,9 @@ This loop will be repeated again and again until the bot is stopped.
|
|||||||
* In Margin and Futures mode, `leverage()` strategy callback is called to determine the desired leverage.
|
* In Margin and Futures mode, `leverage()` strategy callback is called to determine the desired leverage.
|
||||||
* Determine stake size by calling the `custom_stake_amount()` callback.
|
* Determine stake size by calling the `custom_stake_amount()` callback.
|
||||||
* Check position adjustments for open trades if enabled and call `adjust_trade_position()` to determine if an additional order is requested.
|
* Check position adjustments for open trades if enabled and call `adjust_trade_position()` to determine if an additional order is requested.
|
||||||
* Call `order_filled()` strategy callback for filled entry orders.
|
|
||||||
* Call `custom_stoploss()` and `custom_exit()` to find custom exit points.
|
* Call `custom_stoploss()` and `custom_exit()` to find custom exit points.
|
||||||
* For exits based on exit-signal, custom-exit and partial exits: Call `custom_exit_price()` to determine exit price (Prices are moved to be within the closing candle).
|
* For exits based on exit-signal, custom-exit and partial exits: Call `custom_exit_price()` to determine exit price (Prices are moved to be within the closing candle).
|
||||||
* Call `order_filled()` strategy callback for filled exit orders.
|
|
||||||
* Generate backtest report output
|
* Generate backtest report output
|
||||||
|
|
||||||
!!! Note
|
!!! Note
|
||||||
Both Backtesting and Hyperopt include exchange default Fees in the calculation. Custom fees can be passed to backtesting / hyperopt by specifying the `--fee` argument.
|
Both Backtesting and Hyperopt include exchange default Fees in the calculation. Custom fees can be passed to backtesting / hyperopt by specifying the `--fee` argument.
|
||||||
|
|
||||||
!!! Warning "Callback call frequency"
|
|
||||||
Backtesting will call each callback at max. once per candle (`--timeframe-detail` modifies this behavior to once per detailed candle).
|
|
||||||
Most callbacks will be called once per iteration in live (usually every ~5s) - which can cause backtesting mismatches.
|
|
||||||
|
|||||||
@@ -3,7 +3,7 @@
|
|||||||
This page explains the different parameters of the bot and how to run it.
|
This page explains the different parameters of the bot and how to run it.
|
||||||
|
|
||||||
!!! Note
|
!!! Note
|
||||||
If you've used `setup.sh`, don't forget to activate your virtual environment (`source .venv/bin/activate`) before running freqtrade commands.
|
If you've used `setup.sh`, don't forget to activate your virtual environment (`source .env/bin/activate`) before running freqtrade commands.
|
||||||
|
|
||||||
!!! Warning "Up-to-date clock"
|
!!! Warning "Up-to-date clock"
|
||||||
The clock on the system running the bot must be accurate, synchronized to a NTP server frequently enough to avoid problems with communication to the exchanges.
|
The clock on the system running the bot must be accurate, synchronized to a NTP server frequently enough to avoid problems with communication to the exchanges.
|
||||||
@@ -12,50 +12,41 @@ This page explains the different parameters of the bot and how to run it.
|
|||||||
|
|
||||||
```
|
```
|
||||||
usage: freqtrade [-h] [-V]
|
usage: freqtrade [-h] [-V]
|
||||||
{trade,create-userdir,new-config,show-config,new-strategy,download-data,convert-data,convert-trade-data,trades-to-ohlcv,list-data,backtesting,backtesting-show,backtesting-analysis,edge,hyperopt,hyperopt-list,hyperopt-show,list-exchanges,list-markets,list-pairs,list-strategies,list-freqaimodels,list-timeframes,show-trades,test-pairlist,convert-db,install-ui,plot-dataframe,plot-profit,webserver,strategy-updater,lookahead-analysis,recursive-analysis}
|
{trade,create-userdir,new-config,new-strategy,download-data,convert-data,convert-trade-data,list-data,backtesting,edge,hyperopt,hyperopt-list,hyperopt-show,list-exchanges,list-hyperopts,list-markets,list-pairs,list-strategies,list-timeframes,show-trades,test-pairlist,install-ui,plot-dataframe,plot-profit,webserver}
|
||||||
...
|
...
|
||||||
|
|
||||||
Free, open source crypto trading bot
|
Free, open source crypto trading bot
|
||||||
|
|
||||||
positional arguments:
|
positional arguments:
|
||||||
{trade,create-userdir,new-config,show-config,new-strategy,download-data,convert-data,convert-trade-data,trades-to-ohlcv,list-data,backtesting,backtesting-show,backtesting-analysis,edge,hyperopt,hyperopt-list,hyperopt-show,list-exchanges,list-markets,list-pairs,list-strategies,list-freqaimodels,list-timeframes,show-trades,test-pairlist,convert-db,install-ui,plot-dataframe,plot-profit,webserver,strategy-updater,lookahead-analysis,recursive-analysis}
|
{trade,create-userdir,new-config,new-strategy,download-data,convert-data,convert-trade-data,list-data,backtesting,edge,hyperopt,hyperopt-list,hyperopt-show,list-exchanges,list-hyperopts,list-markets,list-pairs,list-strategies,list-timeframes,show-trades,test-pairlist,install-ui,plot-dataframe,plot-profit,webserver}
|
||||||
trade Trade module.
|
trade Trade module.
|
||||||
create-userdir Create user-data directory.
|
create-userdir Create user-data directory.
|
||||||
new-config Create new config
|
new-config Create new config
|
||||||
show-config Show resolved config
|
|
||||||
new-strategy Create new strategy
|
new-strategy Create new strategy
|
||||||
download-data Download backtesting data.
|
download-data Download backtesting data.
|
||||||
convert-data Convert candle (OHLCV) data from one format to
|
convert-data Convert candle (OHLCV) data from one format to
|
||||||
another.
|
another.
|
||||||
convert-trade-data Convert trade data from one format to another.
|
convert-trade-data Convert trade data from one format to another.
|
||||||
trades-to-ohlcv Convert trade data to OHLCV data.
|
|
||||||
list-data List downloaded data.
|
list-data List downloaded data.
|
||||||
backtesting Backtesting module.
|
backtesting Backtesting module.
|
||||||
backtesting-show Show past Backtest results
|
|
||||||
backtesting-analysis
|
|
||||||
Backtest Analysis module.
|
|
||||||
edge Edge module.
|
edge Edge module.
|
||||||
hyperopt Hyperopt module.
|
hyperopt Hyperopt module.
|
||||||
hyperopt-list List Hyperopt results
|
hyperopt-list List Hyperopt results
|
||||||
hyperopt-show Show details of Hyperopt results
|
hyperopt-show Show details of Hyperopt results
|
||||||
list-exchanges Print available exchanges.
|
list-exchanges Print available exchanges.
|
||||||
|
list-hyperopts Print available hyperopt classes.
|
||||||
list-markets Print markets on exchange.
|
list-markets Print markets on exchange.
|
||||||
list-pairs Print pairs on exchange.
|
list-pairs Print pairs on exchange.
|
||||||
list-strategies Print available strategies.
|
list-strategies Print available strategies.
|
||||||
list-freqaimodels Print available freqAI models.
|
|
||||||
list-timeframes Print available timeframes for the exchange.
|
list-timeframes Print available timeframes for the exchange.
|
||||||
show-trades Show trades.
|
show-trades Show trades.
|
||||||
test-pairlist Test your pairlist configuration.
|
test-pairlist Test your pairlist configuration.
|
||||||
convert-db Migrate database to different system
|
|
||||||
install-ui Install FreqUI
|
install-ui Install FreqUI
|
||||||
plot-dataframe Plot candles with indicators.
|
plot-dataframe Plot candles with indicators.
|
||||||
plot-profit Generate plot showing profits.
|
plot-profit Generate plot showing profits.
|
||||||
webserver Webserver module.
|
webserver Webserver module.
|
||||||
strategy-updater updates outdated strategy files to the current version
|
|
||||||
lookahead-analysis Check for potential look ahead bias.
|
|
||||||
recursive-analysis Check for potential recursive formula issue.
|
|
||||||
|
|
||||||
options:
|
optional arguments:
|
||||||
-h, --help show this help message and exit
|
-h, --help show this help message and exit
|
||||||
-V, --version show program's version number and exit
|
-V, --version show program's version number and exit
|
||||||
|
|
||||||
|
|||||||
@@ -11,10 +11,10 @@ Per default, the bot loads the configuration from the `config.json` file, locate
|
|||||||
|
|
||||||
You can specify a different configuration file used by the bot with the `-c/--config` command-line option.
|
You can specify a different configuration file used by the bot with the `-c/--config` command-line option.
|
||||||
|
|
||||||
If you used the [Quick start](docker_quickstart.md#docker-quick-start) method for installing
|
If you used the [Quick start](installation.md/#quick-start) method for installing
|
||||||
the bot, the installation script should have already created the default configuration file (`config.json`) for you.
|
the bot, the installation script should have already created the default configuration file (`config.json`) for you.
|
||||||
|
|
||||||
If the default configuration file is not created we recommend to use `freqtrade new-config --config user_data/config.json` to generate a basic configuration file.
|
If the default configuration file is not created we recommend to use `freqtrade new-config --config config.json` to generate a basic configuration file.
|
||||||
|
|
||||||
The Freqtrade configuration file is to be written in JSON format.
|
The Freqtrade configuration file is to be written in JSON format.
|
||||||
|
|
||||||
@@ -39,29 +39,16 @@ Please note that Environment variables will overwrite corresponding settings in
|
|||||||
|
|
||||||
Common example:
|
Common example:
|
||||||
|
|
||||||
``` bash
|
```
|
||||||
FREQTRADE__TELEGRAM__CHAT_ID=<telegramchatid>
|
FREQTRADE__TELEGRAM__CHAT_ID=<telegramchatid>
|
||||||
FREQTRADE__TELEGRAM__TOKEN=<telegramToken>
|
FREQTRADE__TELEGRAM__TOKEN=<telegramToken>
|
||||||
FREQTRADE__EXCHANGE__KEY=<yourExchangeKey>
|
FREQTRADE__EXCHANGE__KEY=<yourExchangeKey>
|
||||||
FREQTRADE__EXCHANGE__SECRET=<yourExchangeSecret>
|
FREQTRADE__EXCHANGE__SECRET=<yourExchangeSecret>
|
||||||
```
|
```
|
||||||
|
|
||||||
Json lists are parsed as json - so you can use the following to set a list of pairs:
|
|
||||||
|
|
||||||
``` bash
|
|
||||||
export FREQTRADE__EXCHANGE__PAIR_WHITELIST='["BTC/USDT", "ETH/USDT"]'
|
|
||||||
```
|
|
||||||
|
|
||||||
!!! Note
|
!!! Note
|
||||||
Environment variables detected are logged at startup - so if you can't find why a value is not what you think it should be based on the configuration, make sure it's not loaded from an environment variable.
|
Environment variables detected are logged at startup - so if you can't find why a value is not what you think it should be based on the configuration, make sure it's not loaded from an environment variable.
|
||||||
|
|
||||||
!!! Tip "Validate combined result"
|
|
||||||
You can use the [show-config subcommand](utils.md#show-config) to see the final, combined configuration.
|
|
||||||
|
|
||||||
??? Warning "Loading sequence"
|
|
||||||
Environment variables are loaded after the initial configuration. As such, you cannot provide the path to the configuration through environment variables. Please use `--config path/to/config.json` for that.
|
|
||||||
This also applies to `user_dir` to some degree. while the user directory can be set through environment variables - the configuration will **not** be loaded from that location.
|
|
||||||
|
|
||||||
### Multiple configuration files
|
### Multiple configuration files
|
||||||
|
|
||||||
Multiple configuration files can be specified and used by the bot or the bot can read its configuration parameters from the process standard input stream.
|
Multiple configuration files can be specified and used by the bot or the bot can read its configuration parameters from the process standard input stream.
|
||||||
@@ -69,9 +56,6 @@ Multiple configuration files can be specified and used by the bot or the bot can
|
|||||||
You can specify additional configuration files in `add_config_files`. Files specified in this parameter will be loaded and merged with the initial config file. The files are resolved relative to the initial configuration file.
|
You can specify additional configuration files in `add_config_files`. Files specified in this parameter will be loaded and merged with the initial config file. The files are resolved relative to the initial configuration file.
|
||||||
This is similar to using multiple `--config` parameters, but simpler in usage as you don't have to specify all files for all commands.
|
This is similar to using multiple `--config` parameters, but simpler in usage as you don't have to specify all files for all commands.
|
||||||
|
|
||||||
!!! Tip "Validate combined result"
|
|
||||||
You can use the [show-config subcommand](utils.md#show-config) to see the final, combined configuration.
|
|
||||||
|
|
||||||
!!! Tip "Use multiple configuration files to keep secrets secret"
|
!!! Tip "Use multiple configuration files to keep secrets secret"
|
||||||
You can use a 2nd configuration file containing your secrets. That way you can share your "primary" configuration file, while still keeping your API keys for yourself.
|
You can use a 2nd configuration file containing your secrets. That way you can share your "primary" configuration file, while still keeping your API keys for yourself.
|
||||||
The 2nd file should only specify what you intend to override.
|
The 2nd file should only specify what you intend to override.
|
||||||
@@ -129,19 +113,6 @@ This is similar to using multiple `--config` parameters, but simpler in usage as
|
|||||||
|
|
||||||
If multiple files are in the `add_config_files` section, then they will be assumed to be at identical levels, having the last occurrence override the earlier config (unless a parent already defined such a key).
|
If multiple files are in the `add_config_files` section, then they will be assumed to be at identical levels, having the last occurrence override the earlier config (unless a parent already defined such a key).
|
||||||
|
|
||||||
## Editor autocomplete and validation
|
|
||||||
|
|
||||||
If you are using an editor that supports JSON schema, you can use the schema provided by Freqtrade to get autocompletion and validation of your configuration file by adding the following line to the top of your configuration file:
|
|
||||||
|
|
||||||
``` json
|
|
||||||
{
|
|
||||||
"$schema": "https://schema.freqtrade.io/schema.json",
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
??? Note "Develop version"
|
|
||||||
The develop schema is available as `https://schema.freqtrade.io/schema_dev.json` - though we recommend to stick to the stable version for the best experience.
|
|
||||||
|
|
||||||
## Configuration parameters
|
## Configuration parameters
|
||||||
|
|
||||||
The table below will list all configuration parameters available.
|
The table below will list all configuration parameters available.
|
||||||
@@ -152,10 +123,10 @@ Freqtrade can also load many options via command line (CLI) arguments (check out
|
|||||||
|
|
||||||
The prevalence for all Options is as follows:
|
The prevalence for all Options is as follows:
|
||||||
|
|
||||||
* CLI arguments override any other option
|
- CLI arguments override any other option
|
||||||
* [Environment Variables](#environment-variables)
|
- [Environment Variables](#environment-variables)
|
||||||
* Configuration files are used in sequence (the last file wins) and override Strategy configurations.
|
- Configuration files are used in sequence (the last file wins) and override Strategy configurations.
|
||||||
* Strategy configurations are only used if they are not set via configuration or command-line arguments. These options are marked with [Strategy Override](#parameters-in-the-strategy) in the below table.
|
- Strategy configurations are only used if they are not set via configuration or command-line arguments. These options are marked with [Strategy Override](#parameters-in-the-strategy) in the below table.
|
||||||
|
|
||||||
### Parameters table
|
### Parameters table
|
||||||
|
|
||||||
@@ -163,7 +134,7 @@ Mandatory parameters are marked as **Required**, which means that they are requi
|
|||||||
|
|
||||||
| Parameter | Description |
|
| Parameter | Description |
|
||||||
|------------|-------------|
|
|------------|-------------|
|
||||||
| `max_open_trades` | **Required.** Number of open trades your bot is allowed to have. Only one open trade per pair is possible, so the length of your pairlist is another limitation that can apply. If -1 then it is ignored (i.e. potentially unlimited open trades, limited by the pairlist). [More information below](#configuring-amount-per-trade). [Strategy Override](#parameters-in-the-strategy).<br> **Datatype:** Positive integer or -1.
|
| `max_open_trades` | **Required.** Number of open trades your bot is allowed to have. Only one open trade per pair is possible, so the length of your pairlist is another limitation that can apply. If -1 then it is ignored (i.e. potentially unlimited open trades, limited by the pairlist). [More information below](#configuring-amount-per-trade).<br> **Datatype:** Positive integer or -1.
|
||||||
| `stake_currency` | **Required.** Crypto-currency used for trading. <br> **Datatype:** String
|
| `stake_currency` | **Required.** Crypto-currency used for trading. <br> **Datatype:** String
|
||||||
| `stake_amount` | **Required.** Amount of crypto-currency your bot will use for each trade. Set it to `"unlimited"` to allow the bot to use all available balance. [More information below](#configuring-amount-per-trade). <br> **Datatype:** Positive float or `"unlimited"`.
|
| `stake_amount` | **Required.** Amount of crypto-currency your bot will use for each trade. Set it to `"unlimited"` to allow the bot to use all available balance. [More information below](#configuring-amount-per-trade). <br> **Datatype:** Positive float or `"unlimited"`.
|
||||||
| `tradable_balance_ratio` | Ratio of the total account balance the bot is allowed to trade. [More information below](#configuring-amount-per-trade). <br>*Defaults to `0.99` (99%).*<br> **Datatype:** Positive float between `0.1` and `1.0`.
|
| `tradable_balance_ratio` | Ratio of the total account balance the bot is allowed to trade. [More information below](#configuring-amount-per-trade). <br>*Defaults to `0.99` 99%).*<br> **Datatype:** Positive float between `0.1` and `1.0`.
|
||||||
@@ -174,7 +145,7 @@ Mandatory parameters are marked as **Required**, which means that they are requi
|
|||||||
| `timeframe` | The timeframe to use (e.g `1m`, `5m`, `15m`, `30m`, `1h` ...). Usually missing in configuration, and specified in the strategy. [Strategy Override](#parameters-in-the-strategy). <br> **Datatype:** String
|
| `timeframe` | The timeframe to use (e.g `1m`, `5m`, `15m`, `30m`, `1h` ...). Usually missing in configuration, and specified in the strategy. [Strategy Override](#parameters-in-the-strategy). <br> **Datatype:** String
|
||||||
| `fiat_display_currency` | Fiat currency used to show your profits. [More information below](#what-values-can-be-used-for-fiat_display_currency). <br> **Datatype:** String
|
| `fiat_display_currency` | Fiat currency used to show your profits. [More information below](#what-values-can-be-used-for-fiat_display_currency). <br> **Datatype:** String
|
||||||
| `dry_run` | **Required.** Define if the bot must be in Dry Run or production mode. <br>*Defaults to `true`.* <br> **Datatype:** Boolean
|
| `dry_run` | **Required.** Define if the bot must be in Dry Run or production mode. <br>*Defaults to `true`.* <br> **Datatype:** Boolean
|
||||||
| `dry_run_wallet` | Define the starting amount in stake currency for the simulated wallet used by the bot running in Dry Run mode. [More information below](#dry-run-wallet)<br>*Defaults to `1000`.* <br> **Datatype:** Float or Dict
|
| `dry_run_wallet` | Define the starting amount in stake currency for the simulated wallet used by the bot running in Dry Run mode.<br>*Defaults to `1000`.* <br> **Datatype:** Float
|
||||||
| `cancel_open_orders_on_exit` | Cancel open orders when the `/stop` RPC command is issued, `Ctrl+C` is pressed or the bot dies unexpectedly. When set to `true`, this allows you to use `/stop` to cancel unfilled and partially filled orders in the event of a market crash. It does not impact open positions. <br>*Defaults to `false`.* <br> **Datatype:** Boolean
|
| `cancel_open_orders_on_exit` | Cancel open orders when the `/stop` RPC command is issued, `Ctrl+C` is pressed or the bot dies unexpectedly. When set to `true`, this allows you to use `/stop` to cancel unfilled and partially filled orders in the event of a market crash. It does not impact open positions. <br>*Defaults to `false`.* <br> **Datatype:** Boolean
|
||||||
| `process_only_new_candles` | Enable processing of indicators only when new candles arrive. If false each loop populates the indicators, this will mean the same candle is processed many times creating system load but can be useful of your strategy depends on tick data not only candle. [Strategy Override](#parameters-in-the-strategy). <br>*Defaults to `true`.* <br> **Datatype:** Boolean
|
| `process_only_new_candles` | Enable processing of indicators only when new candles arrive. If false each loop populates the indicators, this will mean the same candle is processed many times creating system load but can be useful of your strategy depends on tick data not only candle. [Strategy Override](#parameters-in-the-strategy). <br>*Defaults to `true`.* <br> **Datatype:** Boolean
|
||||||
| `minimal_roi` | **Required.** Set the threshold as ratio the bot will use to exit a trade. [More information below](#understand-minimal_roi). [Strategy Override](#parameters-in-the-strategy). <br> **Datatype:** Dict
|
| `minimal_roi` | **Required.** Set the threshold as ratio the bot will use to exit a trade. [More information below](#understand-minimal_roi). [Strategy Override](#parameters-in-the-strategy). <br> **Datatype:** Dict
|
||||||
@@ -184,29 +155,29 @@ Mandatory parameters are marked as **Required**, which means that they are requi
|
|||||||
| `trailing_stop_positive_offset` | Offset on when to apply `trailing_stop_positive`. Percentage value which should be positive. More details in the [stoploss documentation](stoploss.md#trailing-stop-loss-only-once-the-trade-has-reached-a-certain-offset). [Strategy Override](#parameters-in-the-strategy). <br>*Defaults to `0.0` (no offset).* <br> **Datatype:** Float
|
| `trailing_stop_positive_offset` | Offset on when to apply `trailing_stop_positive`. Percentage value which should be positive. More details in the [stoploss documentation](stoploss.md#trailing-stop-loss-only-once-the-trade-has-reached-a-certain-offset). [Strategy Override](#parameters-in-the-strategy). <br>*Defaults to `0.0` (no offset).* <br> **Datatype:** Float
|
||||||
| `trailing_only_offset_is_reached` | Only apply trailing stoploss when the offset is reached. [stoploss documentation](stoploss.md). [Strategy Override](#parameters-in-the-strategy). <br>*Defaults to `false`.* <br> **Datatype:** Boolean
|
| `trailing_only_offset_is_reached` | Only apply trailing stoploss when the offset is reached. [stoploss documentation](stoploss.md). [Strategy Override](#parameters-in-the-strategy). <br>*Defaults to `false`.* <br> **Datatype:** Boolean
|
||||||
| `fee` | Fee used during backtesting / dry-runs. Should normally not be configured, which has freqtrade fall back to the exchange default fee. Set as ratio (e.g. 0.001 = 0.1%). Fee is applied twice for each trade, once when buying, once when selling. <br> **Datatype:** Float (as ratio)
|
| `fee` | Fee used during backtesting / dry-runs. Should normally not be configured, which has freqtrade fall back to the exchange default fee. Set as ratio (e.g. 0.001 = 0.1%). Fee is applied twice for each trade, once when buying, once when selling. <br> **Datatype:** Float (as ratio)
|
||||||
| `futures_funding_rate` | User-specified funding rate to be used when historical funding rates are not available from the exchange. This does not overwrite real historical rates. It is recommended that this be set to 0 unless you are testing a specific coin and you understand how the funding rate will affect freqtrade's profit calculations. [More information here](leverage.md#unavailable-funding-rates) <br>*Defaults to `None`.*<br> **Datatype:** Float
|
| `futures_funding_rate` | User-specified funding rate to be used when historical funding rates are not available from the exchange. This does not overwrite real historical rates. It is recommended that this be set to 0 unless you are testing a specific coin and you understand how the funding rate will affect freqtrade's profit calculations. [More information here](leverage.md#unavailable-funding-rates) <br>*Defaults to None.*<br> **Datatype:** Float
|
||||||
| `trading_mode` | Specifies if you want to trade regularly, trade with leverage, or trade contracts whose prices are derived from matching cryptocurrency prices. [leverage documentation](leverage.md). <br>*Defaults to `"spot"`.* <br> **Datatype:** String
|
| `trading_mode` | Specifies if you want to trade regularly, trade with leverage, or trade contracts whose prices are derived from matching cryptocurrency prices. [leverage documentation](leverage.md). <br>*Defaults to `"spot"`.* <br> **Datatype:** String
|
||||||
| `margin_mode` | When trading with leverage, this determines if the collateral owned by the trader will be shared or isolated to each trading pair [leverage documentation](leverage.md). <br> **Datatype:** String
|
| `margin_mode` | When trading with leverage, this determines if the collateral owned by the trader will be shared or isolated to each trading pair [leverage documentation](leverage.md). <br> **Datatype:** String
|
||||||
| `liquidation_buffer` | A ratio specifying how large of a safety net to place between the liquidation price and the stoploss to prevent a position from reaching the liquidation price [leverage documentation](leverage.md). <br>*Defaults to `0.05`.* <br> **Datatype:** Float
|
| `liquidation_buffer` | A ratio specifying how large of a safety net to place between the liquidation price and the stoploss to prevent a position from reaching the liquidation price [leverage documentation](leverage.md). <br>*Defaults to `0.05`.* <br> **Datatype:** Float
|
||||||
| | **Unfilled timeout**
|
| | **Unfilled timeout**
|
||||||
| `unfilledtimeout.entry` | **Required.** How long (in minutes or seconds) the bot will wait for an unfilled entry order to complete, after which the order will be cancelled. [Strategy Override](#parameters-in-the-strategy).<br> **Datatype:** Integer
|
| `unfilledtimeout.entry` | **Required.** How long (in minutes or seconds) the bot will wait for an unfilled entry order to complete, after which the order will be cancelled and repeated at current (new) price, as long as there is a signal. [Strategy Override](#parameters-in-the-strategy).<br> **Datatype:** Integer
|
||||||
| `unfilledtimeout.exit` | **Required.** How long (in minutes or seconds) the bot will wait for an unfilled exit order to complete, after which the order will be cancelled and repeated at current (new) price, as long as there is a signal. [Strategy Override](#parameters-in-the-strategy).<br> **Datatype:** Integer
|
| `unfilledtimeout.exit` | **Required.** How long (in minutes or seconds) the bot will wait for an unfilled exit order to complete, after which the order will be cancelled and repeated at current (new) price, as long as there is a signal. [Strategy Override](#parameters-in-the-strategy).<br> **Datatype:** Integer
|
||||||
| `unfilledtimeout.unit` | Unit to use in unfilledtimeout setting. Note: If you set unfilledtimeout.unit to "seconds", "internals.process_throttle_secs" must be inferior or equal to timeout [Strategy Override](#parameters-in-the-strategy). <br> *Defaults to `"minutes"`.* <br> **Datatype:** String
|
| `unfilledtimeout.unit` | Unit to use in unfilledtimeout setting. Note: If you set unfilledtimeout.unit to "seconds", "internals.process_throttle_secs" must be inferior or equal to timeout [Strategy Override](#parameters-in-the-strategy). <br> *Defaults to `minutes`.* <br> **Datatype:** String
|
||||||
| `unfilledtimeout.exit_timeout_count` | How many times can exit orders time out. Once this number of timeouts is reached, an emergency exit is triggered. 0 to disable and allow unlimited order cancels. [Strategy Override](#parameters-in-the-strategy).<br>*Defaults to `0`.* <br> **Datatype:** Integer
|
| `unfilledtimeout.exit_timeout_count` | How many times can exit orders time out. Once this number of timeouts is reached, an emergency exit is triggered. 0 to disable and allow unlimited order cancels. [Strategy Override](#parameters-in-the-strategy).<br>*Defaults to `0`.* <br> **Datatype:** Integer
|
||||||
| | **Pricing**
|
| | **Pricing**
|
||||||
| `entry_pricing.price_side` | Select the side of the spread the bot should look at to get the entry rate. [More information below](#entry-price).<br> *Defaults to `"same"`.* <br> **Datatype:** String (either `ask`, `bid`, `same` or `other`).
|
| `entry_pricing.price_side` | Select the side of the spread the bot should look at to get the entry rate. [More information below](#buy-price-side).<br> *Defaults to `same`.* <br> **Datatype:** String (either `ask`, `bid`, `same` or `other`).
|
||||||
| `entry_pricing.price_last_balance` | **Required.** Interpolate the bidding price. More information [below](#entry-price-without-orderbook-enabled).
|
| `entry_pricing.price_last_balance` | **Required.** Interpolate the bidding price. More information [below](#entry-price-without-orderbook-enabled).
|
||||||
| `entry_pricing.use_order_book` | Enable entering using the rates in [Order Book Entry](#entry-price-with-orderbook-enabled). <br> *Defaults to `true`.*<br> **Datatype:** Boolean
|
| `entry_pricing.use_order_book` | Enable entering using the rates in [Order Book Entry](#entry-price-with-orderbook-enabled). <br> *Defaults to `True`.*<br> **Datatype:** Boolean
|
||||||
| `entry_pricing.order_book_top` | Bot will use the top N rate in Order Book "price_side" to enter a trade. I.e. a value of 2 will allow the bot to pick the 2nd entry in [Order Book Entry](#entry-price-with-orderbook-enabled). <br>*Defaults to `1`.* <br> **Datatype:** Positive Integer
|
| `entry_pricing.order_book_top` | Bot will use the top N rate in Order Book "price_side" to enter a trade. I.e. a value of 2 will allow the bot to pick the 2nd entry in [Order Book Entry](#entry-price-with-orderbook-enabled). <br>*Defaults to `1`.* <br> **Datatype:** Positive Integer
|
||||||
| `entry_pricing. check_depth_of_market.enabled` | Do not enter if the difference of buy orders and sell orders is met in Order Book. [Check market depth](#check-depth-of-market). <br>*Defaults to `false`.* <br> **Datatype:** Boolean
|
| `entry_pricing. check_depth_of_market.enabled` | Do not enter if the difference of buy orders and sell orders is met in Order Book. [Check market depth](#check-depth-of-market). <br>*Defaults to `false`.* <br> **Datatype:** Boolean
|
||||||
| `entry_pricing. check_depth_of_market.bids_to_ask_delta` | The difference ratio of buy orders and sell orders found in Order Book. A value below 1 means sell order size is greater, while value greater than 1 means buy order size is higher. [Check market depth](#check-depth-of-market) <br> *Defaults to `0`.* <br> **Datatype:** Float (as ratio)
|
| `entry_pricing. check_depth_of_market.bids_to_ask_delta` | The difference ratio of buy orders and sell orders found in Order Book. A value below 1 means sell order size is greater, while value greater than 1 means buy order size is higher. [Check market depth](#check-depth-of-market) <br> *Defaults to `0`.* <br> **Datatype:** Float (as ratio)
|
||||||
| `exit_pricing.price_side` | Select the side of the spread the bot should look at to get the exit rate. [More information below](#exit-price-side).<br> *Defaults to `"same"`.* <br> **Datatype:** String (either `ask`, `bid`, `same` or `other`).
|
| `exit_pricing.price_side` | Select the side of the spread the bot should look at to get the exit rate. [More information below](#exit-price-side).<br> *Defaults to `same`.* <br> **Datatype:** String (either `ask`, `bid`, `same` or `other`).
|
||||||
| `exit_pricing.price_last_balance` | Interpolate the exiting price. More information [below](#exit-price-without-orderbook-enabled).
|
| `exit_pricing.price_last_balance` | Interpolate the exiting price. More information [below](#exit-price-without-orderbook-enabled).
|
||||||
| `exit_pricing.use_order_book` | Enable exiting of open trades using [Order Book Exit](#exit-price-with-orderbook-enabled). <br> *Defaults to `true`.*<br> **Datatype:** Boolean
|
| `exit_pricing.use_order_book` | Enable exiting of open trades using [Order Book Exit](#exit-price-with-orderbook-enabled). <br> *Defaults to `True`.*<br> **Datatype:** Boolean
|
||||||
| `exit_pricing.order_book_top` | Bot will use the top N rate in Order Book "price_side" to exit. I.e. a value of 2 will allow the bot to pick the 2nd ask rate in [Order Book Exit](#exit-price-with-orderbook-enabled)<br>*Defaults to `1`.* <br> **Datatype:** Positive Integer
|
| `exit_pricing.order_book_top` | Bot will use the top N rate in Order Book "price_side" to exit. I.e. a value of 2 will allow the bot to pick the 2nd ask rate in [Order Book Exit](#exit-price-with-orderbook-enabled)<br>*Defaults to `1`.* <br> **Datatype:** Positive Integer
|
||||||
| `custom_price_max_distance_ratio` | Configure maximum distance ratio between current and custom entry or exit price. <br>*Defaults to `0.02` (2%).*<br> **Datatype:** Positive float
|
| `custom_price_max_distance_ratio` | Configure maximum distance ratio between current and custom entry or exit price. <br>*Defaults to `0.02` (2%).*<br> **Datatype:** Positive float
|
||||||
| | **TODO**
|
| | **TODO**
|
||||||
| `use_exit_signal` | Use exit signals produced by the strategy in addition to the `minimal_roi`. <br>Setting this to false disables the usage of `"exit_long"` and `"exit_short"` columns. Has no influence on other exit methods (Stoploss, ROI, callbacks). [Strategy Override](#parameters-in-the-strategy). <br>*Defaults to `true`.* <br> **Datatype:** Boolean
|
| `use_exit_signal` | Use exit signals produced by the strategy in addition to the `minimal_roi`. [Strategy Override](#parameters-in-the-strategy). <br>*Defaults to `true`.* <br> **Datatype:** Boolean
|
||||||
| `exit_profit_only` | Wait until the bot reaches `exit_profit_offset` before taking an exit decision. [Strategy Override](#parameters-in-the-strategy). <br>*Defaults to `false`.* <br> **Datatype:** Boolean
|
| `exit_profit_only` | Wait until the bot reaches `exit_profit_offset` before taking an exit decision. [Strategy Override](#parameters-in-the-strategy). <br>*Defaults to `false`.* <br> **Datatype:** Boolean
|
||||||
| `exit_profit_offset` | Exit-signal is only active above this value. Only active in combination with `exit_profit_only=True`. [Strategy Override](#parameters-in-the-strategy). <br>*Defaults to `0.0`.* <br> **Datatype:** Float (as ratio)
|
| `exit_profit_offset` | Exit-signal is only active above this value. Only active in combination with `exit_profit_only=True`. [Strategy Override](#parameters-in-the-strategy). <br>*Defaults to `0.0`.* <br> **Datatype:** Float (as ratio)
|
||||||
| `ignore_roi_if_entry_signal` | Do not exit if the entry signal is still active. This setting takes preference over `minimal_roi` and `use_exit_signal`. [Strategy Override](#parameters-in-the-strategy). <br>*Defaults to `false`.* <br> **Datatype:** Boolean
|
| `ignore_roi_if_entry_signal` | Do not exit if the entry signal is still active. This setting takes preference over `minimal_roi` and `use_exit_signal`. [Strategy Override](#parameters-in-the-strategy). <br>*Defaults to `false`.* <br> **Datatype:** Boolean
|
||||||
@@ -216,45 +187,44 @@ Mandatory parameters are marked as **Required**, which means that they are requi
|
|||||||
| `position_adjustment_enable` | Enables the strategy to use position adjustments (additional buys or sells). [More information here](strategy-callbacks.md#adjust-trade-position). <br> [Strategy Override](#parameters-in-the-strategy). <br>*Defaults to `false`.*<br> **Datatype:** Boolean
|
| `position_adjustment_enable` | Enables the strategy to use position adjustments (additional buys or sells). [More information here](strategy-callbacks.md#adjust-trade-position). <br> [Strategy Override](#parameters-in-the-strategy). <br>*Defaults to `false`.*<br> **Datatype:** Boolean
|
||||||
| `max_entry_position_adjustment` | Maximum additional order(s) for each open trade on top of the first entry Order. Set it to `-1` for unlimited additional orders. [More information here](strategy-callbacks.md#adjust-trade-position). <br> [Strategy Override](#parameters-in-the-strategy). <br>*Defaults to `-1`.*<br> **Datatype:** Positive Integer or -1
|
| `max_entry_position_adjustment` | Maximum additional order(s) for each open trade on top of the first entry Order. Set it to `-1` for unlimited additional orders. [More information here](strategy-callbacks.md#adjust-trade-position). <br> [Strategy Override](#parameters-in-the-strategy). <br>*Defaults to `-1`.*<br> **Datatype:** Positive Integer or -1
|
||||||
| | **Exchange**
|
| | **Exchange**
|
||||||
| `exchange.name` | **Required.** Name of the exchange class to use. <br> **Datatype:** String
|
| `exchange.name` | **Required.** Name of the exchange class to use. [List below](#user-content-what-values-for-exchangename). <br> **Datatype:** String
|
||||||
|
| `exchange.sandbox` | Use the 'sandbox' version of the exchange, where the exchange provides a sandbox for risk-free integration. See [here](sandbox-testing.md) in more details.<br> **Datatype:** Boolean
|
||||||
| `exchange.key` | API key to use for the exchange. Only required when you are in production mode.<br>**Keep it in secret, do not disclose publicly.** <br> **Datatype:** String
|
| `exchange.key` | API key to use for the exchange. Only required when you are in production mode.<br>**Keep it in secret, do not disclose publicly.** <br> **Datatype:** String
|
||||||
| `exchange.secret` | API secret to use for the exchange. Only required when you are in production mode.<br>**Keep it in secret, do not disclose publicly.** <br> **Datatype:** String
|
| `exchange.secret` | API secret to use for the exchange. Only required when you are in production mode.<br>**Keep it in secret, do not disclose publicly.** <br> **Datatype:** String
|
||||||
| `exchange.password` | API password to use for the exchange. Only required when you are in production mode and for exchanges that use password for API requests.<br>**Keep it in secret, do not disclose publicly.** <br> **Datatype:** String
|
| `exchange.password` | API password to use for the exchange. Only required when you are in production mode and for exchanges that use password for API requests.<br>**Keep it in secret, do not disclose publicly.** <br> **Datatype:** String
|
||||||
| `exchange.uid` | API uid to use for the exchange. Only required when you are in production mode and for exchanges that use uid for API requests.<br>**Keep it in secret, do not disclose publicly.** <br> **Datatype:** String
|
| `exchange.uid` | API uid to use for the exchange. Only required when you are in production mode and for exchanges that use uid for API requests.<br>**Keep it in secret, do not disclose publicly.** <br> **Datatype:** String
|
||||||
| `exchange.pair_whitelist` | List of pairs to use by the bot for trading and to check for potential trades during backtesting. Supports regex pairs as `.*/BTC`. Not used by VolumePairList. [More information](plugins.md#pairlists-and-pairlist-handlers). <br> **Datatype:** List
|
| `exchange.pair_whitelist` | List of pairs to use by the bot for trading and to check for potential trades during backtesting. Supports regex pairs as `.*/BTC`. Not used by VolumePairList. [More information](plugins.md#pairlists-and-pairlist-handlers). <br> **Datatype:** List
|
||||||
| `exchange.pair_blacklist` | List of pairs the bot must absolutely avoid for trading and backtesting. [More information](plugins.md#pairlists-and-pairlist-handlers). <br> **Datatype:** List
|
| `exchange.pair_blacklist` | List of pairs the bot must absolutely avoid for trading and backtesting. [More information](plugins.md#pairlists-and-pairlist-handlers). <br> **Datatype:** List
|
||||||
| `exchange.ccxt_config` | Additional CCXT parameters passed to both ccxt instances (sync and async). This is usually the correct place for additional ccxt configurations. Parameters may differ from exchange to exchange and are documented in the [ccxt documentation](https://docs.ccxt.com/#/README?id=overriding-exchange-properties-upon-instantiation). Please avoid adding exchange secrets here (use the dedicated fields instead), as they may be contained in logs. <br> **Datatype:** Dict
|
| `exchange.ccxt_config` | Additional CCXT parameters passed to both ccxt instances (sync and async). This is usually the correct place for additional ccxt configurations. Parameters may differ from exchange to exchange and are documented in the [ccxt documentation](https://ccxt.readthedocs.io/en/latest/manual.html#instantiation). Please avoid adding exchange secrets here (use the dedicated fields instead), as they may be contained in logs. <br> **Datatype:** Dict
|
||||||
| `exchange.ccxt_sync_config` | Additional CCXT parameters passed to the regular (sync) ccxt instance. Parameters may differ from exchange to exchange and are documented in the [ccxt documentation](https://docs.ccxt.com/#/README?id=overriding-exchange-properties-upon-instantiation) <br> **Datatype:** Dict
|
| `exchange.ccxt_sync_config` | Additional CCXT parameters passed to the regular (sync) ccxt instance. Parameters may differ from exchange to exchange and are documented in the [ccxt documentation](https://ccxt.readthedocs.io/en/latest/manual.html#instantiation) <br> **Datatype:** Dict
|
||||||
| `exchange.ccxt_async_config` | Additional CCXT parameters passed to the async ccxt instance. Parameters may differ from exchange to exchange and are documented in the [ccxt documentation](https://docs.ccxt.com/#/README?id=overriding-exchange-properties-upon-instantiation) <br> **Datatype:** Dict
|
| `exchange.ccxt_async_config` | Additional CCXT parameters passed to the async ccxt instance. Parameters may differ from exchange to exchange and are documented in the [ccxt documentation](https://ccxt.readthedocs.io/en/latest/manual.html#instantiation) <br> **Datatype:** Dict
|
||||||
| `exchange.enable_ws` | Enable the usage of Websockets for the exchange. <br>[More information](#consuming-exchange-websockets).<br>*Defaults to `true`.* <br> **Datatype:** Boolean
|
|
||||||
| `exchange.markets_refresh_interval` | The interval in minutes in which markets are reloaded. <br>*Defaults to `60` minutes.* <br> **Datatype:** Positive Integer
|
| `exchange.markets_refresh_interval` | The interval in minutes in which markets are reloaded. <br>*Defaults to `60` minutes.* <br> **Datatype:** Positive Integer
|
||||||
| `exchange.skip_open_order_update` | Skips open order updates on startup should the exchange cause problems. Only relevant in live conditions.<br>*Defaults to `false`*<br> **Datatype:** Boolean
|
| `exchange.skip_pair_validation` | Skip pairlist validation on startup.<br>*Defaults to `false`.*<br> **Datatype:** Boolean
|
||||||
|
| `exchange.skip_open_order_update` | Skips open order updates on startup should the exchange cause problems. Only relevant in live conditions.<br>*Defaults to `false`.*<br> **Datatype:** Boolean
|
||||||
| `exchange.unknown_fee_rate` | Fallback value to use when calculating trading fees. This can be useful for exchanges which have fees in non-tradable currencies. The value provided here will be multiplied with the "fee cost".<br>*Defaults to `None`.*<br> **Datatype:** float
|
| `exchange.unknown_fee_rate` | Fallback value to use when calculating trading fees. This can be useful for exchanges which have fees in non-tradable currencies. The value provided here will be multiplied with the "fee cost".<br>*Defaults to `None`.*<br> **Datatype:** float
|
||||||
| `exchange.log_responses` | Log relevant exchange responses. For debug mode only - use with care.<br>*Defaults to `false`*<br> **Datatype:** Boolean
|
| `exchange.log_responses` | Log relevant exchange responses. For debug mode only - use with care.<br>*Defaults to `false`.*<br> **Datatype:** Boolean
|
||||||
| `exchange.only_from_ccxt` | Prevent data-download from data.binance.vision. Leaving this as false can greatly speed up downloads, but may be problematic if the site is not available.<br>*Defaults to `false`*<br> **Datatype:** Boolean
|
|
||||||
| `experimental.block_bad_exchanges` | Block exchanges known to not work with freqtrade. Leave on default unless you want to test if that exchange works now. <br>*Defaults to `true`.* <br> **Datatype:** Boolean
|
| `experimental.block_bad_exchanges` | Block exchanges known to not work with freqtrade. Leave on default unless you want to test if that exchange works now. <br>*Defaults to `true`.* <br> **Datatype:** Boolean
|
||||||
| | **Plugins**
|
| | **Plugins**
|
||||||
| `edge.*` | Please refer to [edge configuration document](edge.md) for detailed explanation of all possible configuration options.
|
| `edge.*` | Please refer to [edge configuration document](edge.md) for detailed explanation of all possible configuration options.
|
||||||
| `pairlists` | Define one or more pairlists to be used. [More information](plugins.md#pairlists-and-pairlist-handlers). <br>*Defaults to `StaticPairList`.* <br> **Datatype:** List of Dicts
|
| `pairlists` | Define one or more pairlists to be used. [More information](plugins.md#pairlists-and-pairlist-handlers). <br>*Defaults to `StaticPairList`.* <br> **Datatype:** List of Dicts
|
||||||
|
| `protections` | Define one or more protections to be used. [More information](plugins.md#protections). <br> **Datatype:** List of Dicts
|
||||||
| | **Telegram**
|
| | **Telegram**
|
||||||
| `telegram.enabled` | Enable the usage of Telegram. <br> **Datatype:** Boolean
|
| `telegram.enabled` | Enable the usage of Telegram. <br> **Datatype:** Boolean
|
||||||
| `telegram.token` | Your Telegram bot token. Only required if `telegram.enabled` is `true`. <br>**Keep it in secret, do not disclose publicly.** <br> **Datatype:** String
|
| `telegram.token` | Your Telegram bot token. Only required if `telegram.enabled` is `true`. <br>**Keep it in secret, do not disclose publicly.** <br> **Datatype:** String
|
||||||
| `telegram.chat_id` | Your personal Telegram account id. Only required if `telegram.enabled` is `true`. <br>**Keep it in secret, do not disclose publicly.** <br> **Datatype:** String
|
| `telegram.chat_id` | Your personal Telegram account id. Only required if `telegram.enabled` is `true`. <br>**Keep it in secret, do not disclose publicly.** <br> **Datatype:** String
|
||||||
| `telegram.balance_dust_level` | Dust-level (in stake currency) - currencies with a balance below this will not be shown by `/balance`. <br> **Datatype:** float
|
| `telegram.balance_dust_level` | Dust-level (in stake currency) - currencies with a balance below this will not be shown by `/balance`. <br> **Datatype:** float
|
||||||
| `telegram.reload` | Allow "reload" buttons on telegram messages. <br>*Defaults to `true`.*<br> **Datatype:** boolean
|
| `telegram.reload` | Allow "reload" buttons on telegram messages. <br>*Defaults to `True`.*<br> **Datatype:** boolean
|
||||||
| `telegram.notification_settings.*` | Detailed notification settings. Refer to the [telegram documentation](telegram-usage.md) for details.<br> **Datatype:** dictionary
|
| `telegram.notification_settings.*` | Detailed notification settings. Refer to the [telegram documentation](telegram-usage.md) for details.<br> **Datatype:** dictionary
|
||||||
| `telegram.allow_custom_messages` | Enable the sending of Telegram messages from strategies via the dataprovider.send_msg() function. <br> **Datatype:** Boolean
|
|
||||||
| | **Webhook**
|
| | **Webhook**
|
||||||
| `webhook.enabled` | Enable usage of Webhook notifications <br> **Datatype:** Boolean
|
| `webhook.enabled` | Enable usage of Webhook notifications <br> **Datatype:** Boolean
|
||||||
| `webhook.url` | URL for the webhook. Only required if `webhook.enabled` is `true`. See the [webhook documentation](webhook-config.md) for more details. <br> **Datatype:** String
|
| `webhook.url` | URL for the webhook. Only required if `webhook.enabled` is `true`. See the [webhook documentation](webhook-config.md) for more details. <br> **Datatype:** String
|
||||||
| `webhook.entry` | Payload to send on entry. Only required if `webhook.enabled` is `true`. See the [webhook documentation](webhook-config.md) for more details. <br> **Datatype:** String
|
| `webhook.webhookentry` | Payload to send on entry. Only required if `webhook.enabled` is `true`. See the [webhook documentation](webhook-config.md) for more details. <br> **Datatype:** String
|
||||||
| `webhook.entry_cancel` | Payload to send on entry order cancel. Only required if `webhook.enabled` is `true`. See the [webhook documentation](webhook-config.md) for more details. <br> **Datatype:** String
|
| `webhook.webhookentrycancel` | Payload to send on entry order cancel. Only required if `webhook.enabled` is `true`. See the [webhook documentation](webhook-config.md) for more details. <br> **Datatype:** String
|
||||||
| `webhook.entry_fill` | Payload to send on entry order filled. Only required if `webhook.enabled` is `true`. See the [webhook documentation](webhook-config.md) for more details. <br> **Datatype:** String
|
| `webhook.webhookentryfill` | Payload to send on entry order filled. Only required if `webhook.enabled` is `true`. See the [webhook documentation](webhook-config.md) for more details. <br> **Datatype:** String
|
||||||
| `webhook.exit` | Payload to send on exit. Only required if `webhook.enabled` is `true`. See the [webhook documentation](webhook-config.md) for more details. <br> **Datatype:** String
|
| `webhook.webhookexit` | Payload to send on exit. Only required if `webhook.enabled` is `true`. See the [webhook documentation](webhook-config.md) for more details. <br> **Datatype:** String
|
||||||
| `webhook.exit_cancel` | Payload to send on exit order cancel. Only required if `webhook.enabled` is `true`. See the [webhook documentation](webhook-config.md) for more details. <br> **Datatype:** String
|
| `webhook.webhookexitcancel` | Payload to send on exit order cancel. Only required if `webhook.enabled` is `true`. See the [webhook documentation](webhook-config.md) for more details. <br> **Datatype:** String
|
||||||
| `webhook.exit_fill` | Payload to send on exit order filled. Only required if `webhook.enabled` is `true`. See the [webhook documentation](webhook-config.md) for more details. <br> **Datatype:** String
|
| `webhook.webhookexitfill` | Payload to send on exit order filled. Only required if `webhook.enabled` is `true`. See the [webhook documentation](webhook-config.md) for more details. <br> **Datatype:** String
|
||||||
| `webhook.status` | Payload to send on status calls. Only required if `webhook.enabled` is `true`. See the [webhook documentation](webhook-config.md) for more details. <br> **Datatype:** String
|
| `webhook.webhookstatus` | Payload to send on status calls. Only required if `webhook.enabled` is `true`. See the [webhook documentation](webhook-config.md) for more details. <br> **Datatype:** String
|
||||||
| `webhook.allow_custom_messages` | Enable the sending of Webhook messages from strategies via the dataprovider.send_msg() function. <br> **Datatype:** Boolean
|
|
||||||
| | **Rest API / FreqUI / Producer-Consumer**
|
| | **Rest API / FreqUI / Producer-Consumer**
|
||||||
| `api_server.enabled` | Enable usage of API Server. See the [API Server documentation](rest-api.md) for more details. <br> **Datatype:** Boolean
|
| `api_server.enabled` | Enable usage of API Server. See the [API Server documentation](rest-api.md) for more details. <br> **Datatype:** Boolean
|
||||||
| `api_server.listen_ip_address` | Bind IP address. See the [API Server documentation](rest-api.md) for more details. <br> **Datatype:** IPv4
|
| `api_server.listen_ip_address` | Bind IP address. See the [API Server documentation](rest-api.md) for more details. <br> **Datatype:** IPv4
|
||||||
@@ -271,7 +241,7 @@ Mandatory parameters are marked as **Required**, which means that they are requi
|
|||||||
| `disable_dataframe_checks` | Disable checking the OHLCV dataframe returned from the strategy methods for correctness. Only use when intentionally changing the dataframe and understand what you are doing. [Strategy Override](#parameters-in-the-strategy).<br> *Defaults to `False`*. <br> **Datatype:** Boolean
|
| `disable_dataframe_checks` | Disable checking the OHLCV dataframe returned from the strategy methods for correctness. Only use when intentionally changing the dataframe and understand what you are doing. [Strategy Override](#parameters-in-the-strategy).<br> *Defaults to `False`*. <br> **Datatype:** Boolean
|
||||||
| `internals.process_throttle_secs` | Set the process throttle, or minimum loop duration for one bot iteration loop. Value in second. <br>*Defaults to `5` seconds.* <br> **Datatype:** Positive Integer
|
| `internals.process_throttle_secs` | Set the process throttle, or minimum loop duration for one bot iteration loop. Value in second. <br>*Defaults to `5` seconds.* <br> **Datatype:** Positive Integer
|
||||||
| `internals.heartbeat_interval` | Print heartbeat message every N seconds. Set to 0 to disable heartbeat messages. <br>*Defaults to `60` seconds.* <br> **Datatype:** Positive Integer or 0
|
| `internals.heartbeat_interval` | Print heartbeat message every N seconds. Set to 0 to disable heartbeat messages. <br>*Defaults to `60` seconds.* <br> **Datatype:** Positive Integer or 0
|
||||||
| `internals.sd_notify` | Enables use of the sd_notify protocol to tell systemd service manager about changes in the bot state and issue keep-alive pings. See [here](advanced-setup.md#configure-the-bot-running-as-a-systemd-service) for more details. <br> **Datatype:** Boolean
|
| `internals.sd_notify` | Enables use of the sd_notify protocol to tell systemd service manager about changes in the bot state and issue keep-alive pings. See [here](installation.md#7-optional-configure-freqtrade-as-a-systemd-service) for more details. <br> **Datatype:** Boolean
|
||||||
| `strategy` | **Required** Defines Strategy class to use. Recommended to be set via `--strategy NAME`. <br> **Datatype:** ClassName
|
| `strategy` | **Required** Defines Strategy class to use. Recommended to be set via `--strategy NAME`. <br> **Datatype:** ClassName
|
||||||
| `strategy_path` | Adds an additional strategy lookup path (must be a directory). <br> **Datatype:** String
|
| `strategy_path` | Adds an additional strategy lookup path (must be a directory). <br> **Datatype:** String
|
||||||
| `recursive_strategy_search` | Set to `true` to recursively search sub-directories inside `user_data/strategies` for a strategy. <br> **Datatype:** Boolean
|
| `recursive_strategy_search` | Set to `true` to recursively search sub-directories inside `user_data/strategies` for a strategy. <br> **Datatype:** Boolean
|
||||||
@@ -279,9 +249,8 @@ Mandatory parameters are marked as **Required**, which means that they are requi
|
|||||||
| `db_url` | Declares database URL to use. NOTE: This defaults to `sqlite:///tradesv3.dryrun.sqlite` if `dry_run` is `true`, and to `sqlite:///tradesv3.sqlite` for production instances. <br> **Datatype:** String, SQLAlchemy connect string
|
| `db_url` | Declares database URL to use. NOTE: This defaults to `sqlite:///tradesv3.dryrun.sqlite` if `dry_run` is `true`, and to `sqlite:///tradesv3.sqlite` for production instances. <br> **Datatype:** String, SQLAlchemy connect string
|
||||||
| `logfile` | Specifies logfile name. Uses a rolling strategy for log file rotation for 10 files with the 1MB limit per file. <br> **Datatype:** String
|
| `logfile` | Specifies logfile name. Uses a rolling strategy for log file rotation for 10 files with the 1MB limit per file. <br> **Datatype:** String
|
||||||
| `add_config_files` | Additional config files. These files will be loaded and merged with the current config file. The files are resolved relative to the initial file.<br> *Defaults to `[]`*. <br> **Datatype:** List of strings
|
| `add_config_files` | Additional config files. These files will be loaded and merged with the current config file. The files are resolved relative to the initial file.<br> *Defaults to `[]`*. <br> **Datatype:** List of strings
|
||||||
| `dataformat_ohlcv` | Data format to use to store historical candle (OHLCV) data. <br> *Defaults to `feather`*. <br> **Datatype:** String
|
| `dataformat_ohlcv` | Data format to use to store historical candle (OHLCV) data. <br> *Defaults to `json`*. <br> **Datatype:** String
|
||||||
| `dataformat_trades` | Data format to use to store historical trades data. <br> *Defaults to `feather`*. <br> **Datatype:** String
|
| `dataformat_trades` | Data format to use to store historical trades data. <br> *Defaults to `jsongz`*. <br> **Datatype:** String
|
||||||
| `reduce_df_footprint` | Recast all numeric columns to float32/int32, with the objective of reducing ram/disk usage (and decreasing train/inference timing in FreqAI). (Currently only affects FreqAI use-cases) <br> **Datatype:** Boolean. <br> Default: `False`.
|
|
||||||
|
|
||||||
### Parameters in the strategy
|
### Parameters in the strategy
|
||||||
|
|
||||||
@@ -291,7 +260,6 @@ Values set in the configuration file always overwrite values set in the strategy
|
|||||||
* `minimal_roi`
|
* `minimal_roi`
|
||||||
* `timeframe`
|
* `timeframe`
|
||||||
* `stoploss`
|
* `stoploss`
|
||||||
* `max_open_trades`
|
|
||||||
* `trailing_stop`
|
* `trailing_stop`
|
||||||
* `trailing_stop_positive`
|
* `trailing_stop_positive`
|
||||||
* `trailing_stop_positive_offset`
|
* `trailing_stop_positive_offset`
|
||||||
@@ -302,10 +270,10 @@ Values set in the configuration file always overwrite values set in the strategy
|
|||||||
* `order_time_in_force`
|
* `order_time_in_force`
|
||||||
* `unfilledtimeout`
|
* `unfilledtimeout`
|
||||||
* `disable_dataframe_checks`
|
* `disable_dataframe_checks`
|
||||||
* `use_exit_signal`
|
- `use_exit_signal`
|
||||||
* `exit_profit_only`
|
* `exit_profit_only`
|
||||||
* `exit_profit_offset`
|
- `exit_profit_offset`
|
||||||
* `ignore_roi_if_entry_signal`
|
- `ignore_roi_if_entry_signal`
|
||||||
* `ignore_buying_expired_candle_after`
|
* `ignore_buying_expired_candle_after`
|
||||||
* `position_adjustment_enable`
|
* `position_adjustment_enable`
|
||||||
* `max_entry_position_adjustment`
|
* `max_entry_position_adjustment`
|
||||||
@@ -318,37 +286,18 @@ There are several methods to configure how much of the stake currency the bot wi
|
|||||||
|
|
||||||
The minimum stake amount will depend on exchange and pair and is usually listed in the exchange support pages.
|
The minimum stake amount will depend on exchange and pair and is usually listed in the exchange support pages.
|
||||||
|
|
||||||
Assuming the minimum tradable amount for XRP/USD is 20 XRP (given by the exchange), and the price is 0.6\$, the minimum stake amount to buy this pair is `20 * 0.6 ~= 12`.
|
Assuming the minimum tradable amount for XRP/USD is 20 XRP (given by the exchange), and the price is 0.6$, the minimum stake amount to buy this pair is `20 * 0.6 ~= 12`.
|
||||||
This exchange has also a limit on USD - where all orders must be > 10\$ - which however does not apply in this case.
|
This exchange has also a limit on USD - where all orders must be > 10$ - which however does not apply in this case.
|
||||||
|
|
||||||
To guarantee safe execution, freqtrade will not allow buying with a stake-amount of 10.1\$, instead, it'll make sure that there's enough space to place a stoploss below the pair (+ an offset, defined by `amount_reserve_percent`, which defaults to 5%).
|
To guarantee safe execution, freqtrade will not allow buying with a stake-amount of 10.1$, instead, it'll make sure that there's enough space to place a stoploss below the pair (+ an offset, defined by `amount_reserve_percent`, which defaults to 5%).
|
||||||
|
|
||||||
With a reserve of 5%, the minimum stake amount would be ~12.6\$ (`12 * (1 + 0.05)`). If we take into account a stoploss of 10% on top of that - we'd end up with a value of ~14\$ (`12.6 / (1 - 0.1)`).
|
With a reserve of 5%, the minimum stake amount would be ~12.6$ (`12 * (1 + 0.05)`). If we take into account a stoploss of 10% on top of that - we'd end up with a value of ~14$ (`12.6 / (1 - 0.1)`).
|
||||||
|
|
||||||
To limit this calculation in case of large stoploss values, the calculated minimum stake-limit will never be more than 50% above the real limit.
|
To limit this calculation in case of large stoploss values, the calculated minimum stake-limit will never be more than 50% above the real limit.
|
||||||
|
|
||||||
!!! Warning
|
!!! Warning
|
||||||
Since the limits on exchanges are usually stable and are not updated often, some pairs can show pretty high minimum limits, simply because the price increased a lot since the last limit adjustment by the exchange. Freqtrade adjusts the stake-amount to this value, unless it's > 30% more than the calculated/desired stake-amount - in which case the trade is rejected.
|
Since the limits on exchanges are usually stable and are not updated often, some pairs can show pretty high minimum limits, simply because the price increased a lot since the last limit adjustment by the exchange. Freqtrade adjusts the stake-amount to this value, unless it's > 30% more than the calculated/desired stake-amount - in which case the trade is rejected.
|
||||||
|
|
||||||
#### Dry-run wallet
|
|
||||||
|
|
||||||
When running in dry-run mode, the bot will use a simulated wallet to execute trades. The starting balance of this wallet is defined by `dry_run_wallet` (defaults to 1000).
|
|
||||||
For more complex scenarios, you can also assign a dictionary to `dry_run_wallet` to define the starting balance for each currency.
|
|
||||||
|
|
||||||
```json
|
|
||||||
"dry_run_wallet": {
|
|
||||||
"BTC": 0.01,
|
|
||||||
"ETH": 2,
|
|
||||||
"USDT": 1000
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Command line options (`--dry-run-wallet`) can be used to override the configuration value, but only for the float value, not for the dictionary. If you'd like to use the dictionary, please adjust the configuration file.
|
|
||||||
|
|
||||||
!!! Note
|
|
||||||
Balances not in stake-currency will not be used for trading, but are shown as part of the wallet balance.
|
|
||||||
On Cross-margin exchanges, the wallet balance may be used to calculate the available collateral for trading.
|
|
||||||
|
|
||||||
#### Tradable balance
|
#### Tradable balance
|
||||||
|
|
||||||
By default, the bot assumes that the `complete amount - 1%` is at its disposal, and when using [dynamic stake amount](#dynamic-stake-amount), it will split the complete balance into `max_open_trades` buckets per trade.
|
By default, the bot assumes that the `complete amount - 1%` is at it's disposal, and when using [dynamic stake amount](#dynamic-stake-amount), it will split the complete balance into `max_open_trades` buckets per trade.
|
||||||
@@ -369,13 +318,11 @@ For example, if you have 10 ETH available in your wallet on the exchange and `tr
|
|||||||
To fully utilize compounding profits when using multiple bots on the same exchange account, you'll want to limit each bot to a certain starting balance.
|
To fully utilize compounding profits when using multiple bots on the same exchange account, you'll want to limit each bot to a certain starting balance.
|
||||||
This can be accomplished by setting `available_capital` to the desired starting balance.
|
This can be accomplished by setting `available_capital` to the desired starting balance.
|
||||||
|
|
||||||
Assuming your account has 10000 USDT and you want to run 2 different strategies on this exchange.
|
Assuming your account has 10.000 USDT and you want to run 2 different strategies on this exchange.
|
||||||
You'd set `available_capital=5000` - granting each bot an initial capital of 5000 USDT.
|
You'd set `available_capital=5000` - granting each bot an initial capital of 5000 USDT.
|
||||||
The bot will then split this starting balance equally into `max_open_trades` buckets.
|
The bot will then split this starting balance equally into `max_open_trades` buckets.
|
||||||
Profitable trades will result in increased stake-sizes for this bot - without affecting the stake-sizes of the other bot.
|
Profitable trades will result in increased stake-sizes for this bot - without affecting the stake-sizes of the other bot.
|
||||||
|
|
||||||
Adjusting `available_capital` requires reloading the configuration to take effect. Adjusting the `available_capital` adds the difference between the previous `available_capital` and the new `available_capital`. Decreasing the available capital when trades are open doesn't exit the trades. The difference is returned to the wallet when the trades conclude. The outcome of this differs depending on the price movement between the adjustment and exiting the trades.
|
|
||||||
|
|
||||||
!!! Warning "Incompatible with `tradable_balance_ratio`"
|
!!! Warning "Incompatible with `tradable_balance_ratio`"
|
||||||
Setting this option will replace any configuration of `tradable_balance_ratio`.
|
Setting this option will replace any configuration of `tradable_balance_ratio`.
|
||||||
|
|
||||||
@@ -388,9 +335,9 @@ To overcome this, the option `amend_last_stake_amount` can be set to `True`, whi
|
|||||||
|
|
||||||
In the example above this would mean:
|
In the example above this would mean:
|
||||||
|
|
||||||
* Trade1: 400 USDT
|
- Trade1: 400 USDT
|
||||||
* Trade2: 400 USDT
|
- Trade2: 400 USDT
|
||||||
* Trade3: 200 USDT
|
- Trade3: 200 USDT
|
||||||
|
|
||||||
!!! Note
|
!!! Note
|
||||||
This option only applies with [Static stake amount](#static-stake-amount) - since [Dynamic stake amount](#dynamic-stake-amount) divides the balances evenly.
|
This option only applies with [Static stake amount](#static-stake-amount) - since [Dynamic stake amount](#dynamic-stake-amount) divides the balances evenly.
|
||||||
@@ -408,7 +355,7 @@ This setting works in combination with `max_open_trades`. The maximum capital en
|
|||||||
For example, the bot will at most use (0.05 BTC x 3) = 0.15 BTC, assuming a configuration of `max_open_trades=3` and `stake_amount=0.05`.
|
For example, the bot will at most use (0.05 BTC x 3) = 0.15 BTC, assuming a configuration of `max_open_trades=3` and `stake_amount=0.05`.
|
||||||
|
|
||||||
!!! Note
|
!!! Note
|
||||||
This setting respects the [available balance configuration](#tradable-balance).
|
This setting respects the [available balance configuration](#available-balance).
|
||||||
|
|
||||||
#### Dynamic stake amount
|
#### Dynamic stake amount
|
||||||
|
|
||||||
@@ -447,8 +394,6 @@ Or another example if your position adjustment assumes it can do 1 additional bu
|
|||||||
|
|
||||||
--8<-- "includes/pricing.md"
|
--8<-- "includes/pricing.md"
|
||||||
|
|
||||||
## Further Configuration details
|
|
||||||
|
|
||||||
### Understand minimal_roi
|
### Understand minimal_roi
|
||||||
|
|
||||||
The `minimal_roi` configuration parameter is a JSON object where the key is a duration
|
The `minimal_roi` configuration parameter is a JSON object where the key is a duration
|
||||||
@@ -555,13 +500,13 @@ Configuration:
|
|||||||
Please carefully read the [Market order pricing](#market-order-pricing) section when using market orders.
|
Please carefully read the section [Market order pricing](#market-order-pricing) section when using market orders.
|
||||||
|
|
||||||
!!! Note "Stoploss on exchange"
|
!!! Note "Stoploss on exchange"
|
||||||
`order_types.stoploss_on_exchange_interval` is not mandatory. Do not change its value if you are
|
`stoploss_on_exchange_interval` is not mandatory. Do not change its value if you are
|
||||||
unsure of what you are doing. For more information about how stoploss works please
|
unsure of what you are doing. For more information about how stoploss works please
|
||||||
refer to [the stoploss documentation](stoploss.md).
|
refer to [the stoploss documentation](stoploss.md).
|
||||||
|
|
||||||
If `order_types.stoploss_on_exchange` is enabled and the stoploss is cancelled manually on the exchange, then the bot will create a new stoploss order.
|
If `stoploss_on_exchange` is enabled and the stoploss is cancelled manually on the exchange, then the bot will create a new stoploss order.
|
||||||
|
|
||||||
!!! Warning "Warning: order_types.stoploss_on_exchange failures"
|
!!! Warning "Warning: stoploss_on_exchange failures"
|
||||||
If stoploss on exchange creation fails for some reason, then an "emergency exit" is initiated. By default, this will exit the trade using a market order. The order-type for the emergency-exit can be changed by setting the `emergency_exit` value in the `order_types` dictionary - however, this is not advised.
|
If stoploss on exchange creation fails for some reason, then an "emergency exit" is initiated. By default, this will exit the trade using a market order. The order-type for the emergency-exit can be changed by setting the `emergency_exit` value in the `order_types` dictionary - however, this is not advised.
|
||||||
|
|
||||||
### Understand order_time_in_force
|
### Understand order_time_in_force
|
||||||
@@ -587,7 +532,7 @@ is automatically cancelled by the exchange.
|
|||||||
**PO (Post only):**
|
**PO (Post only):**
|
||||||
|
|
||||||
Post only order. The order is either placed as a maker order, or it is canceled.
|
Post only order. The order is either placed as a maker order, or it is canceled.
|
||||||
This means the order must be placed on the orderbook for at least some time in an unfilled state.
|
This means the order must be placed on orderbook for at at least time in an unfilled state.
|
||||||
|
|
||||||
#### time_in_force config
|
#### time_in_force config
|
||||||
|
|
||||||
@@ -605,17 +550,10 @@ The possible values are: `GTC` (default), `FOK` or `IOC`.
|
|||||||
```
|
```
|
||||||
|
|
||||||
!!! Warning
|
!!! Warning
|
||||||
This is ongoing work. For now, it is supported only for binance, gate and kucoin.
|
This is ongoing work. For now, it is supported only for binance, gate, ftx and kucoin.
|
||||||
Please don't change the default value unless you know what you are doing and have researched the impact of using different values for your particular exchange.
|
Please don't change the default value unless you know what you are doing and have researched the impact of using different values for your particular exchange.
|
||||||
|
|
||||||
### Fiat conversion
|
### What values can be used for fiat_display_currency?
|
||||||
|
|
||||||
Freqtrade uses the Coingecko API to convert the coin value to it's corresponding fiat value for the Telegram reports.
|
|
||||||
The FIAT currency can be set in the configuration file as `fiat_display_currency`.
|
|
||||||
|
|
||||||
Removing `fiat_display_currency` completely from the configuration will skip initializing coingecko, and will not show any FIAT currency conversion. This has no importance for the correct functioning of the bot.
|
|
||||||
|
|
||||||
#### What values can be used for fiat_display_currency?
|
|
||||||
|
|
||||||
The `fiat_display_currency` configuration parameter sets the base currency to use for the
|
The `fiat_display_currency` configuration parameter sets the base currency to use for the
|
||||||
conversion from coin to fiat in the bot Telegram reports.
|
conversion from coin to fiat in the bot Telegram reports.
|
||||||
@@ -631,53 +569,9 @@ In addition to fiat currencies, a range of crypto currencies is supported.
|
|||||||
The valid values are:
|
The valid values are:
|
||||||
|
|
||||||
```json
|
```json
|
||||||
"BTC", "ETH", "XRP", "LTC", "BCH", "BNB"
|
"BTC", "ETH", "XRP", "LTC", "BCH", "USDT"
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Coingecko Rate limit problems
|
|
||||||
|
|
||||||
On some IP ranges, coingecko is heavily rate-limiting.
|
|
||||||
In such cases, you may want to add your coingecko API key to the configuration.
|
|
||||||
|
|
||||||
``` json
|
|
||||||
{
|
|
||||||
"fiat_display_currency": "USD",
|
|
||||||
"coingecko": {
|
|
||||||
"api_key": "your-api",
|
|
||||||
"is_demo": true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Freqtrade supports both Demo and Pro coingecko API keys.
|
|
||||||
|
|
||||||
The Coingecko API key is NOT required for the bot to function correctly.
|
|
||||||
It is only used for the conversion of coin to fiat in the Telegram reports, which usually also work without API key.
|
|
||||||
|
|
||||||
## Consuming exchange Websockets
|
|
||||||
|
|
||||||
Freqtrade can consume websockets through ccxt.pro.
|
|
||||||
|
|
||||||
Freqtrade aims to ensure data is available at all times.
|
|
||||||
Should the websocket connection fail (or be disabled), the bot will fall back to REST API calls.
|
|
||||||
|
|
||||||
Should you experience problems you suspect are caused by websockets, you can disable these via the setting `exchange.enable_ws`, which defaults to true.
|
|
||||||
|
|
||||||
```jsonc
|
|
||||||
"exchange": {
|
|
||||||
// ...
|
|
||||||
"enable_ws": false,
|
|
||||||
// ...
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Should you be required to use a proxy, please refer to the [proxy section](#using-proxy-with-freqtrade) for more information.
|
|
||||||
|
|
||||||
!!! Info "Rollout"
|
|
||||||
    We're rolling this out slowly, ensuring stability of your bots.
|
|
||||||
Currently, usage is limited to ohlcv data streams.
|
|
||||||
It's also limited to a few exchanges, with new exchanges being added on an ongoing basis.
|
|
||||||
|
|
||||||
## Using Dry-run mode
|
## Using Dry-run mode
|
||||||
|
|
||||||
We recommend starting the bot in the Dry-run mode to see how your bot will
|
We recommend starting the bot in the Dry-run mode to see how your bot will
|
||||||
@@ -697,7 +591,7 @@ creating trades on the exchange.
|
|||||||
|
|
||||||
```json
|
```json
|
||||||
"exchange": {
|
"exchange": {
|
||||||
"name": "binance",
|
"name": "bittrex",
|
||||||
"key": "key",
|
"key": "key",
|
||||||
"secret": "secret",
|
"secret": "secret",
|
||||||
...
|
...
|
||||||
@@ -714,9 +608,8 @@ Once you will be happy with your bot performance running in the Dry-run mode, yo
|
|||||||
* API-keys may or may not be provided. Only Read-Only operations (i.e. operations that do not alter account state) on the exchange are performed in dry-run mode.
|
* API-keys may or may not be provided. Only Read-Only operations (i.e. operations that do not alter account state) on the exchange are performed in dry-run mode.
|
||||||
* Wallets (`/balance`) are simulated based on `dry_run_wallet`.
|
* Wallets (`/balance`) are simulated based on `dry_run_wallet`.
|
||||||
* Orders are simulated, and will not be posted to the exchange.
|
* Orders are simulated, and will not be posted to the exchange.
|
||||||
* Market orders fill based on orderbook volume the moment the order is placed, with a maximum slippage of 5%.
|
* Market orders fill based on orderbook volume the moment the order is placed.
|
||||||
* Limit orders fill once the price reaches the defined level - or time out based on `unfilledtimeout` settings.
|
* Limit orders fill once the price reaches the defined level - or time out based on `unfilledtimeout` settings.
|
||||||
* Limit orders will be converted to market orders if they cross the price by more than 1%, and will be filled immediately based on regular market order rules (see point about Market orders above).
|
|
||||||
* In combination with `stoploss_on_exchange`, the stop_loss price is assumed to be filled.
|
* In combination with `stoploss_on_exchange`, the stop_loss price is assumed to be filled.
|
||||||
* Open orders (not trades, which are stored in the database) are kept open after bot restarts, with the assumption that they were not filled while being offline.
|
* Open orders (not trades, which are stored in the database) are kept open after bot restarts, with the assumption that they were not filled while being offline.
|
||||||
|
|
||||||
@@ -747,7 +640,7 @@ API Keys are usually only required for live trading (trading for real money, bot
|
|||||||
```json
|
```json
|
||||||
{
|
{
|
||||||
"exchange": {
|
"exchange": {
|
||||||
"name": "binance",
|
"name": "bittrex",
|
||||||
"key": "af8ddd35195e9dc500b9a6f799f6f5c93d89193b",
|
"key": "af8ddd35195e9dc500b9a6f799f6f5c93d89193b",
|
||||||
"secret": "08a9dc6db3d7b53e1acebd9275677f4b0a04f1a5",
|
"secret": "08a9dc6db3d7b53e1acebd9275677f4b0a04f1a5",
|
||||||
//"password": "", // Optional, not needed by all exchanges)
|
//"password": "", // Optional, not needed by all exchanges)
|
||||||
@@ -766,10 +659,9 @@ You should also make sure to read the [Exchanges](exchanges.md) section of the d
|
|||||||
|
|
||||||
**NEVER** share your private configuration file or your exchange keys with anyone!
|
**NEVER** share your private configuration file or your exchange keys with anyone!
|
||||||
|
|
||||||
## Using a proxy with Freqtrade
|
### Using proxy with Freqtrade
|
||||||
|
|
||||||
To use a proxy with freqtrade, export your proxy settings using the variables `"HTTP_PROXY"` and `"HTTPS_PROXY"` set to the appropriate values.
|
To use a proxy with freqtrade, export your proxy settings using the variables `"HTTP_PROXY"` and `"HTTPS_PROXY"` set to the appropriate values.
|
||||||
This will have the proxy settings applied to everything (telegram, coingecko, ...) **except** for exchange requests.
|
|
||||||
|
|
||||||
``` bash
|
``` bash
|
||||||
export HTTP_PROXY="http://addr:port"
|
export HTTP_PROXY="http://addr:port"
|
||||||
@@ -777,23 +669,20 @@ export HTTPS_PROXY="http://addr:port"
|
|||||||
freqtrade
|
freqtrade
|
||||||
```
|
```
|
||||||
|
|
||||||
### Proxy exchange requests
|
#### Proxy just exchange requests
|
||||||
|
|
||||||
To use a proxy for exchange connections - you will have to define the proxies as part of the ccxt configuration.
|
To use a proxy just for exchange connections (skips/ignores telegram and coingecko) - you can also define the proxies as part of the ccxt configuration.
|
||||||
|
|
||||||
``` json
|
``` json
|
||||||
{
|
|
||||||
"exchange": {
|
|
||||||
"ccxt_config": {
|
"ccxt_config": {
|
||||||
"httpsProxy": "http://addr:port",
|
"aiohttp_proxy": "http://addr:port",
|
||||||
"wsProxy": "http://addr:port",
|
"proxies": {
|
||||||
}
|
"http": "http://addr:port",
|
||||||
}
|
"https": "http://addr:port"
|
||||||
|
},
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
For more information on available proxy types, please consult the [ccxt proxy documentation](https://docs.ccxt.com/#/README?id=proxy).
|
|
||||||
|
|
||||||
## Next step
|
## Next step
|
||||||
|
|
||||||
Now you have configured your config.json, the next step is to [start your bot](bot-usage.md).
|
Now you have configured your config.json, the next step is to [start your bot](bot-usage.md).
|
||||||
|
|||||||
@@ -5,12 +5,12 @@ You can analyze the results of backtests and trading history easily using Jupyte
|
|||||||
## Quick start with docker
|
## Quick start with docker
|
||||||
|
|
||||||
Freqtrade provides a docker-compose file which starts up a jupyter lab server.
|
Freqtrade provides a docker-compose file which starts up a jupyter lab server.
|
||||||
You can run this server using the following command: `docker compose -f docker/docker-compose-jupyter.yml up`
|
You can run this server using the following command: `docker-compose -f docker/docker-compose-jupyter.yml up`
|
||||||
|
|
||||||
This will create a docker container running jupyter lab, which will be accessible using `https://127.0.0.1:8888/lab`.
|
This will create a dockercontainer running jupyter lab, which will be accessible using `https://127.0.0.1:8888/lab`.
|
||||||
Please use the link that's printed in the console after startup for simplified login.
|
Please use the link that's printed in the console after startup for simplified login.
|
||||||
|
|
||||||
For more information, please visit the [Data analysis with Docker](docker_quickstart.md#data-analysis-using-docker-compose) section.
|
For more information, Please visit the [Data analysis with Docker](docker_quickstart.md#data-analayis-using-docker-compose) section.
|
||||||
|
|
||||||
### Pro tips
|
### Pro tips
|
||||||
|
|
||||||
@@ -27,7 +27,7 @@ For this to work, first activate your virtual environment and run the following
|
|||||||
|
|
||||||
``` bash
|
``` bash
|
||||||
# Activate virtual environment
|
# Activate virtual environment
|
||||||
source .venv/bin/activate
|
source .env/bin/activate
|
||||||
|
|
||||||
pip install ipykernel
|
pip install ipykernel
|
||||||
ipython kernel install --user --name=freqtrade
|
ipython kernel install --user --name=freqtrade
|
||||||
@@ -83,7 +83,7 @@ from pathlib import Path
|
|||||||
project_root = "somedir/freqtrade"
|
project_root = "somedir/freqtrade"
|
||||||
i=0
|
i=0
|
||||||
try:
|
try:
|
||||||
os.chdir(project_root)
|
os.chdirdir(project_root)
|
||||||
assert Path('LICENSE').is_file()
|
assert Path('LICENSE').is_file()
|
||||||
except:
|
except:
|
||||||
while i<4 and (not Path('LICENSE').is_file()):
|
while i<4 and (not Path('LICENSE').is_file()):
|
||||||
|
|||||||
@@ -6,13 +6,14 @@ To download data (candles / OHLCV) needed for backtesting and hyperoptimization
|
|||||||
|
|
||||||
If no additional parameter is specified, freqtrade will download data for `"1m"` and `"5m"` timeframes for the last 30 days.
|
If no additional parameter is specified, freqtrade will download data for `"1m"` and `"5m"` timeframes for the last 30 days.
|
||||||
Exchange and pairs will come from `config.json` (if specified using `-c/--config`).
|
Exchange and pairs will come from `config.json` (if specified using `-c/--config`).
|
||||||
Without provided configuration, `--exchange` becomes mandatory.
|
Otherwise `--exchange` becomes mandatory.
|
||||||
|
|
||||||
You can use a relative timerange (`--days 20`) or an absolute starting point (`--timerange 20200101-`). For incremental downloads, the relative approach should be used.
|
You can use a relative timerange (`--days 20`) or an absolute starting point (`--timerange 20200101-`). For incremental downloads, the relative approach should be used.
|
||||||
|
|
||||||
!!! Tip "Tip: Updating existing data"
|
!!! Tip "Tip: Updating existing data"
|
||||||
If you already have backtesting data available in your data-directory and would like to refresh this data up to today, freqtrade will automatically calculate the missing timerange for the existing pairs and the download will occur from the latest available point until "now", neither `--days` or `--timerange` parameters are required. Freqtrade will keep the available data and only download the missing data.
|
If you already have backtesting data available in your data-directory and would like to refresh this data up to today, freqtrade will automatically calculate the data missing for the existing pairs and the download will occur from the latest available point until "now", neither --days or --timerange parameters are required. Freqtrade will keep the available data and only download the missing data.
|
||||||
If you are updating existing data after inserting new pairs that you have no data for, use the `--new-pairs-days xx` parameter. Specified number of days will be downloaded for new pairs while old pairs will be updated with missing data only.
|
If you are updating existing data after inserting new pairs that you have no data for, use `--new-pairs-days xx` parameter. Specified number of days will be downloaded for new pairs while old pairs will be updated with missing data only.
|
||||||
|
If you use `--days xx` parameter alone - data for specified number of days will be downloaded for _all_ pairs. Be careful, if specified number of days is smaller than gap between now and last downloaded candle - freqtrade will delete all existing data to avoid gaps in candle data.
|
||||||
|
|
||||||
### Usage
|
### Usage
|
||||||
|
|
||||||
@@ -23,14 +24,14 @@ usage: freqtrade download-data [-h] [-v] [--logfile FILE] [-V] [-c PATH]
|
|||||||
[--days INT] [--new-pairs-days INT]
|
[--days INT] [--new-pairs-days INT]
|
||||||
[--include-inactive-pairs]
|
[--include-inactive-pairs]
|
||||||
[--timerange TIMERANGE] [--dl-trades]
|
[--timerange TIMERANGE] [--dl-trades]
|
||||||
[--convert] [--exchange EXCHANGE]
|
[--exchange EXCHANGE]
|
||||||
[-t TIMEFRAMES [TIMEFRAMES ...]] [--erase]
|
[-t TIMEFRAMES [TIMEFRAMES ...]] [--erase]
|
||||||
[--data-format-ohlcv {json,jsongz,hdf5,feather,parquet}]
|
[--data-format-ohlcv {json,jsongz,hdf5,feather,parquet}]
|
||||||
[--data-format-trades {json,jsongz,hdf5,feather,parquet}]
|
[--data-format-trades {json,jsongz,hdf5}]
|
||||||
[--trading-mode {spot,margin,futures}]
|
[--trading-mode {spot,margin,futures}]
|
||||||
[--prepend]
|
[--prepend]
|
||||||
|
|
||||||
options:
|
optional arguments:
|
||||||
-h, --help show this help message and exit
|
-h, --help show this help message and exit
|
||||||
-p PAIRS [PAIRS ...], --pairs PAIRS [PAIRS ...]
|
-p PAIRS [PAIRS ...], --pairs PAIRS [PAIRS ...]
|
||||||
Limit command to these pairs. Pairs are space-
|
Limit command to these pairs. Pairs are space-
|
||||||
@@ -47,12 +48,8 @@ options:
|
|||||||
--dl-trades Download trades instead of OHLCV data. The bot will
|
--dl-trades Download trades instead of OHLCV data. The bot will
|
||||||
resample trades to the desired timeframe as specified
|
resample trades to the desired timeframe as specified
|
||||||
as --timeframes/-t.
|
as --timeframes/-t.
|
||||||
--convert Convert downloaded trades to OHLCV data. Only
|
--exchange EXCHANGE Exchange name (default: `bittrex`). Only valid if no
|
||||||
applicable in combination with `--dl-trades`. Will be
|
config is provided.
|
||||||
automatic for exchanges which don't have historic
|
|
||||||
OHLCV (e.g. Kraken). If not provided, use `trades-to-
|
|
||||||
ohlcv` to convert trades data to OHLCV data.
|
|
||||||
--exchange EXCHANGE Exchange name. Only valid if no config is provided.
|
|
||||||
-t TIMEFRAMES [TIMEFRAMES ...], --timeframes TIMEFRAMES [TIMEFRAMES ...]
|
-t TIMEFRAMES [TIMEFRAMES ...], --timeframes TIMEFRAMES [TIMEFRAMES ...]
|
||||||
Specify which tickers to download. Space-separated
|
Specify which tickers to download. Space-separated
|
||||||
list. Default: `1m 5m`.
|
list. Default: `1m 5m`.
|
||||||
@@ -60,18 +57,17 @@ options:
|
|||||||
exchange/pairs/timeframes.
|
exchange/pairs/timeframes.
|
||||||
--data-format-ohlcv {json,jsongz,hdf5,feather,parquet}
|
--data-format-ohlcv {json,jsongz,hdf5,feather,parquet}
|
||||||
Storage format for downloaded candle (OHLCV) data.
|
Storage format for downloaded candle (OHLCV) data.
|
||||||
(default: `feather`).
|
(default: `json`).
|
||||||
--data-format-trades {json,jsongz,hdf5,feather,parquet}
|
--data-format-trades {json,jsongz,hdf5}
|
||||||
Storage format for downloaded trades data. (default:
|
Storage format for downloaded trades data. (default:
|
||||||
`feather`).
|
`jsongz`).
|
||||||
--trading-mode {spot,margin,futures}, --tradingmode {spot,margin,futures}
|
--trading-mode {spot,margin,futures}, --tradingmode {spot,margin,futures}
|
||||||
Select Trading mode
|
Select Trading mode
|
||||||
--prepend Allow data prepending. (Data-appending is disabled)
|
--prepend Allow data prepending. (Data-appending is disabled)
|
||||||
|
|
||||||
Common arguments:
|
Common arguments:
|
||||||
-v, --verbose Verbose mode (-vv for more, -vvv to get all messages).
|
-v, --verbose Verbose mode (-vv for more, -vvv to get all messages).
|
||||||
--logfile FILE, --log-file FILE
|
--logfile FILE Log to the file specified. Special values are:
|
||||||
Log to the file specified. Special values are:
|
|
||||||
'syslog', 'journald'. See the documentation for more
|
'syslog', 'journald'. See the documentation for more
|
||||||
details.
|
details.
|
||||||
-V, --version show program's version number and exit
|
-V, --version show program's version number and exit
|
||||||
@@ -87,48 +83,40 @@ Common arguments:
|
|||||||
|
|
||||||
```
|
```
|
||||||
|
|
||||||
!!! Tip "Downloading all data for one quote currency"
|
|
||||||
Often, you'll want to download data for all pairs of a specific quote-currency. In such cases, you can use the following shorthand:
|
|
||||||
`freqtrade download-data --exchange binance --pairs ".*/USDT" <...>`. The provided "pairs" string will be expanded to contain all active pairs on the exchange.
|
|
||||||
To also download data for inactive (delisted) pairs, add `--include-inactive-pairs` to the command.
|
|
||||||
|
|
||||||
!!! Note "Startup period"
|
!!! Note "Startup period"
|
||||||
`download-data` is a strategy-independent command. The idea is to download a big chunk of data once, and then iteratively increase the amount of data stored.
|
`download-data` is a strategy-independent command. The idea is to download a big chunk of data once, and then iteratively increase the amount of data stored.
|
||||||
|
|
||||||
For that reason, `download-data` does not care about the "startup-period" defined in a strategy. It's up to the user to download additional days if the backtest should start at a specific point in time (while respecting startup period).
|
For that reason, `download-data` does not care about the "startup-period" defined in a strategy. It's up to the user to download additional days if the backtest should start at a specific point in time (while respecting startup period).
|
||||||
|
|
||||||
### Start download
|
### Pairs file
|
||||||
|
|
||||||
A very simple command (assuming an available `config.json` file) can look as follows.
|
In alternative to the whitelist from `config.json`, a `pairs.json` file can be used.
|
||||||
|
If you are using Binance for example:
|
||||||
|
|
||||||
|
- create a directory `user_data/data/binance` and copy or create the `pairs.json` file in that directory.
|
||||||
|
- update the `pairs.json` file to contain the currency pairs you are interested in.
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
freqtrade download-data --exchange binance
|
mkdir -p user_data/data/binance
|
||||||
|
touch user_data/data/binance/pairs.json
|
||||||
```
|
```
|
||||||
|
|
||||||
This will download historical candle (OHLCV) data for all the currency pairs defined in the configuration.
|
The format of the `pairs.json` file is a simple json list.
|
||||||
|
Mixing different stake-currencies is allowed for this file, since it's only used for downloading.
|
||||||
|
|
||||||
Alternatively, specify the pairs directly
|
``` json
|
||||||
|
[
|
||||||
```bash
|
"ETH/BTC",
|
||||||
freqtrade download-data --exchange binance --pairs ETH/USDT XRP/USDT BTC/USDT
|
"ETH/USDT",
|
||||||
|
"BTC/USDT",
|
||||||
|
"XRP/ETH"
|
||||||
|
]
|
||||||
```
|
```
|
||||||
|
|
||||||
or as regex (in this case, to download all active USDT pairs)
|
!!! Tip "Downloading all data for one quote currency"
|
||||||
|
Often, you'll want to download data for all pairs of a specific quote-currency. In such cases, you can use the following shorthand:
|
||||||
```bash
|
`freqtrade download-data --exchange binance --pairs .*/USDT <...>`. The provided "pairs" string will be expanded to contain all active pairs on the exchange.
|
||||||
freqtrade download-data --exchange binance --pairs ".*/USDT"
|
To also download data for inactive (delisted) pairs, add `--include-inactive-pairs` to the command.
|
||||||
```
|
|
||||||
|
|
||||||
### Other Notes
|
|
||||||
|
|
||||||
* To use a different directory than the exchange specific default, use `--datadir user_data/data/some_directory`.
|
|
||||||
* To change the exchange used to download the historical data from, either use `--exchange <exchange>` - or specify a different configuration file.
|
|
||||||
* To use `pairs.json` from some other directory, use `--pairs-file some_other_dir/pairs.json`.
|
|
||||||
* To download historical candle (OHLCV) data for only 10 days, use `--days 10` (defaults to 30 days).
|
|
||||||
* To download historical candle (OHLCV) data from a fixed starting point, use `--timerange 20200101-` - which will download all data from January 1st, 2020.
|
|
||||||
* Given starting points are ignored if data is already available, downloading only missing data up to today.
|
|
||||||
* Use `--timeframes` to specify what timeframe download the historical candle (OHLCV) data for. Default is `--timeframes 1m 5m` which will download 1-minute and 5-minute data.
|
|
||||||
* To use exchange, timeframe and list of pairs as defined in your configuration file, use the `-c/--config` option. With this, the script uses the whitelist defined in the config as the list of currency pairs to download data for and does not require the pairs.json file. You can combine `-c/--config` with most other options.
|
|
||||||
|
|
||||||
??? Note "Permission denied errors"
|
??? Note "Permission denied errors"
|
||||||
If your configuration directory `user_data` was made by docker, you may get the following error:
|
If your configuration directory `user_data` was made by docker, you may get the following error:
|
||||||
@@ -143,7 +131,39 @@ freqtrade download-data --exchange binance --pairs ".*/USDT"
|
|||||||
sudo chown -R $UID:$GID user_data
|
sudo chown -R $UID:$GID user_data
|
||||||
```
|
```
|
||||||
|
|
||||||
### Download additional data before the current timerange
|
### Start download
|
||||||
|
|
||||||
|
Then run:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
freqtrade download-data --exchange binance
|
||||||
|
```
|
||||||
|
|
||||||
|
This will download historical candle (OHLCV) data for all the currency pairs you defined in `pairs.json`.
|
||||||
|
|
||||||
|
Alternatively, specify the pairs directly
|
||||||
|
|
||||||
|
```bash
|
||||||
|
freqtrade download-data --exchange binance --pairs ETH/USDT XRP/USDT BTC/USDT
|
||||||
|
```
|
||||||
|
|
||||||
|
or as regex (to download all active USDT pairs)
|
||||||
|
|
||||||
|
```bash
|
||||||
|
freqtrade download-data --exchange binance --pairs .*/USDT
|
||||||
|
```
|
||||||
|
|
||||||
|
### Other Notes
|
||||||
|
|
||||||
|
- To use a different directory than the exchange specific default, use `--datadir user_data/data/some_directory`.
|
||||||
|
- To change the exchange used to download the historical data from, please use a different configuration file (you'll probably need to adjust rate limits etc.)
|
||||||
|
- To use `pairs.json` from some other directory, use `--pairs-file some_other_dir/pairs.json`.
|
||||||
|
- To download historical candle (OHLCV) data for only 10 days, use `--days 10` (defaults to 30 days).
|
||||||
|
- To download historical candle (OHLCV) data from a fixed starting point, use `--timerange 20200101-` - which will download all data from January 1st, 2020.
|
||||||
|
- Use `--timeframes` to specify what timeframe download the historical candle (OHLCV) data for. Default is `--timeframes 1m 5m` which will download 1-minute and 5-minute data.
|
||||||
|
- To use exchange, timeframe and list of pairs as defined in your configuration file, use the `-c/--config` option. With this, the script uses the whitelist defined in the config as the list of currency pairs to download data for and does not require the pairs.json file. You can combine `-c/--config` with most other options.
|
||||||
|
|
||||||
|
#### Download additional data before the current timerange
|
||||||
|
|
||||||
Assuming you downloaded all data from 2022 (`--timerange 20220101-`) - but you'd now like to also backtest with earlier data.
|
Assuming you downloaded all data from 2022 (`--timerange 20220101-`) - but you'd now like to also backtest with earlier data.
|
||||||
You can do so by using the `--prepend` flag, combined with `--timerange` - specifying an end-date.
|
You can do so by using the `--prepend` flag, combined with `--timerange` - specifying an end-date.
|
||||||
@@ -157,15 +177,15 @@ freqtrade download-data --exchange binance --pairs ETH/USDT XRP/USDT BTC/USDT --
|
|||||||
|
|
||||||
### Data format
|
### Data format
|
||||||
|
|
||||||
Freqtrade currently supports the following data-formats:
|
Freqtrade currently supports 3 data-formats for both OHLCV and trades data:
|
||||||
|
|
||||||
* `feather` - a dataformat based on Apache Arrow
|
|
||||||
* `json` - plain "text" json files
|
* `json` - plain "text" json files
|
||||||
* `jsongz` - a gzip-zipped version of json files
|
* `jsongz` - a gzip-zipped version of json files
|
||||||
* `hdf5` - a high performance datastore (deprecated)
|
* `hdf5` - a high performance datastore
|
||||||
* `parquet` - columnar datastore (OHLCV only)
|
* `feather` - a dataformat based on Apache Arrow
|
||||||
|
* `parquet` - columnar datastore
|
||||||
|
|
||||||
By default, both OHLCV data and trades data are stored in the `feather` format.
|
By default, OHLCV data is stored as `json` data, while trades data is stored as `jsongz` data.
|
||||||
|
|
||||||
This can be changed via the `--data-format-ohlcv` and `--data-format-trades` command line arguments respectively.
|
This can be changed via the `--data-format-ohlcv` and `--data-format-trades` command line arguments respectively.
|
||||||
To persist this change, you should also add the following snippet to your configuration, so you don't have to insert the above arguments each time:
|
To persist this change, you should also add the following snippet to your configuration, so you don't have to insert the above arguments each time:
|
||||||
@@ -208,46 +228,17 @@ time freqtrade list-data --show-timerange --data-format-ohlcv <dataformat>
|
|||||||
|
|
||||||
| Format | Size | timing |
|
| Format | Size | timing |
|
||||||
|------------|-------------|-------------|
|
|------------|-------------|-------------|
|
||||||
| `feather` | 72Mb | 3.5s |
|
|
||||||
| `json` | 149Mb | 25.6s |
|
| `json` | 149Mb | 25.6s |
|
||||||
| `jsongz` | 39Mb | 27s |
|
| `jsongz` | 39Mb | 27s |
|
||||||
| `hdf5` | 145Mb | 3.9s |
|
| `hdf5` | 145Mb | 3.9s |
|
||||||
|
| `feather` | 72Mb | 3.5s |
|
||||||
| `parquet` | 83Mb | 3.8s |
|
| `parquet` | 83Mb | 3.8s |
|
||||||
|
|
||||||
Size has been taken from the BTC/USDT 1m spot combination for the timerange specified above.
|
Size has been taken from the BTC/USDT 1m spot combination for the timerange specified above.
|
||||||
|
|
||||||
To have a best performance/size mix, we recommend using the default feather format, or parquet.
|
To have a best performance/size mix, we recommend the use of either feather or parquet.
|
||||||
|
|
||||||
### Pairs file
|
#### Sub-command convert data
|
||||||
|
|
||||||
In alternative to the whitelist from `config.json`, a `pairs.json` file can be used.
|
|
||||||
If you are using Binance for example:
|
|
||||||
|
|
||||||
* create a directory `user_data/data/binance` and copy or create the `pairs.json` file in that directory.
|
|
||||||
* update the `pairs.json` file to contain the currency pairs you are interested in.
|
|
||||||
|
|
||||||
```bash
|
|
||||||
mkdir -p user_data/data/binance
|
|
||||||
touch user_data/data/binance/pairs.json
|
|
||||||
```
|
|
||||||
|
|
||||||
The format of the `pairs.json` file is a simple json list.
|
|
||||||
Mixing different stake-currencies is allowed for this file, since it's only used for downloading.
|
|
||||||
|
|
||||||
``` json
|
|
||||||
[
|
|
||||||
"ETH/BTC",
|
|
||||||
"ETH/USDT",
|
|
||||||
"BTC/USDT",
|
|
||||||
"XRP/ETH"
|
|
||||||
]
|
|
||||||
```
|
|
||||||
|
|
||||||
!!! Note
|
|
||||||
The `pairs.json` file is only used when no configuration is loaded (implicitly by naming, or via `--config` flag).
|
|
||||||
You can force the usage of this file via `--pairs-file pairs.json` - however we recommend to use the pairlist from within the configuration, either via `exchange.pair_whitelist` or `pairs` setting in the configuration.
|
|
||||||
|
|
||||||
## Sub-command convert data
|
|
||||||
|
|
||||||
```
|
```
|
||||||
usage: freqtrade convert-data [-h] [-v] [--logfile FILE] [-V] [-c PATH]
|
usage: freqtrade convert-data [-h] [-v] [--logfile FILE] [-V] [-c PATH]
|
||||||
@@ -260,7 +251,7 @@ usage: freqtrade convert-data [-h] [-v] [--logfile FILE] [-V] [-c PATH]
|
|||||||
[--trading-mode {spot,margin,futures}]
|
[--trading-mode {spot,margin,futures}]
|
||||||
[--candle-types {spot,futures,mark,index,premiumIndex,funding_rate} [{spot,futures,mark,index,premiumIndex,funding_rate} ...]]
|
[--candle-types {spot,futures,mark,index,premiumIndex,funding_rate} [{spot,futures,mark,index,premiumIndex,funding_rate} ...]]
|
||||||
|
|
||||||
options:
|
optional arguments:
|
||||||
-h, --help show this help message and exit
|
-h, --help show this help message and exit
|
||||||
-p PAIRS [PAIRS ...], --pairs PAIRS [PAIRS ...]
|
-p PAIRS [PAIRS ...], --pairs PAIRS [PAIRS ...]
|
||||||
Limit command to these pairs. Pairs are space-
|
Limit command to these pairs. Pairs are space-
|
||||||
@@ -271,20 +262,19 @@ options:
|
|||||||
Destination format for data conversion.
|
Destination format for data conversion.
|
||||||
--erase Clean all existing data for the selected
|
--erase Clean all existing data for the selected
|
||||||
exchange/pairs/timeframes.
|
exchange/pairs/timeframes.
|
||||||
--exchange EXCHANGE Exchange name. Only valid if no config is provided.
|
--exchange EXCHANGE Exchange name (default: `bittrex`). Only valid if no
|
||||||
|
config is provided.
|
||||||
-t TIMEFRAMES [TIMEFRAMES ...], --timeframes TIMEFRAMES [TIMEFRAMES ...]
|
-t TIMEFRAMES [TIMEFRAMES ...], --timeframes TIMEFRAMES [TIMEFRAMES ...]
|
||||||
Specify which tickers to download. Space-separated
|
Specify which tickers to download. Space-separated
|
||||||
list. Default: `1m 5m`.
|
list. Default: `1m 5m`.
|
||||||
--trading-mode {spot,margin,futures}, --tradingmode {spot,margin,futures}
|
--trading-mode {spot,margin,futures}, --tradingmode {spot,margin,futures}
|
||||||
Select Trading mode
|
Select Trading mode
|
||||||
--candle-types {spot,futures,mark,index,premiumIndex,funding_rate} [{spot,futures,mark,index,premiumIndex,funding_rate} ...]
|
--candle-types {spot,futures,mark,index,premiumIndex,funding_rate} [{spot,futures,mark,index,premiumIndex,funding_rate} ...]
|
||||||
Select candle type to convert. Defaults to all
|
Select candle type to use
|
||||||
available types.
|
|
||||||
|
|
||||||
Common arguments:
|
Common arguments:
|
||||||
-v, --verbose Verbose mode (-vv for more, -vvv to get all messages).
|
-v, --verbose Verbose mode (-vv for more, -vvv to get all messages).
|
||||||
--logfile FILE, --log-file FILE
|
--logfile FILE Log to the file specified. Special values are:
|
||||||
Log to the file specified. Special values are:
|
|
||||||
'syslog', 'journald'. See the documentation for more
|
'syslog', 'journald'. See the documentation for more
|
||||||
details.
|
details.
|
||||||
-V, --version show program's version number and exit
|
-V, --version show program's version number and exit
|
||||||
@@ -297,9 +287,10 @@ Common arguments:
|
|||||||
Path to directory with historical backtesting data.
|
Path to directory with historical backtesting data.
|
||||||
--userdir PATH, --user-data-dir PATH
|
--userdir PATH, --user-data-dir PATH
|
||||||
Path to userdata directory.
|
Path to userdata directory.
|
||||||
|
|
||||||
```
|
```
|
||||||
|
|
||||||
### Example converting data
|
##### Example converting data
|
||||||
|
|
||||||
The following command will convert all candle (OHLCV) data available in `~/.freqtrade/data/binance` from json to jsongz, saving diskspace in the process.
|
The following command will convert all candle (OHLCV) data available in `~/.freqtrade/data/binance` from json to jsongz, saving diskspace in the process.
|
||||||
It'll also remove original json data files (`--erase` parameter).
|
It'll also remove original json data files (`--erase` parameter).
|
||||||
@@ -308,7 +299,7 @@ It'll also remove original json data files (`--erase` parameter).
|
|||||||
freqtrade convert-data --format-from json --format-to jsongz --datadir ~/.freqtrade/data/binance -t 5m 15m --erase
|
freqtrade convert-data --format-from json --format-to jsongz --datadir ~/.freqtrade/data/binance -t 5m 15m --erase
|
||||||
```
|
```
|
||||||
|
|
||||||
## Sub-command convert trade data
|
#### Sub-command convert trade data
|
||||||
|
|
||||||
```
|
```
|
||||||
usage: freqtrade convert-trade-data [-h] [-v] [--logfile FILE] [-V] [-c PATH]
|
usage: freqtrade convert-trade-data [-h] [-v] [--logfile FILE] [-V] [-c PATH]
|
||||||
@@ -319,7 +310,7 @@ usage: freqtrade convert-trade-data [-h] [-v] [--logfile FILE] [-V] [-c PATH]
|
|||||||
{json,jsongz,hdf5,feather,parquet}
|
{json,jsongz,hdf5,feather,parquet}
|
||||||
[--erase] [--exchange EXCHANGE]
|
[--erase] [--exchange EXCHANGE]
|
||||||
|
|
||||||
options:
|
optional arguments:
|
||||||
-h, --help show this help message and exit
|
-h, --help show this help message and exit
|
||||||
-p PAIRS [PAIRS ...], --pairs PAIRS [PAIRS ...]
|
-p PAIRS [PAIRS ...], --pairs PAIRS [PAIRS ...]
|
||||||
Limit command to these pairs. Pairs are space-
|
Limit command to these pairs. Pairs are space-
|
||||||
@@ -330,12 +321,12 @@ options:
|
|||||||
Destination format for data conversion.
|
Destination format for data conversion.
|
||||||
--erase Clean all existing data for the selected
|
--erase Clean all existing data for the selected
|
||||||
exchange/pairs/timeframes.
|
exchange/pairs/timeframes.
|
||||||
--exchange EXCHANGE Exchange name. Only valid if no config is provided.
|
--exchange EXCHANGE Exchange name (default: `bittrex`). Only valid if no
|
||||||
|
config is provided.
|
||||||
|
|
||||||
Common arguments:
|
Common arguments:
|
||||||
-v, --verbose Verbose mode (-vv for more, -vvv to get all messages).
|
-v, --verbose Verbose mode (-vv for more, -vvv to get all messages).
|
||||||
--logfile FILE, --log-file FILE
|
--logfile FILE Log to the file specified. Special values are:
|
||||||
Log to the file specified. Special values are:
|
|
||||||
'syslog', 'journald'. See the documentation for more
|
'syslog', 'journald'. See the documentation for more
|
||||||
details.
|
details.
|
||||||
-V, --version show program's version number and exit
|
-V, --version show program's version number and exit
|
||||||
@@ -351,7 +342,7 @@ Common arguments:
|
|||||||
|
|
||||||
```
|
```
|
||||||
|
|
||||||
### Example converting trades
|
##### Example converting trades
|
||||||
|
|
||||||
The following command will convert all available trade-data in `~/.freqtrade/data/kraken` from jsongz to json.
|
The following command will convert all available trade-data in `~/.freqtrade/data/kraken` from jsongz to json.
|
||||||
It'll also remove original jsongz data files (`--erase` parameter).
|
It'll also remove original jsongz data files (`--erase` parameter).
|
||||||
@@ -360,7 +351,7 @@ It'll also remove original jsongz data files (`--erase` parameter).
|
|||||||
freqtrade convert-trade-data --format-from jsongz --format-to json --datadir ~/.freqtrade/data/kraken --erase
|
freqtrade convert-trade-data --format-from jsongz --format-to json --datadir ~/.freqtrade/data/kraken --erase
|
||||||
```
|
```
|
||||||
|
|
||||||
## Sub-command trades to ohlcv
|
### Sub-command trades to ohlcv
|
||||||
|
|
||||||
When you need to use `--dl-trades` (kraken only) to download data, conversion of trades data to ohlcv data is the last step.
|
When you need to use `--dl-trades` (kraken only) to download data, conversion of trades data to ohlcv data is the last step.
|
||||||
This command will allow you to repeat this last step for additional timeframes without re-downloading the data.
|
This command will allow you to repeat this last step for additional timeframes without re-downloading the data.
|
||||||
@@ -372,9 +363,9 @@ usage: freqtrade trades-to-ohlcv [-h] [-v] [--logfile FILE] [-V] [-c PATH]
|
|||||||
[-t TIMEFRAMES [TIMEFRAMES ...]]
|
[-t TIMEFRAMES [TIMEFRAMES ...]]
|
||||||
[--exchange EXCHANGE]
|
[--exchange EXCHANGE]
|
||||||
[--data-format-ohlcv {json,jsongz,hdf5,feather,parquet}]
|
[--data-format-ohlcv {json,jsongz,hdf5,feather,parquet}]
|
||||||
[--data-format-trades {json,jsongz,hdf5,feather}]
|
[--data-format-trades {json,jsongz,hdf5}]
|
||||||
|
|
||||||
options:
|
optional arguments:
|
||||||
-h, --help show this help message and exit
|
-h, --help show this help message and exit
|
||||||
-p PAIRS [PAIRS ...], --pairs PAIRS [PAIRS ...]
|
-p PAIRS [PAIRS ...], --pairs PAIRS [PAIRS ...]
|
||||||
Limit command to these pairs. Pairs are space-
|
Limit command to these pairs. Pairs are space-
|
||||||
@@ -382,18 +373,18 @@ options:
|
|||||||
-t TIMEFRAMES [TIMEFRAMES ...], --timeframes TIMEFRAMES [TIMEFRAMES ...]
|
-t TIMEFRAMES [TIMEFRAMES ...], --timeframes TIMEFRAMES [TIMEFRAMES ...]
|
||||||
Specify which tickers to download. Space-separated
|
Specify which tickers to download. Space-separated
|
||||||
list. Default: `1m 5m`.
|
list. Default: `1m 5m`.
|
||||||
--exchange EXCHANGE Exchange name. Only valid if no config is provided.
|
--exchange EXCHANGE Exchange name (default: `bittrex`). Only valid if no
|
||||||
|
config is provided.
|
||||||
--data-format-ohlcv {json,jsongz,hdf5,feather,parquet}
|
--data-format-ohlcv {json,jsongz,hdf5,feather,parquet}
|
||||||
Storage format for downloaded candle (OHLCV) data.
|
Storage format for downloaded candle (OHLCV) data.
|
||||||
(default: `feather`).
|
(default: `json`).
|
||||||
--data-format-trades {json,jsongz,hdf5,feather}
|
--data-format-trades {json,jsongz,hdf5}
|
||||||
Storage format for downloaded trades data. (default:
|
Storage format for downloaded trades data. (default:
|
||||||
`feather`).
|
`jsongz`).
|
||||||
|
|
||||||
Common arguments:
|
Common arguments:
|
||||||
-v, --verbose Verbose mode (-vv for more, -vvv to get all messages).
|
-v, --verbose Verbose mode (-vv for more, -vvv to get all messages).
|
||||||
--logfile FILE, --log-file FILE
|
--logfile FILE Log to the file specified. Special values are:
|
||||||
Log to the file specified. Special values are:
|
|
||||||
'syslog', 'journald'. See the documentation for more
|
'syslog', 'journald'. See the documentation for more
|
||||||
details.
|
details.
|
||||||
-V, --version show program's version number and exit
|
-V, --version show program's version number and exit
|
||||||
@@ -409,13 +400,13 @@ Common arguments:
|
|||||||
|
|
||||||
```
|
```
|
||||||
|
|
||||||
### Example trade-to-ohlcv conversion
|
#### Example trade-to-ohlcv conversion
|
||||||
|
|
||||||
``` bash
|
``` bash
|
||||||
freqtrade trades-to-ohlcv --exchange kraken -t 5m 1h 1d --pairs BTC/EUR ETH/EUR
|
freqtrade trades-to-ohlcv --exchange kraken -t 5m 1h 1d --pairs BTC/EUR ETH/EUR
|
||||||
```
|
```
|
||||||
|
|
||||||
## Sub-command list-data
|
### Sub-command list-data
|
||||||
|
|
||||||
You can get a list of downloaded data using the `list-data` sub-command.
|
You can get a list of downloaded data using the `list-data` sub-command.
|
||||||
|
|
||||||
@@ -423,21 +414,17 @@ You can get a list of downloaded data using the `list-data` sub-command.
|
|||||||
usage: freqtrade list-data [-h] [-v] [--logfile FILE] [-V] [-c PATH] [-d PATH]
|
usage: freqtrade list-data [-h] [-v] [--logfile FILE] [-V] [-c PATH] [-d PATH]
|
||||||
[--userdir PATH] [--exchange EXCHANGE]
|
[--userdir PATH] [--exchange EXCHANGE]
|
||||||
[--data-format-ohlcv {json,jsongz,hdf5,feather,parquet}]
|
[--data-format-ohlcv {json,jsongz,hdf5,feather,parquet}]
|
||||||
[--data-format-trades {json,jsongz,hdf5,feather,parquet}]
|
[-p PAIRS [PAIRS ...]]
|
||||||
[--trades] [-p PAIRS [PAIRS ...]]
|
|
||||||
[--trading-mode {spot,margin,futures}]
|
[--trading-mode {spot,margin,futures}]
|
||||||
[--show-timerange]
|
[--show-timerange]
|
||||||
|
|
||||||
options:
|
optional arguments:
|
||||||
-h, --help show this help message and exit
|
-h, --help show this help message and exit
|
||||||
--exchange EXCHANGE Exchange name. Only valid if no config is provided.
|
--exchange EXCHANGE Exchange name (default: `bittrex`). Only valid if no
|
||||||
|
config is provided.
|
||||||
--data-format-ohlcv {json,jsongz,hdf5,feather,parquet}
|
--data-format-ohlcv {json,jsongz,hdf5,feather,parquet}
|
||||||
Storage format for downloaded candle (OHLCV) data.
|
Storage format for downloaded candle (OHLCV) data.
|
||||||
(default: `feather`).
|
(default: `json`).
|
||||||
--data-format-trades {json,jsongz,hdf5,feather,parquet}
|
|
||||||
Storage format for downloaded trades data. (default:
|
|
||||||
`feather`).
|
|
||||||
--trades Work on trades data instead of OHLCV data.
|
|
||||||
-p PAIRS [PAIRS ...], --pairs PAIRS [PAIRS ...]
|
-p PAIRS [PAIRS ...], --pairs PAIRS [PAIRS ...]
|
||||||
Limit command to these pairs. Pairs are space-
|
Limit command to these pairs. Pairs are space-
|
||||||
separated.
|
separated.
|
||||||
@@ -448,8 +435,7 @@ options:
|
|||||||
|
|
||||||
Common arguments:
|
Common arguments:
|
||||||
-v, --verbose Verbose mode (-vv for more, -vvv to get all messages).
|
-v, --verbose Verbose mode (-vv for more, -vvv to get all messages).
|
||||||
--logfile FILE, --log-file FILE
|
--logfile FILE Log to the file specified. Special values are:
|
||||||
Log to the file specified. Special values are:
|
|
||||||
'syslog', 'journald'. See the documentation for more
|
'syslog', 'journald'. See the documentation for more
|
||||||
details.
|
details.
|
||||||
-V, --version show program's version number and exit
|
-V, --version show program's version number and exit
|
||||||
@@ -465,52 +451,31 @@ Common arguments:
|
|||||||
|
|
||||||
```
|
```
|
||||||
|
|
||||||
### Example list-data
|
#### Example list-data
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
> freqtrade list-data --userdir ~/.freqtrade/user_data/
|
> freqtrade list-data --userdir ~/.freqtrade/user_data/
|
||||||
|
|
||||||
Found 33 pair / timeframe combinations.
|
Found 33 pair / timeframe combinations.
|
||||||
┏━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━┓
|
pairs timeframe
|
||||||
┃ Pair ┃ Timeframe ┃ Type ┃
|
---------- -----------------------------------------
|
||||||
┡━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━┩
|
ADA/BTC 5m, 15m, 30m, 1h, 2h, 4h, 6h, 12h, 1d
|
||||||
│ ADA/BTC │ 5m, 15m, 30m, 1h, 2h, 4h, 6h, 12h, 1d │ spot │
|
ADA/ETH 5m, 15m, 30m, 1h, 2h, 4h, 6h, 12h, 1d
|
||||||
│ ADA/ETH │ 5m, 15m, 30m, 1h, 2h, 4h, 6h, 12h, 1d │ spot │
|
ETH/BTC 5m, 15m, 30m, 1h, 2h, 4h, 6h, 12h, 1d
|
||||||
│ ETH/BTC │ 5m, 15m, 30m, 1h, 2h, 4h, 6h, 12h, 1d │ spot │
|
ETH/USDT 5m, 15m, 30m, 1h, 2h, 4h
|
||||||
│ ETH/USDT │ 5m, 15m, 30m, 1h, 2h, 4h │ spot │
|
|
||||||
└───────────────┴───────────────────────────────────────────┴──────┘
|
|
||||||
|
|
||||||
```
|
```
|
||||||
|
|
||||||
Show all trades data including from/to timerange
|
### Trades (tick) data
|
||||||
|
|
||||||
``` bash
|
By default, `download-data` sub-command downloads Candles (OHLCV) data. Some exchanges also provide historic trade-data via their API.
|
||||||
> freqtrade list-data --show --trades
|
|
||||||
Found trades data for 1 pair.
|
|
||||||
┏━━━━━━━━━┳━━━━━━┳━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━┓
|
|
||||||
┃ Pair ┃ Type ┃ From ┃ To ┃ Trades ┃
|
|
||||||
┡━━━━━━━━━╇━━━━━━╇━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━┩
|
|
||||||
│ XRP/ETH │ spot │ 2019-10-11 00:00:11 │ 2019-10-13 11:19:28 │ 12477 │
|
|
||||||
└─────────┴──────┴─────────────────────┴─────────────────────┴────────┘
|
|
||||||
|
|
||||||
```
|
|
||||||
|
|
||||||
## Trades (tick) data
|
|
||||||
|
|
||||||
By default, `download-data` sub-command downloads Candles (OHLCV) data. Most exchanges also provide historic trade-data via their API.
|
|
||||||
This data can be useful if you need many different timeframes, since it is only downloaded once, and then resampled locally to the desired timeframes.
|
This data can be useful if you need many different timeframes, since it is only downloaded once, and then resampled locally to the desired timeframes.
|
||||||
|
|
||||||
Since this data is large by default, the files use the feather file format by default. They are stored in your data-directory with the naming convention of `<pair>-trades.feather` (`ETH_BTC-trades.feather`). Incremental mode is also supported, as for historic OHLCV data, so downloading the data once per week with `--days 8` will create an incremental data-repository.
|
Since this data is large by default, the files use gzip by default. They are stored in your data-directory with the naming convention of `<pair>-trades.json.gz` (`ETH_BTC-trades.json.gz`). Incremental mode is also supported, as for historic OHLCV data, so downloading the data once per week with `--days 8` will create an incremental data-repository.
|
||||||
|
|
||||||
To use this mode, simply add `--dl-trades` to your call. This will swap the download method to download trades.
|
To use this mode, simply add `--dl-trades` to your call. This will swap the download method to download trades, and resamples the data locally.
|
||||||
If `--convert` is also provided, the resample step will happen automatically and overwrite eventually existing OHLCV data for the given pair/timeframe combinations.
|
|
||||||
|
|
||||||
!!! Warning "Do not use"
|
!!! Warning "do not use"
|
||||||
You should not use this unless you're a kraken user (Kraken does not provide historic OHLCV data).
|
You should not use this unless you're a kraken user. Most other exchanges provide OHLCV data with sufficient history.
|
||||||
Most other exchanges provide OHLCV data with sufficient history, so downloading multiple timeframes through that method will still proof to be a lot faster than downloading trades data.
|
|
||||||
|
|
||||||
!!! Note "Kraken user"
|
|
||||||
Kraken users should read [this](exchanges.md#historic-kraken-data) before starting to download data.
|
|
||||||
|
|
||||||
Example call:
|
Example call:
|
||||||
|
|
||||||
@@ -521,6 +486,12 @@ freqtrade download-data --exchange kraken --pairs XRP/EUR ETH/EUR --days 20 --dl
|
|||||||
!!! Note
|
!!! Note
|
||||||
While this method uses async calls, it will be slow, since it requires the result of the previous call to generate the next request to the exchange.
|
While this method uses async calls, it will be slow, since it requires the result of the previous call to generate the next request to the exchange.
|
||||||
|
|
||||||
|
!!! Warning
|
||||||
|
The historic trades are not available during Freqtrade dry-run and live trade modes because all exchanges tested provide this data with a delay of few 100 candles, so it's not suitable for real-time trading.
|
||||||
|
|
||||||
|
!!! Note "Kraken user"
|
||||||
|
Kraken users should read [this](exchanges.md#historic-kraken-data) before starting to download data.
|
||||||
|
|
||||||
## Next step
|
## Next step
|
||||||
|
|
||||||
Great, you now have some data downloaded, so you can now start [backtesting](backtesting.md) your strategy.
|
Great, you now have backtest data downloaded, so you can now start [backtesting](backtesting.md) your strategy.
|
||||||
|
|||||||
@@ -66,25 +66,11 @@ We will keep a compatibility layer for 1-2 versions (so both `buy_tag` and `ente
|
|||||||
|
|
||||||
#### Naming changes
|
#### Naming changes
|
||||||
|
|
||||||
Webhook terminology changed from "sell" to "exit", and from "buy" to "entry", removing "webhook" in the process.
|
Webhook terminology changed from "sell" to "exit", and from "buy" to "entry".
|
||||||
|
|
||||||
* `webhookbuy`, `webhookentry` -> `entry`
|
* `webhookbuy` -> `webhookentry`
|
||||||
* `webhookbuyfill`, `webhookentryfill` -> `entry_fill`
|
* `webhookbuyfill` -> `webhookentryfill`
|
||||||
* `webhookbuycancel`, `webhookentrycancel` -> `entry_cancel`
|
* `webhookbuycancel` -> `webhookentrycancel`
|
||||||
* `webhooksell`, `webhookexit` -> `exit`
|
* `webhooksell` -> `webhookexit`
|
||||||
* `webhooksellfill`, `webhookexitfill` -> `exit_fill`
|
* `webhooksellfill` -> `webhookexitfill`
|
||||||
* `webhooksellcancel`, `webhookexitcancel` -> `exit_cancel`
|
* `webhooksellcancel` -> `webhookexitcancel`
|
||||||
|
|
||||||
## Removal of `populate_any_indicators`
|
|
||||||
|
|
||||||
version 2023.3 saw the removal of `populate_any_indicators` in favor of split methods for feature engineering and targets. Please read the [migration document](strategy_migration.md#freqai-strategy) for full details.
|
|
||||||
|
|
||||||
## Removal of `protections` from configuration
|
|
||||||
|
|
||||||
Setting protections from the configuration via `"protections": [],` has been removed in 2024.10, after having raised deprecation warnings for over 3 years.
|
|
||||||
|
|
||||||
## hdf5 data storage
|
|
||||||
|
|
||||||
Using hdf5 as data storage has been deprecated in 2024.12 and will be removed in 2025.1. We recommend switching to the feather data format.
|
|
||||||
|
|
||||||
Please use the [`convert-data` subcommand](data-download.md#sub-command-convert-data) to convert your existing data to one of the supported formats.
|
|
||||||
|
|||||||
@@ -22,9 +22,9 @@ This will spin up a local server (usually on port 8000) so you can see if everyt
|
|||||||
## Developer setup
|
## Developer setup
|
||||||
|
|
||||||
To configure a development environment, you can either use the provided [DevContainer](#devcontainer-setup), or use the `setup.sh` script and answer "y" when asked "Do you want to install dependencies for dev [y/N]? ".
|
To configure a development environment, you can either use the provided [DevContainer](#devcontainer-setup), or use the `setup.sh` script and answer "y" when asked "Do you want to install dependencies for dev [y/N]? ".
|
||||||
Alternatively (e.g. if your system is not supported by the setup.sh script), follow the manual installation process and run `pip3 install -r requirements-dev.txt` - followed by `pip3 install -e .[all]`.
|
Alternatively (e.g. if your system is not supported by the setup.sh script), follow the manual installation process and run `pip3 install -e .[all]`.
|
||||||
|
|
||||||
This will install all required tools for development, including `pytest`, `ruff`, `mypy`, and `coveralls`.
|
This will install all required tools for development, including `pytest`, `flake8`, `mypy`, and `coveralls`.
|
||||||
|
|
||||||
Then install the git hook scripts by running `pre-commit install`, so your changes will be verified locally before committing.
|
Then install the git hook scripts by running `pre-commit install`, so your changes will be verified locally before committing.
|
||||||
This avoids a lot of waiting for CI already, as some basic formatting checks are done locally on your machine.
|
This avoids a lot of waiting for CI already, as some basic formatting checks are done locally on your machine.
|
||||||
@@ -49,13 +49,6 @@ For more information about the [Remote container extension](https://code.visuals
|
|||||||
New code should be covered by basic unittests. Depending on the complexity of the feature, Reviewers may request more in-depth unittests.
|
New code should be covered by basic unittests. Depending on the complexity of the feature, Reviewers may request more in-depth unittests.
|
||||||
If necessary, the Freqtrade team can assist and give guidance with writing good tests (however please don't expect anyone to write the tests for you).
|
If necessary, the Freqtrade team can assist and give guidance with writing good tests (however please don't expect anyone to write the tests for you).
|
||||||
|
|
||||||
#### How to run tests
|
|
||||||
|
|
||||||
Use `pytest` in root folder to run all available testcases and confirm your local environment is setup correctly
|
|
||||||
|
|
||||||
!!! Note "feature branches"
|
|
||||||
Tests are expected to pass on the `develop` and `stable` branches. Other branches may be work in progress with tests not working yet.
|
|
||||||
|
|
||||||
#### Checking log content in tests
|
#### Checking log content in tests
|
||||||
|
|
||||||
Freqtrade uses 2 main methods to check log content in tests, `log_has()` and `log_has_re()` (to check using regex, in case of dynamic log-messages).
|
Freqtrade uses 2 main methods to check log content in tests, `log_has()` and `log_has_re()` (to check using regex, in case of dynamic log-messages).
|
||||||
@@ -77,13 +70,13 @@ def test_method_to_test(caplog):
|
|||||||
|
|
||||||
### Debug configuration
|
### Debug configuration
|
||||||
|
|
||||||
To debug freqtrade, we recommend VSCode (with the Python extension) with the following launch configuration (located in `.vscode/launch.json`).
|
To debug freqtrade, we recommend VSCode with the following launch configuration (located in `.vscode/launch.json`).
|
||||||
Details will obviously vary between setups - but this should work to get you started.
|
Details will obviously vary between setups - but this should work to get you started.
|
||||||
|
|
||||||
``` json
|
``` json
|
||||||
{
|
{
|
||||||
"name": "freqtrade trade",
|
"name": "freqtrade trade",
|
||||||
"type": "debugpy",
|
"type": "python",
|
||||||
"request": "launch",
|
"request": "launch",
|
||||||
"module": "freqtrade",
|
"module": "freqtrade",
|
||||||
"console": "integratedTerminal",
|
"console": "integratedTerminal",
|
||||||
@@ -102,21 +95,8 @@ This method can also be used to debug a strategy, by setting the breakpoints wit
|
|||||||
|
|
||||||
A similar setup can also be taken for Pycharm - using `freqtrade` as module name, and setting the command line arguments as "parameters".
|
A similar setup can also be taken for Pycharm - using `freqtrade` as module name, and setting the command line arguments as "parameters".
|
||||||
|
|
||||||
??? Tip "Correct venv usage"
|
|
||||||
When using a virtual environment (which you should), make sure that your Editor is using the correct virtual environment to avoid problems or "unknown import" errors.
|
|
||||||
|
|
||||||
#### Vscode
|
|
||||||
|
|
||||||
You can select the correct environment in VSCode with the command "Python: Select Interpreter" - which will show you environments the extension detected.
|
|
||||||
If your environment has not been detected, you can also pick a path manually.
|
|
||||||
|
|
||||||
#### Pycharm
|
|
||||||
|
|
||||||
In pycharm, you can select the appropriate Environment in the "Run/Debug Configurations" window.
|
|
||||||

|
|
||||||
|
|
||||||
!!! Note "Startup directory"
|
!!! Note "Startup directory"
|
||||||
This assumes that you have the repository checked out, and the editor is started at the repository root level (so pyproject.toml is at the top level of your repository).
|
This assumes that you have the repository checked out, and the editor is started at the repository root level (so setup.py is at the top level of your repository).
|
||||||
|
|
||||||
## ErrorHandling
|
## ErrorHandling
|
||||||
|
|
||||||
@@ -129,8 +109,6 @@ Below is an outline of exception inheritance hierarchy:
|
|||||||
+ FreqtradeException
|
+ FreqtradeException
|
||||||
|
|
|
|
||||||
+---+ OperationalException
|
+---+ OperationalException
|
||||||
| |
|
|
||||||
| +---+ ConfigurationError
|
|
||||||
|
|
|
|
||||||
+---+ DependencyException
|
+---+ DependencyException
|
||||||
| |
|
| |
|
||||||
@@ -162,7 +140,7 @@ Hopefully you also want to contribute this back upstream.
|
|||||||
|
|
||||||
Whatever your motivations are - This should get you off the ground in trying to develop a new Pairlist Handler.
|
Whatever your motivations are - This should get you off the ground in trying to develop a new Pairlist Handler.
|
||||||
|
|
||||||
First of all, have a look at the [VolumePairList](https://github.com/freqtrade/freqtrade/blob/develop/freqtrade/plugins/pairlist/VolumePairList.py) Handler, and best copy this file with a name of your new Pairlist Handler.
|
First of all, have a look at the [VolumePairList](https://github.com/freqtrade/freqtrade/blob/develop/freqtrade/pairlist/VolumePairList.py) Handler, and best copy this file with a name of your new Pairlist Handler.
|
||||||
|
|
||||||
This is a simple Handler, which however serves as a good example on how to start developing.
|
This is a simple Handler, which however serves as a good example on how to start developing.
|
||||||
|
|
||||||
@@ -205,7 +183,7 @@ This is called with each iteration of the bot (only if the Pairlist Handler is a
|
|||||||
|
|
||||||
It must return the resulting pairlist (which may then be passed into the chain of Pairlist Handlers).
|
It must return the resulting pairlist (which may then be passed into the chain of Pairlist Handlers).
|
||||||
|
|
||||||
Validations are optional, the parent class exposes a `verify_blacklist(pairlist)` and `_whitelist_for_active_markets(pairlist)` to do default filtering. Use this if you limit your result to a certain number of pairs - so the end-result is not shorter than expected.
|
Validations are optional, the parent class exposes a `_verify_blacklist(pairlist)` and `_whitelist_for_active_markets(pairlist)` to do default filtering. Use this if you limit your result to a certain number of pairs - so the end-result is not shorter than expected.
|
||||||
|
|
||||||
#### filter_pairlist
|
#### filter_pairlist
|
||||||
|
|
||||||
@@ -219,14 +197,14 @@ The default implementation in the base class simply calls the `_validate_pair()`
|
|||||||
|
|
||||||
If overridden, it must return the resulting pairlist (which may then be passed into the next Pairlist Handler in the chain).
|
If overridden, it must return the resulting pairlist (which may then be passed into the next Pairlist Handler in the chain).
|
||||||
|
|
||||||
Validations are optional, the parent class exposes a `verify_blacklist(pairlist)` and `_whitelist_for_active_markets(pairlist)` to do default filters. Use this if you limit your result to a certain number of pairs - so the end result is not shorter than expected.
|
Validations are optional, the parent class exposes a `_verify_blacklist(pairlist)` and `_whitelist_for_active_markets(pairlist)` to do default filters. Use this if you limit your result to a certain number of pairs - so the end result is not shorter than expected.
|
||||||
|
|
||||||
In `VolumePairList`, this implements different methods of sorting, does early validation so only the expected number of pairs is returned.
|
In `VolumePairList`, this implements different methods of sorting, does early validation so only the expected number of pairs is returned.
|
||||||
|
|
||||||
##### sample
|
##### sample
|
||||||
|
|
||||||
``` python
|
``` python
|
||||||
def filter_pairlist(self, pairlist: list[str], tickers: dict) -> List[str]:
|
def filter_pairlist(self, pairlist: List[str], tickers: Dict) -> List[str]:
|
||||||
# Generate dynamic whitelist
|
# Generate dynamic whitelist
|
||||||
pairs = self._calculate_pairlist(pairlist, tickers)
|
pairs = self._calculate_pairlist(pairlist, tickers)
|
||||||
return pairs
|
return pairs
|
||||||
@@ -241,6 +219,7 @@ No protection should use datetime directly, but use the provided `date_now` vari
|
|||||||
|
|
||||||
!!! Tip "Writing a new Protection"
|
!!! Tip "Writing a new Protection"
|
||||||
Best copy one of the existing Protections to have a good example.
|
Best copy one of the existing Protections to have a good example.
|
||||||
|
Don't forget to register your protection in `constants.py` under the variable `AVAILABLE_PROTECTIONS` - otherwise it will not be selectable.
|
||||||
|
|
||||||
#### Implementation of a new protection
|
#### Implementation of a new protection
|
||||||
|
|
||||||
@@ -260,7 +239,7 @@ For that reason, they must implement the following methods:
|
|||||||
|
|
||||||
The `until` portion should be calculated using the provided `calculate_lock_end()` method.
|
The `until` portion should be calculated using the provided `calculate_lock_end()` method.
|
||||||
|
|
||||||
All Protections should use `"stop_duration"` / `"stop_duration_candles"` to define how long a pair (or all pairs) should be locked.
|
All Protections should use `"stop_duration"` / `"stop_duration_candles"` to define how long a pair (or all pairs) should be locked.
|
||||||
The content of this is made available as `self._stop_duration` to each Protection.
|
The content of this is made available as `self._stop_duration` to each Protection.
|
||||||
|
|
||||||
If your protection requires a look-back period, please use `"lookback_period"` / `"lookback_period_candles"` to keep all protections aligned.
|
If your protection requires a look-back period, please use `"lookback_period"` / `"lookback_period_candles"` to keep all protections aligned.
|
||||||
@@ -304,7 +283,7 @@ The `IProtection` parent class provides a helper method for this in `calculate_l
|
|||||||
|
|
||||||
Most exchanges supported by CCXT should work out of the box.
|
Most exchanges supported by CCXT should work out of the box.
|
||||||
|
|
||||||
To quickly test the public endpoints of an exchange, add a configuration for your exchange to `tests/exchange_online/conftest.py` and run these tests with `pytest --longrun tests/exchange_online/test_ccxt_compat.py`.
|
To quickly test the public endpoints of an exchange, add a configuration for your exchange to `test_ccxt_compat.py` and run these tests with `pytest --longrun tests/exchange/test_ccxt_compat.py`.
|
||||||
Completing these tests successfully is a good basis point (it's a requirement, actually), however these won't guarantee correct exchange functioning, as this only tests public endpoints, but no private endpoint (like generate order or similar).
|
Completing these tests successfully is a good basis point (it's a requirement, actually), however these won't guarantee correct exchange functioning, as this only tests public endpoints, but no private endpoint (like generate order or similar).
|
||||||
|
|
||||||
Also try to use `freqtrade download-data` for an extended timerange (multiple months) and verify that the data downloaded correctly (no holes, the specified timerange was actually downloaded).
|
Also try to use `freqtrade download-data` for an extended timerange (multiple months) and verify that the data downloaded correctly (no holes, the specified timerange was actually downloaded).
|
||||||
@@ -319,7 +298,6 @@ Additional tests / steps to complete:
|
|||||||
* Check if balance shows correctly (*)
|
* Check if balance shows correctly (*)
|
||||||
* Create market order (*)
|
* Create market order (*)
|
||||||
* Create limit order (*)
|
* Create limit order (*)
|
||||||
* Cancel order (*)
|
|
||||||
* Complete trade (enter + exit) (*)
|
* Complete trade (enter + exit) (*)
|
||||||
* Compare result calculation between exchange and bot
|
* Compare result calculation between exchange and bot
|
||||||
* Ensure fees are applied correctly (check the database against the exchange)
|
* Ensure fees are applied correctly (check the database against the exchange)
|
||||||
@@ -342,18 +320,18 @@ To check how the new exchange behaves, you can use the following snippet:
|
|||||||
|
|
||||||
``` python
|
``` python
|
||||||
import ccxt
|
import ccxt
|
||||||
from datetime import datetime, timezone
|
from datetime import datetime
|
||||||
from freqtrade.data.converter import ohlcv_to_dataframe
|
from freqtrade.data.converter import ohlcv_to_dataframe
|
||||||
ct = ccxt.binance() # Use the exchange you're testing
|
ct = ccxt.binance()
|
||||||
timeframe = "1d"
|
timeframe = "1d"
|
||||||
pair = "BTC/USDT" # Make sure to use a pair that exists on that exchange!
|
pair = "XLM/BTC" # Make sure to use a pair that exists on that exchange!
|
||||||
raw = ct.fetch_ohlcv(pair, timeframe=timeframe)
|
raw = ct.fetch_ohlcv(pair, timeframe=timeframe)
|
||||||
|
|
||||||
# convert to dataframe
|
# convert to dataframe
|
||||||
df1 = ohlcv_to_dataframe(raw, timeframe, pair=pair, drop_incomplete=False)
|
df1 = ohlcv_to_dataframe(raw, timeframe, pair=pair, drop_incomplete=False)
|
||||||
|
|
||||||
print(df1.tail(1))
|
print(df1.tail(1))
|
||||||
print(datetime.now(timezone.utc))
|
print(datetime.utcnow())
|
||||||
```
|
```
|
||||||
|
|
||||||
``` output
|
``` output
|
||||||
@@ -377,8 +355,8 @@ from pathlib import Path
|
|||||||
|
|
||||||
exchange = ccxt.binance({
|
exchange = ccxt.binance({
|
||||||
'apiKey': '<apikey>',
|
'apiKey': '<apikey>',
|
||||||
'secret': '<secret>',
|
'secret': '<secret>'
|
||||||
'options': {'defaultType': 'swap'}
|
'options': {'defaultType': 'future'}
|
||||||
})
|
})
|
||||||
_ = exchange.load_markets()
|
_ = exchange.load_markets()
|
||||||
|
|
||||||
@@ -420,9 +398,6 @@ This part of the documentation is aimed at maintainers, and shows how to create
|
|||||||
|
|
||||||
### Create release branch
|
### Create release branch
|
||||||
|
|
||||||
!!! Note
|
|
||||||
Make sure that the `stable` branch is up-to-date!
|
|
||||||
|
|
||||||
First, pick a commit that's about one week old (to not include latest additions to releases).
|
First, pick a commit that's about one week old (to not include latest additions to releases).
|
||||||
|
|
||||||
``` bash
|
``` bash
|
||||||
@@ -435,11 +410,14 @@ Determine if crucial bugfixes have been made between this commit and the current
|
|||||||
* Merge the release branch (stable) into this branch.
|
* Merge the release branch (stable) into this branch.
|
||||||
* Edit `freqtrade/__init__.py` and add the version matching the current date (for example `2019.7` for July 2019). Minor versions can be `2019.7.1` should we need to do a second release that month. Version numbers must follow allowed versions from PEP0440 to avoid failures pushing to pypi.
|
* Edit `freqtrade/__init__.py` and add the version matching the current date (for example `2019.7` for July 2019). Minor versions can be `2019.7.1` should we need to do a second release that month. Version numbers must follow allowed versions from PEP0440 to avoid failures pushing to pypi.
|
||||||
* Commit this part.
|
* Commit this part.
|
||||||
* Push that branch to the remote and create a PR against the **stable branch**.
|
* push that branch to the remote and create a PR against the stable branch.
|
||||||
* Update develop version to next version following the pattern `2019.8-dev`.
|
* Update develop version to next version following the pattern `2019.8-dev`.
|
||||||
|
|
||||||
### Create changelog from git commits
|
### Create changelog from git commits
|
||||||
|
|
||||||
|
!!! Note
|
||||||
|
Make sure that the `stable` branch is up-to-date!
|
||||||
|
|
||||||
``` bash
|
``` bash
|
||||||
# Needs to be done before merging / pulling that branch.
|
# Needs to be done before merging / pulling that branch.
|
||||||
git log --oneline --no-decorate --no-merges stable..new_release
|
git log --oneline --no-decorate --no-merges stable..new_release
|
||||||
@@ -456,11 +434,6 @@ To keep the release-log short, best wrap the full git changelog into a collapsib
|
|||||||
</details>
|
</details>
|
||||||
```
|
```
|
||||||
|
|
||||||
### FreqUI release
|
|
||||||
|
|
||||||
If FreqUI has been updated substantially, make sure to create a release before merging the release branch.
|
|
||||||
Make sure that freqUI CI on the release is finished and passed before merging the release.
|
|
||||||
|
|
||||||
### Create github release / tag
|
### Create github release / tag
|
||||||
|
|
||||||
Once the PR against stable is merged (best right after merging):
|
Once the PR against stable is merged (best right after merging):
|
||||||
@@ -468,30 +441,21 @@ Once the PR against stable is merged (best right after merging):
|
|||||||
* Use the button "Draft a new release" in the Github UI (subsection releases).
|
* Use the button "Draft a new release" in the Github UI (subsection releases).
|
||||||
* Use the version-number specified as tag.
|
* Use the version-number specified as tag.
|
||||||
* Use "stable" as reference (this step comes after the above PR is merged).
|
* Use "stable" as reference (this step comes after the above PR is merged).
|
||||||
* Use the above changelog as release comment (as codeblock).
|
* Use the above changelog as release comment (as codeblock)
|
||||||
* Use the below snippet for the new release
|
|
||||||
|
|
||||||
??? Tip "Release template"
|
|
||||||
````
|
|
||||||
--8<-- "includes/release_template.md"
|
|
||||||
````
|
|
||||||
|
|
||||||
## Releases
|
## Releases
|
||||||
|
|
||||||
### pypi
|
### pypi
|
||||||
|
|
||||||
!!! Warning "Manual Releases"
|
!!! Note
|
||||||
This process is automated as part of Github Actions.
|
This process is now automated as part of Github Actions.
|
||||||
Manual pypi pushes should not be necessary.
|
|
||||||
|
|
||||||
??? example "Manual release"
|
To create a pypi release, please run the following commands:
|
||||||
To manually create a pypi release, please run the following commands:
|
|
||||||
|
|
||||||
Additional requirement: `wheel`, `twine` (for uploading), account on pypi with proper permissions.
|
Additional requirement: `wheel`, `twine` (for uploading), account on pypi with proper permissions.
|
||||||
|
|
||||||
``` bash
|
``` bash
|
||||||
pip install -U build
|
python setup.py sdist bdist_wheel
|
||||||
python -m build --sdist --wheel
|
|
||||||
|
|
||||||
# For pypi test (to check if some change to the installation did work)
|
# For pypi test (to check if some change to the installation did work)
|
||||||
twine upload --repository-url https://test.pypi.org/legacy/ dist/*
|
twine upload --repository-url https://test.pypi.org/legacy/ dist/*
|
||||||
|
|||||||
@@ -4,25 +4,20 @@ This page explains how to run the bot with Docker. It is not meant to work out o
|
|||||||
|
|
||||||
## Install Docker
|
## Install Docker
|
||||||
|
|
||||||
Start by downloading and installing Docker / Docker Desktop for your platform:
|
Start by downloading and installing Docker CE for your platform:
|
||||||
|
|
||||||
* [Mac](https://docs.docker.com/docker-for-mac/install/)
|
* [Mac](https://docs.docker.com/docker-for-mac/install/)
|
||||||
* [Windows](https://docs.docker.com/docker-for-windows/install/)
|
* [Windows](https://docs.docker.com/docker-for-windows/install/)
|
||||||
* [Linux](https://docs.docker.com/install/)
|
* [Linux](https://docs.docker.com/install/)
|
||||||
|
|
||||||
!!! Info "Docker compose install"
|
To simplify running freqtrade, [`docker-compose`](https://docs.docker.com/compose/install/) should be installed and available to follow the below [docker quick start guide](#docker-quick-start).
|
||||||
Freqtrade documentation assumes the use of Docker desktop (or the docker compose plugin).
|
|
||||||
While the docker-compose standalone installation still works, it will require changing all `docker compose` commands from `docker compose` to `docker-compose` to work (e.g. `docker compose up -d` will become `docker-compose up -d`).
|
|
||||||
|
|
||||||
??? Warning "Docker on windows"
|
## Freqtrade with docker-compose
|
||||||
If you just installed docker on a windows system, make sure to reboot your system, otherwise you might encounter unexplainable Problems related to network connectivity to docker containers.
|
|
||||||
|
|
||||||
## Freqtrade with docker
|
Freqtrade provides an official Docker image on [Dockerhub](https://hub.docker.com/r/freqtradeorg/freqtrade/), as well as a [docker-compose file](https://github.com/freqtrade/freqtrade/blob/stable/docker-compose.yml) ready for usage.
|
||||||
|
|
||||||
Freqtrade provides an official Docker image on [Dockerhub](https://hub.docker.com/r/freqtradeorg/freqtrade/), as well as a [docker compose file](https://github.com/freqtrade/freqtrade/blob/stable/docker-compose.yml) ready for usage.
|
|
||||||
|
|
||||||
!!! Note
|
!!! Note
|
||||||
- The following section assumes that `docker` is installed and available to the logged in user.
|
- The following section assumes that `docker` and `docker-compose` are installed and available to the logged in user.
|
||||||
- All below commands use relative directories and will have to be executed from the directory containing the `docker-compose.yml` file.
|
- All below commands use relative directories and will have to be executed from the directory containing the `docker-compose.yml` file.
|
||||||
|
|
||||||
### Docker quick start
|
### Docker quick start
|
||||||
@@ -36,13 +31,13 @@ cd ft_userdata/
|
|||||||
curl https://raw.githubusercontent.com/freqtrade/freqtrade/stable/docker-compose.yml -o docker-compose.yml
|
curl https://raw.githubusercontent.com/freqtrade/freqtrade/stable/docker-compose.yml -o docker-compose.yml
|
||||||
|
|
||||||
# Pull the freqtrade image
|
# Pull the freqtrade image
|
||||||
docker compose pull
|
docker-compose pull
|
||||||
|
|
||||||
# Create user directory structure
|
# Create user directory structure
|
||||||
docker compose run --rm freqtrade create-userdir --userdir user_data
|
docker-compose run --rm freqtrade create-userdir --userdir user_data
|
||||||
|
|
||||||
# Create configuration - Requires answering interactive questions
|
# Create configuration - Requires answering interactive questions
|
||||||
docker compose run --rm freqtrade new-config --config user_data/config.json
|
docker-compose run --rm freqtrade new-config --config user_data/config.json
|
||||||
```
|
```
|
||||||
|
|
||||||
The above snippet creates a new directory called `ft_userdata`, downloads the latest compose file and pulls the freqtrade image.
|
The above snippet creates a new directory called `ft_userdata`, downloads the latest compose file and pulls the freqtrade image.
|
||||||
@@ -69,7 +64,7 @@ The `SampleStrategy` is run by default.
|
|||||||
Once this is done, you're ready to launch the bot in trading mode (Dry-run or Live-trading, depending on your answer to the corresponding question you made above).
|
Once this is done, you're ready to launch the bot in trading mode (Dry-run or Live-trading, depending on your answer to the corresponding question you made above).
|
||||||
|
|
||||||
``` bash
|
``` bash
|
||||||
docker compose up -d
|
docker-compose up -d
|
||||||
```
|
```
|
||||||
|
|
||||||
!!! Warning "Default configuration"
|
!!! Warning "Default configuration"
|
||||||
@@ -81,7 +76,7 @@ If you've selected to enable FreqUI in the `new-config` step, you will have freq
|
|||||||
|
|
||||||
You can now access the UI by typing localhost:8080 in your browser.
|
You can now access the UI by typing localhost:8080 in your browser.
|
||||||
|
|
||||||
??? Note "UI Access on a remote server"
|
??? Note "UI Access on a remote server"
|
||||||
If you're running on a VPS, you should consider using either a ssh tunnel, or setup a VPN (openVPN, wireguard) to connect to your bot.
|
If you're running on a VPS, you should consider using either a ssh tunnel, or setup a VPN (openVPN, wireguard) to connect to your bot.
|
||||||
This will ensure that freqUI is not directly exposed to the internet, which is not recommended for security reasons (freqUI does not support https out of the box).
|
This will ensure that freqUI is not directly exposed to the internet, which is not recommended for security reasons (freqUI does not support https out of the box).
|
||||||
Setup of these tools is not part of this tutorial, however many good tutorials can be found on the internet.
|
Setup of these tools is not part of this tutorial, however many good tutorials can be found on the internet.
|
||||||
@@ -89,27 +84,27 @@ You can now access the UI by typing localhost:8080 in your browser.
|
|||||||
|
|
||||||
#### Monitoring the bot
|
#### Monitoring the bot
|
||||||
|
|
||||||
You can check for running instances with `docker compose ps`.
|
You can check for running instances with `docker-compose ps`.
|
||||||
This should list the service `freqtrade` as `running`. If that's not the case, best check the logs (see next point).
|
This should list the service `freqtrade` as `running`. If that's not the case, best check the logs (see next point).
|
||||||
|
|
||||||
#### Docker compose logs
|
#### Docker-compose logs
|
||||||
|
|
||||||
Logs will be written to: `user_data/logs/freqtrade.log`.
|
Logs will be written to: `user_data/logs/freqtrade.log`.
|
||||||
You can also check the latest log with the command `docker compose logs -f`.
|
You can also check the latest log with the command `docker-compose logs -f`.
|
||||||
|
|
||||||
#### Database
|
#### Database
|
||||||
|
|
||||||
The database will be located at: `user_data/tradesv3.sqlite`
|
The database will be located at: `user_data/tradesv3.sqlite`
|
||||||
|
|
||||||
#### Updating freqtrade with docker
|
#### Updating freqtrade with docker-compose
|
||||||
|
|
||||||
Updating freqtrade when using `docker` is as simple as running the following 2 commands:
|
Updating freqtrade when using `docker-compose` is as simple as running the following 2 commands:
|
||||||
|
|
||||||
``` bash
|
``` bash
|
||||||
# Download the latest image
|
# Download the latest image
|
||||||
docker compose pull
|
docker-compose pull
|
||||||
# Restart the image
|
# Restart the image
|
||||||
docker compose up -d
|
docker-compose up -d
|
||||||
```
|
```
|
||||||
|
|
||||||
This will first pull the latest image, and will then restart the container with the just pulled version.
|
This will first pull the latest image, and will then restart the container with the just pulled version.
|
||||||
@@ -121,43 +116,43 @@ This will first pull the latest image, and will then restart the container with
|
|||||||
|
|
||||||
Advanced users may edit the docker-compose file further to include all possible options or arguments.
|
Advanced users may edit the docker-compose file further to include all possible options or arguments.
|
||||||
|
|
||||||
All freqtrade arguments will be available by running `docker compose run --rm freqtrade <command> <optional arguments>`.
|
All freqtrade arguments will be available by running `docker-compose run --rm freqtrade <command> <optional arguments>`.
|
||||||
|
|
||||||
!!! Warning "`docker compose` for trade commands"
|
!!! Warning "`docker-compose` for trade commands"
|
||||||
Trade commands (`freqtrade trade <...>`) should not be ran via `docker compose run` - but should use `docker compose up -d` instead.
|
Trade commands (`freqtrade trade <...>`) should not be ran via `docker-compose run` - but should use `docker-compose up -d` instead.
|
||||||
This makes sure that the container is properly started (including port forwardings) and will make sure that the container will restart after a system reboot.
|
This makes sure that the container is properly started (including port forwardings) and will make sure that the container will restart after a system reboot.
|
||||||
If you intend to use freqUI, please also ensure to adjust the [configuration accordingly](rest-api.md#configuration-with-docker), otherwise the UI will not be available.
|
If you intend to use freqUI, please also ensure to adjust the [configuration accordingly](rest-api.md#configuration-with-docker), otherwise the UI will not be available.
|
||||||
|
|
||||||
!!! Note "`docker compose run --rm`"
|
!!! Note "`docker-compose run --rm`"
|
||||||
Including `--rm` will remove the container after completion, and is highly recommended for all modes except trading mode (running with `freqtrade trade` command).
|
Including `--rm` will remove the container after completion, and is highly recommended for all modes except trading mode (running with `freqtrade trade` command).
|
||||||
|
|
||||||
??? Note "Using docker without docker compose"
|
??? Note "Using docker without docker-compose"
|
||||||
"`docker compose run --rm`" will require a compose file to be provided.
|
"`docker-compose run --rm`" will require a compose file to be provided.
|
||||||
Some freqtrade commands that don't require authentication such as `list-pairs` can be run with "`docker run --rm`" instead.
|
Some freqtrade commands that don't require authentication such as `list-pairs` can be run with "`docker run --rm`" instead.
|
||||||
For example `docker run --rm freqtradeorg/freqtrade:stable list-pairs --exchange binance --quote BTC --print-json`.
|
For example `docker run --rm freqtradeorg/freqtrade:stable list-pairs --exchange binance --quote BTC --print-json`.
|
||||||
This can be useful for fetching exchange information to add to your `config.json` without affecting your running containers.
|
This can be useful for fetching exchange information to add to your `config.json` without affecting your running containers.
|
||||||
|
|
||||||
#### Example: Download data with docker
|
#### Example: Download data with docker-compose
|
||||||
|
|
||||||
Download backtesting data for 5 days for the pair ETH/BTC and 1h timeframe from Binance. The data will be stored in the directory `user_data/data/` on the host.
|
Download backtesting data for 5 days for the pair ETH/BTC and 1h timeframe from Binance. The data will be stored in the directory `user_data/data/` on the host.
|
||||||
|
|
||||||
``` bash
|
``` bash
|
||||||
docker compose run --rm freqtrade download-data --pairs ETH/BTC --exchange binance --days 5 -t 1h
|
docker-compose run --rm freqtrade download-data --pairs ETH/BTC --exchange binance --days 5 -t 1h
|
||||||
```
|
```
|
||||||
|
|
||||||
Head over to the [Data Downloading Documentation](data-download.md) for more details on downloading data.
|
Head over to the [Data Downloading Documentation](data-download.md) for more details on downloading data.
|
||||||
|
|
||||||
#### Example: Backtest with docker
|
#### Example: Backtest with docker-compose
|
||||||
|
|
||||||
Run backtesting in docker-containers for SampleStrategy and specified timerange of historical data, on 5m timeframe:
|
Run backtesting in docker-containers for SampleStrategy and specified timerange of historical data, on 5m timeframe:
|
||||||
|
|
||||||
``` bash
|
``` bash
|
||||||
docker compose run --rm freqtrade backtesting --config user_data/config.json --strategy SampleStrategy --timerange 20190801-20191001 -i 5m
|
docker-compose run --rm freqtrade backtesting --config user_data/config.json --strategy SampleStrategy --timerange 20190801-20191001 -i 5m
|
||||||
```
|
```
|
||||||
|
|
||||||
Head over to the [Backtesting Documentation](backtesting.md) to learn more.
|
Head over to the [Backtesting Documentation](backtesting.md) to learn more.
|
||||||
|
|
||||||
### Additional dependencies with docker
|
### Additional dependencies with docker-compose
|
||||||
|
|
||||||
If your strategy requires dependencies not included in the default image - it will be necessary to build the image on your host.
|
If your strategy requires dependencies not included in the default image - it will be necessary to build the image on your host.
|
||||||
For this, please create a Dockerfile containing installation steps for the additional dependencies (have a look at [docker/Dockerfile.custom](https://github.com/freqtrade/freqtrade/blob/develop/docker/Dockerfile.custom) for an example).
|
For this, please create a Dockerfile containing installation steps for the additional dependencies (have a look at [docker/Dockerfile.custom](https://github.com/freqtrade/freqtrade/blob/develop/docker/Dockerfile.custom) for an example).
|
||||||
@@ -171,15 +166,15 @@ You'll then also need to modify the `docker-compose.yml` file and uncomment the
|
|||||||
dockerfile: "./Dockerfile.<yourextension>"
|
dockerfile: "./Dockerfile.<yourextension>"
|
||||||
```
|
```
|
||||||
|
|
||||||
You can then run `docker compose build --pull` to build the docker image, and run it using the commands described above.
|
You can then run `docker-compose build --pull` to build the docker image, and run it using the commands described above.
|
||||||
|
|
||||||
### Plotting with docker
|
### Plotting with docker-compose
|
||||||
|
|
||||||
Commands `freqtrade plot-profit` and `freqtrade plot-dataframe` ([Documentation](plotting.md)) are available by changing the image to `*_plot` in your `docker-compose.yml` file.
|
Commands `freqtrade plot-profit` and `freqtrade plot-dataframe` ([Documentation](plotting.md)) are available by changing the image to `*_plot` in your docker-compose.yml file.
|
||||||
You can then use these commands as follows:
|
You can then use these commands as follows:
|
||||||
|
|
||||||
``` bash
|
``` bash
|
||||||
docker compose run --rm freqtrade plot-dataframe --strategy AwesomeStrategy -p BTC/ETH --timerange=20180801-20180805
|
docker-compose run --rm freqtrade plot-dataframe --strategy AwesomeStrategy -p BTC/ETH --timerange=20180801-20180805
|
||||||
```
|
```
|
||||||
|
|
||||||
The output will be stored in the `user_data/plot` directory, and can be opened with any modern browser.
|
The output will be stored in the `user_data/plot` directory, and can be opened with any modern browser.
|
||||||
@@ -190,7 +185,7 @@ Freqtrade provides a docker-compose file which starts up a jupyter lab server.
|
|||||||
You can run this server using the following command:
|
You can run this server using the following command:
|
||||||
|
|
||||||
``` bash
|
``` bash
|
||||||
docker compose -f docker/docker-compose-jupyter.yml up
|
docker-compose -f docker/docker-compose-jupyter.yml up
|
||||||
```
|
```
|
||||||
|
|
||||||
This will create a docker-container running jupyter lab, which will be accessible using `https://127.0.0.1:8888/lab`.
|
This will create a docker-container running jupyter lab, which will be accessible using `https://127.0.0.1:8888/lab`.
|
||||||
@@ -199,7 +194,7 @@ Please use the link that's printed in the console after startup for simplified l
|
|||||||
Since part of this image is built on your machine, it is recommended to rebuild the image from time to time to keep freqtrade (and dependencies) up-to-date.
|
Since part of this image is built on your machine, it is recommended to rebuild the image from time to time to keep freqtrade (and dependencies) up-to-date.
|
||||||
|
|
||||||
``` bash
|
``` bash
|
||||||
docker compose -f docker/docker-compose-jupyter.yml build --no-cache
|
docker-compose -f docker/docker-compose-jupyter.yml build --no-cache
|
||||||
```
|
```
|
||||||
|
|
||||||
## Troubleshooting
|
## Troubleshooting
|
||||||
@@ -207,7 +202,7 @@ docker compose -f docker/docker-compose-jupyter.yml build --no-cache
|
|||||||
### Docker on Windows
|
### Docker on Windows
|
||||||
|
|
||||||
* Error: `"Timestamp for this request is outside of the recvWindow."`
|
* Error: `"Timestamp for this request is outside of the recvWindow."`
|
||||||
The market api requests require a synchronized clock but the time in the docker container shifts a bit over time into the past.
|
* The market api requests require a synchronized clock but the time in the docker container shifts a bit over time into the past.
|
||||||
To fix this issue temporarily you need to run `wsl --shutdown` and restart docker again (a popup on windows 10 will ask you to do so).
|
To fix this issue temporarily you need to run `wsl --shutdown` and restart docker again (a popup on windows 10 will ask you to do so).
|
||||||
A permanent solution is either to host the docker container on a linux host or restart the wsl from time to time with the scheduler.
|
A permanent solution is either to host the docker container on a linux host or restart the wsl from time to time with the scheduler.
|
||||||
|
|
||||||
@@ -217,10 +212,6 @@ docker compose -f docker/docker-compose-jupyter.yml build --no-cache
|
|||||||
start "" "C:\Program Files\Docker\Docker\Docker Desktop.exe"
|
start "" "C:\Program Files\Docker\Docker\Docker Desktop.exe"
|
||||||
```
|
```
|
||||||
|
|
||||||
* Cannot connect to the API (Windows)
|
|
||||||
If you're on windows and just installed Docker (desktop), make sure to reboot your System. Docker can have problems with network connectivity without a restart.
|
|
||||||
You should obviously also make sure to have your [settings](#accessing-the-ui) accordingly.
|
|
||||||
|
|
||||||
!!! Warning
|
!!! Warning
|
||||||
Due to the above, we do not recommend the usage of docker on windows for production setups, but only for experimentation, datadownload and backtesting.
|
Due to the above, we do not recommend the usage of docker on windows for production setups, but only for experimentation, datadownload and backtesting.
|
||||||
Best use a linux-VPS for running freqtrade reliably.
|
Best use a linux-VPS for running freqtrade reliably.
|
||||||
|
|||||||
@@ -2,10 +2,6 @@
|
|||||||
|
|
||||||
The `Edge Positioning` module uses probability to calculate your win rate and risk reward ratio. It will use these statistics to control your strategy trade entry points, position size, and stoploss.
|
The `Edge Positioning` module uses probability to calculate your win rate and risk reward ratio. It will use these statistics to control your strategy trade entry points, position size, and stoploss.
|
||||||
|
|
||||||
!!! Danger "Deprecated functionality"
|
|
||||||
`Edge positioning` (or short Edge) is currently in maintenance mode only (we keep existing functionality alive) and should be considered as deprecated.
|
|
||||||
It will currently not receive new features until either someone stepped forward to take up ownership of that module - or we'll decide to remove edge from freqtrade.
|
|
||||||
|
|
||||||
!!! Warning
|
!!! Warning
|
||||||
When using `Edge positioning` with a dynamic whitelist (VolumePairList), make sure to also use `AgeFilter` and set it to at least `calculate_since_number_of_days` to avoid problems with missing data.
|
When using `Edge positioning` with a dynamic whitelist (VolumePairList), make sure to also use `AgeFilter` and set it to at least `calculate_since_number_of_days` to avoid problems with missing data.
|
||||||
|
|
||||||
@@ -137,7 +133,7 @@ $$ R = \frac{\text{average_profit}}{\text{average_loss}} = \frac{\mu_{win}}{\mu_
|
|||||||
|
|
||||||
### Expectancy
|
### Expectancy
|
||||||
|
|
||||||
By combining the Win Rate $W$ and the Risk Reward ratio $R$, we can create an expectancy ratio $E$. An expectancy ratio is the expected return of the investment made in a trade. We can compute the value of $E$ as follows:
|
By combining the Win Rate $W$ and the Risk Reward ratio $R$, we can create an expectancy ratio $E$. An expectancy ratio is the expected return of the investment made in a trade. We can compute the value of $E$ as follows:
|
||||||
|
|
||||||
$$E = R * W - L$$
|
$$E = R * W - L$$
|
||||||
|
|
||||||
|
|||||||
@@ -54,9 +54,6 @@ This configuration enables kraken, as well as rate-limiting to avoid bans from t
|
|||||||
|
|
||||||
## Binance
|
## Binance
|
||||||
|
|
||||||
!!! Warning "Server location and geo-ip restrictions"
|
|
||||||
Please be aware that Binance restricts API access regarding the server country. The current and non-exhaustive countries blocked are Canada, Malaysia, Netherlands and United States. Please go to [binance terms > b. Eligibility](https://www.binance.com/en/terms) to find up to date list.
|
|
||||||
|
|
||||||
Binance supports [time_in_force](configuration.md#understand-order_time_in_force).
|
Binance supports [time_in_force](configuration.md#understand-order_time_in_force).
|
||||||
|
|
||||||
!!! Tip "Stoploss on Exchange"
|
!!! Tip "Stoploss on Exchange"
|
||||||
@@ -68,8 +65,6 @@ Binance supports [time_in_force](configuration.md#understand-order_time_in_force
|
|||||||
For Binance, it is suggested to add `"BNB/<STAKE>"` to your blacklist to avoid issues, unless you are willing to maintain enough extra `BNB` on the account or unless you're willing to disable using `BNB` for fees.
|
For Binance, it is suggested to add `"BNB/<STAKE>"` to your blacklist to avoid issues, unless you are willing to maintain enough extra `BNB` on the account or unless you're willing to disable using `BNB` for fees.
|
||||||
Binance accounts may use `BNB` for fees, and if a trade happens to be on `BNB`, further trades may consume this position and make the initial BNB trade unsellable as the expected amount is not there anymore.
|
Binance accounts may use `BNB` for fees, and if a trade happens to be on `BNB`, further trades may consume this position and make the initial BNB trade unsellable as the expected amount is not there anymore.
|
||||||
|
|
||||||
If not enough `BNB` is available to cover transaction fees, then fees will not be covered by `BNB` and no fee reduction will occur. Freqtrade will never buy BNB to cover for fees. BNB needs to be bought and monitored manually to this end.
|
|
||||||
|
|
||||||
### Binance sites
|
### Binance sites
|
||||||
|
|
||||||
Binance has been split into 2, and users must use the correct ccxt exchange ID for their exchange, otherwise API keys are not recognized.
|
Binance has been split into 2, and users must use the correct ccxt exchange ID for their exchange, otherwise API keys are not recognized.
|
||||||
@@ -77,25 +72,6 @@ Binance has been split into 2, and users must use the correct ccxt exchange ID f
|
|||||||
* [binance.com](https://www.binance.com/) - International users. Use exchange id: `binance`.
|
* [binance.com](https://www.binance.com/) - International users. Use exchange id: `binance`.
|
||||||
* [binance.us](https://www.binance.us/) - US based users. Use exchange id: `binanceus`.
|
* [binance.us](https://www.binance.us/) - US based users. Use exchange id: `binanceus`.
|
||||||
|
|
||||||
### Binance RSA keys
|
|
||||||
|
|
||||||
Freqtrade supports binance RSA API keys.
|
|
||||||
|
|
||||||
We recommend to use them as environment variable.
|
|
||||||
|
|
||||||
``` bash
|
|
||||||
export FREQTRADE__EXCHANGE__SECRET="$(cat ./rsa_binance.private)"
|
|
||||||
```
|
|
||||||
|
|
||||||
They can however also be configured via configuration file. Since json doesn't support multi-line strings, you'll have to replace all newlines with `\n` to have a valid json file.
|
|
||||||
|
|
||||||
``` json
|
|
||||||
// ...
|
|
||||||
"key": "<someapikey>",
|
|
||||||
"secret": "-----BEGIN PRIVATE KEY-----\nMIIEvQIBABACAFQA<...>s8KX8=\n-----END PRIVATE KEY-----"
|
|
||||||
// ...
|
|
||||||
```
|
|
||||||
|
|
||||||
### Binance Futures
|
### Binance Futures
|
||||||
|
|
||||||
Binance has specific (unfortunately complex) [Futures Trading Quantitative Rules](https://www.binance.com/en/support/faq/4f462ebe6ff445d4a170be7d9e897272) which need to be followed, and which prohibit a too low stake-amount (among others) for too many orders.
|
Binance has specific (unfortunately complex) [Futures Trading Quantitative Rules](https://www.binance.com/en/support/faq/4f462ebe6ff445d4a170be7d9e897272) which need to be followed, and which prohibit a too low stake-amount (among others) for too many orders.
|
||||||
@@ -127,17 +103,8 @@ These settings will be checked on startup, and freqtrade will show an error if t
|
|||||||
|
|
||||||
Freqtrade will not attempt to change these settings.
|
Freqtrade will not attempt to change these settings.
|
||||||
|
|
||||||
## Bingx
|
|
||||||
|
|
||||||
BingX supports [time_in_force](configuration.md#understand-order_time_in_force) with settings "GTC" (good till cancelled), "IOC" (immediate-or-cancel) and "PO" (Post only) settings.
|
|
||||||
|
|
||||||
!!! Tip "Stoploss on Exchange"
|
|
||||||
Bingx supports `stoploss_on_exchange` and can use both stop-limit and stop-market orders. It provides great advantages, so we recommend to benefit from it by enabling stoploss on exchange.
|
|
||||||
|
|
||||||
## Kraken
|
## Kraken
|
||||||
|
|
||||||
Kraken supports [time_in_force](configuration.md#understand-order_time_in_force) with settings "GTC" (good till cancelled), "IOC" (immediate-or-cancel) and "PO" (Post only) settings.
|
|
||||||
|
|
||||||
!!! Tip "Stoploss on Exchange"
|
!!! Tip "Stoploss on Exchange"
|
||||||
Kraken supports `stoploss_on_exchange` and can use both stop-loss-market and stop-loss-limit orders. It provides great advantages, so we recommend to benefit from it.
|
Kraken supports `stoploss_on_exchange` and can use both stop-loss-market and stop-loss-limit orders. It provides great advantages, so we recommend to benefit from it.
|
||||||
You can use either `"limit"` or `"market"` in the `order_types.stoploss` configuration setting to decide which type to use.
|
You can use either `"limit"` or `"market"` in the `order_types.stoploss` configuration setting to decide which type to use.
|
||||||
@@ -147,41 +114,13 @@ Kraken supports [time_in_force](configuration.md#understand-order_time_in_force)
|
|||||||
The Kraken API does only provide 720 historic candles, which is sufficient for Freqtrade dry-run and live trade modes, but is a problem for backtesting.
|
The Kraken API does only provide 720 historic candles, which is sufficient for Freqtrade dry-run and live trade modes, but is a problem for backtesting.
|
||||||
To download data for the Kraken exchange, using `--dl-trades` is mandatory, otherwise the bot will download the same 720 candles over and over, and you'll not have enough backtest data.
|
To download data for the Kraken exchange, using `--dl-trades` is mandatory, otherwise the bot will download the same 720 candles over and over, and you'll not have enough backtest data.
|
||||||
|
|
||||||
To speed up downloading, you can download the [trades zip files](https://support.kraken.com/hc/en-us/articles/360047543791-Downloadable-historical-market-data-time-and-sales-) kraken provides.
|
Due to the heavy rate-limiting applied by Kraken, the following configuration section should be used to download data:
|
||||||
These are usually updated once per quarter. Freqtrade expects these files to be placed in `user_data/data/kraken/trades_csv`.
|
|
||||||
|
|
||||||
A structure as follows can make sense if using incremental files, with the "full" history in one directory, and incremental files in different directories.
|
``` json
|
||||||
The assumption for this mode is that the data is downloaded and unzipped keeping filenames as they are.
|
"ccxt_async_config": {
|
||||||
Duplicate content will be ignored (based on timestamp) - though the assumption is that there is no gap in the data.
|
"enableRateLimit": true,
|
||||||
|
"rateLimit": 3100
|
||||||
This means, if your "full" history ends in Q4 2022 - then both incremental updates Q1 2023 and Q2 2023 are available.
|
},
|
||||||
Not having this will lead to incomplete data, and therefore invalid results while using the data.
|
|
||||||
|
|
||||||
```
|
|
||||||
└── trades_csv
|
|
||||||
├── Kraken_full_history
|
|
||||||
│ ├── BCHEUR.csv
|
|
||||||
│ └── XBTEUR.csv
|
|
||||||
├── Kraken_Trading_History_Q1_2023
|
|
||||||
│ ├── BCHEUR.csv
|
|
||||||
│ └── XBTEUR.csv
|
|
||||||
└── Kraken_Trading_History_Q2_2023
|
|
||||||
├── BCHEUR.csv
|
|
||||||
└── XBTEUR.csv
|
|
||||||
```
|
|
||||||
|
|
||||||
You can convert these files into freqtrade files:
|
|
||||||
|
|
||||||
``` bash
|
|
||||||
freqtrade convert-trade-data --exchange kraken --format-from kraken_csv --format-to feather
|
|
||||||
# Convert trade data to different ohlcv timeframes
|
|
||||||
freqtrade trades-to-ohlcv -p BTC/EUR BCH/EUR --exchange kraken -t 1m 5m 15m 1h
|
|
||||||
```
|
|
||||||
|
|
||||||
The converted data also makes downloading data possible, and will start the download after the latest loaded trade.
|
|
||||||
|
|
||||||
``` bash
|
|
||||||
freqtrade download-data --exchange kraken --dl-trades -p BTC/EUR BCH/EUR
|
|
||||||
```
|
```
|
||||||
|
|
||||||
!!! Warning "Downloading data from kraken"
|
!!! Warning "Downloading data from kraken"
|
||||||
@@ -192,6 +131,68 @@ freqtrade download-data --exchange kraken --dl-trades -p BTC/EUR BCH/EUR
|
|||||||
Please pay attention that the rateLimit configuration entry holds the delay in milliseconds between requests, NOT a requests/sec rate.
|
Please pay attention that the rateLimit configuration entry holds the delay in milliseconds between requests, NOT a requests/sec rate.
|
||||||
So, in order to mitigate Kraken API "Rate limit exceeded" exception, this configuration should be increased, NOT decreased.
|
So, in order to mitigate Kraken API "Rate limit exceeded" exception, this configuration should be increased, NOT decreased.
|
||||||
|
|
||||||
|
## Bittrex
|
||||||
|
|
||||||
|
### Order types
|
||||||
|
|
||||||
|
Bittrex does not support market orders. If you have a message at the bot startup about this, you should change order type values set in your configuration and/or in the strategy from `"market"` to `"limit"`. See some more details on this [here in the FAQ](faq.md#im-getting-the-exchange-bittrex-does-not-support-market-orders-message-and-cannot-run-my-strategy).
|
||||||
|
|
||||||
|
Bittrex also does not support `VolumePairlist` due to limited / split API constellation at the moment.
|
||||||
|
Please use `StaticPairlist`. Other pairlists (other than `VolumePairlist`) should not be affected.
|
||||||
|
|
||||||
|
### Volume pairlist
|
||||||
|
|
||||||
|
Bittrex does not support the direct usage of VolumePairList. This can however be worked around by using the advanced mode with `lookback_days: 1` (or more), which will emulate 24h volume.
|
||||||
|
|
||||||
|
Read more in the [pairlist documentation](plugins.md#volumepairlist-advanced-mode).
|
||||||
|
|
||||||
|
### Restricted markets
|
||||||
|
|
||||||
|
Bittrex split its exchange into US and International versions.
|
||||||
|
The International version has more pairs available, however the API always returns all pairs, so there is currently no automated way to detect if you're affected by the restriction.
|
||||||
|
|
||||||
|
If you have restricted pairs in your whitelist, you'll get a warning message in the log on Freqtrade startup for each restricted pair.
|
||||||
|
|
||||||
|
The warning message will look similar to the following:
|
||||||
|
|
||||||
|
``` output
|
||||||
|
[...] Message: bittrex {"success":false,"message":"RESTRICTED_MARKET","result":null,"explanation":null}"
|
||||||
|
```
|
||||||
|
|
||||||
|
If you're an "International" customer on the Bittrex exchange, then this warning will probably not impact you.
|
||||||
|
If you're a US customer, the bot will fail to create orders for these pairs, and you should remove them from your whitelist.
|
||||||
|
|
||||||
|
You can get a list of restricted markets by using the following snippet:
|
||||||
|
|
||||||
|
``` python
|
||||||
|
import ccxt
|
||||||
|
ct = ccxt.bittrex()
|
||||||
|
lm = ct.load_markets()
|
||||||
|
|
||||||
|
res = [p for p, x in lm.items() if 'US' in x['info']['prohibitedIn']]
|
||||||
|
print(res)
|
||||||
|
```
|
||||||
|
|
||||||
|
## FTX
|
||||||
|
|
||||||
|
!!! Tip "Stoploss on Exchange"
|
||||||
|
FTX supports `stoploss_on_exchange` and can use both stop-loss-market and stop-loss-limit orders. It provides great advantages, so we recommend to benefit from it.
|
||||||
|
You can use either `"limit"` or `"market"` in the `order_types.stoploss` configuration setting to decide which type of stoploss shall be used.
|
||||||
|
|
||||||
|
### Using subaccounts
|
||||||
|
|
||||||
|
To use subaccounts with FTX, you need to edit the configuration and add the following:
|
||||||
|
|
||||||
|
``` json
|
||||||
|
"exchange": {
|
||||||
|
"ccxt_config": {
|
||||||
|
"headers": {
|
||||||
|
"FTX-SUBACCOUNT": "name"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
## Kucoin
|
## Kucoin
|
||||||
|
|
||||||
Kucoin requires a passphrase for each api key, you will therefore need to add this key into the configuration so your exchange section looks as follows:
|
Kucoin requires a passphrase for each api key, you will therefore need to add this key into the configuration so your exchange section looks as follows:
|
||||||
@@ -217,12 +218,12 @@ Kucoin supports [time_in_force](configuration.md#understand-order_time_in_force)
|
|||||||
For Kucoin, it is suggested to add `"KCS/<STAKE>"` to your blacklist to avoid issues, unless you are willing to maintain enough extra `KCS` on the account or unless you're willing to disable using `KCS` for fees.
|
For Kucoin, it is suggested to add `"KCS/<STAKE>"` to your blacklist to avoid issues, unless you are willing to maintain enough extra `KCS` on the account or unless you're willing to disable using `KCS` for fees.
|
||||||
Kucoin accounts may use `KCS` for fees, and if a trade happens to be on `KCS`, further trades may consume this position and make the initial `KCS` trade unsellable as the expected amount is not there anymore.
|
Kucoin accounts may use `KCS` for fees, and if a trade happens to be on `KCS`, further trades may consume this position and make the initial `KCS` trade unsellable as the expected amount is not there anymore.
|
||||||
|
|
||||||
## HTX
|
## Huobi
|
||||||
|
|
||||||
!!! Tip "Stoploss on Exchange"
|
!!! Tip "Stoploss on Exchange"
|
||||||
HTX supports `stoploss_on_exchange` and uses `stop-limit` orders. It provides great advantages, so we recommend to benefit from it by enabling stoploss on exchange.
|
Huobi supports `stoploss_on_exchange` and uses `stop-limit` orders. It provides great advantages, so we recommend to benefit from it by enabling stoploss on exchange.
|
||||||
|
|
||||||
## OKX
|
## OKX (former OKEX)
|
||||||
|
|
||||||
OKX requires a passphrase for each api key, you will therefore need to add this key into the configuration so your exchange section looks as follows:
|
OKX requires a passphrase for each api key, you will therefore need to add this key into the configuration so your exchange section looks as follows:
|
||||||
|
|
||||||
@@ -236,15 +237,12 @@ OKX requires a passphrase for each api key, you will therefore need to add this
|
|||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
If you've registered with OKX on the host my.okx.com (OKX EAA)- you will need to use `"myokx"` as the exchange name.
|
|
||||||
Using the wrong exchange will result in the error "OKX Error 50119: API key doesn't exist" - as the 2 are separate entities.
|
|
||||||
|
|
||||||
!!! Warning
|
!!! Warning
|
||||||
OKX only provides 100 candles per api call. Therefore, the strategy will only have a pretty low amount of data available in backtesting mode.
|
OKX only provides 100 candles per api call. Therefore, the strategy will only have a pretty low amount of data available in backtesting mode.
|
||||||
|
|
||||||
!!! Warning "Futures"
|
!!! Warning "Futures"
|
||||||
OKX Futures has the concept of "position mode" - which can be "Buy/Sell" or long/short (hedge mode).
|
OKX Futures has the concept of "position mode" - which can be Net or long/short (hedge mode).
|
||||||
Freqtrade supports both modes (we recommend to use Buy/Sell mode) - but changing the mode mid-trading is not supported and will lead to exceptions and failures to place trades.
|
Freqtrade supports both modes (we recommend to use net mode) - but changing the mode mid-trading is not supported and will lead to exceptions and failures to place trades.
|
||||||
OKX also only provides MARK candles for the past ~3 months. Backtesting futures prior to that date will therefore lead to slight deviations, as funding-fees cannot be calculated correctly without this data.
|
OKX also only provides MARK candles for the past ~3 months. Backtesting futures prior to that date will therefore lead to slight deviations, as funding-fees cannot be calculated correctly without this data.
|
||||||
|
|
||||||
## Gate.io
|
## Gate.io
|
||||||
@@ -255,93 +253,6 @@ Using the wrong exchange will result in the error "OKX Error 50119: API key does
|
|||||||
Gate.io allows the use of `POINT` to pay for fees. As this is not a tradable currency (no regular market available), automatic fee calculations will fail (and default to a fee of 0).
|
Gate.io allows the use of `POINT` to pay for fees. As this is not a tradable currency (no regular market available), automatic fee calculations will fail (and default to a fee of 0).
|
||||||
The configuration parameter `exchange.unknown_fee_rate` can be used to specify the exchange rate between Point and the stake currency. Obviously, changing the stake-currency will also require changes to this value.
|
The configuration parameter `exchange.unknown_fee_rate` can be used to specify the exchange rate between Point and the stake currency. Obviously, changing the stake-currency will also require changes to this value.
|
||||||
|
|
||||||
Gate API keys require the following permissions on top of the market type you want to trade:
|
|
||||||
|
|
||||||
* "Spot Trade" _or_ "Perpetual Futures" (Read and Write) (either select both, or the one matching the market you want to trade)
|
|
||||||
* "Wallet" (read only)
|
|
||||||
* "Account" (read only)
|
|
||||||
|
|
||||||
Without these permissions, the bot will not start correctly and show errors like "permission missing".
|
|
||||||
|
|
||||||
## Bybit
|
|
||||||
|
|
||||||
Futures trading on bybit is currently supported for USDT markets, and will use isolated futures mode.
|
|
||||||
|
|
||||||
On startup, freqtrade will set the position mode to "One-way Mode" for the whole (sub)account. This avoids making this call over and over again (slowing down bot operations), but means that changes to this setting may result in exceptions and errors.
|
|
||||||
|
|
||||||
As bybit doesn't provide funding rate history, the dry-run calculation is used for live trades as well.
|
|
||||||
|
|
||||||
API Keys for live futures trading must have the following permissions:
|
|
||||||
|
|
||||||
* Read-write
|
|
||||||
* Contract - Orders
|
|
||||||
* Contract - Positions
|
|
||||||
|
|
||||||
We do strongly recommend to limit all API keys to the IP you're going to use it from.
|
|
||||||
|
|
||||||
!!! Warning "Unified accounts"
|
|
||||||
Freqtrade assumes accounts to be dedicated to the bot.
|
|
||||||
We therefore recommend the usage of one subaccount per bot. This is especially important when using unified accounts.
|
|
||||||
Other configurations (multiple bots on one account, manual non-bot trades on the bot account) are not supported and may lead to unexpected behavior.
|
|
||||||
|
|
||||||
|
|
||||||
!!! Tip "Stoploss on Exchange"
|
|
||||||
Bybit (futures only) supports `stoploss_on_exchange` and uses `stop-loss-limit` orders. It provides great advantages, so we recommend to benefit from it by enabling stoploss on exchange.
|
|
||||||
On futures, Bybit supports both `stop-limit` as well as `stop-market` orders. You can use either `"limit"` or `"market"` in the `order_types.stoploss` configuration setting to decide which type to use.
|
|
||||||
|
|
||||||
## Bitmart
|
|
||||||
|
|
||||||
Bitmart requires the API key Memo (the name you give the API key) to go along with the exchange key and secret.
|
|
||||||
It's therefore required to pass the UID as well.
|
|
||||||
|
|
||||||
```json
|
|
||||||
"exchange": {
|
|
||||||
"name": "bitmart",
|
|
||||||
"uid": "your_bitmart_api_key_memo",
|
|
||||||
"secret": "your_exchange_secret",
|
|
||||||
"password": "your_exchange_api_key_password",
|
|
||||||
// ...
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
!!! Warning "Necessary Verification"
|
|
||||||
Bitmart requires Verification Lvl2 to successfully trade on the spot market through the API - even though trading via UI works just fine with just Lvl1 verification.
|
|
||||||
|
|
||||||
## Hyperliquid
|
|
||||||
|
|
||||||
!!! Tip "Stoploss on Exchange"
|
|
||||||
Hyperliquid supports `stoploss_on_exchange` and uses `stop-loss-limit` orders. It provides great advantages, so we recommend to benefit from it.
|
|
||||||
|
|
||||||
Hyperliquid is a Decentralized Exchange (DEX). Decentralized exchanges work a bit different compared to normal exchanges. Instead of authenticating private API calls using an API key, private API calls need to be signed with the private key of your wallet (We recommend using an api Wallet for this, generated either on Hyperliquid or in your wallet of choice).
|
|
||||||
This needs to be configured like this:
|
|
||||||
|
|
||||||
```json
|
|
||||||
"exchange": {
|
|
||||||
"name": "hyperliquid",
|
|
||||||
"walletAddress": "your_eth_wallet_address",
|
|
||||||
"privateKey": "your_api_private_key",
|
|
||||||
// ...
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
* walletAddress in hex format: `0x<40 hex characters>` - Can be easily copied from your wallet - and should be your wallet address, not your API Wallet Address.
|
|
||||||
* privateKey in hex format: `0x<64 hex characters>` - Use the key the API Wallet shows on creation.
|
|
||||||
|
|
||||||
Hyperliquid handles deposits and withdrawals on the Arbitrum One chain, a Layer 2 scaling solution built on top of Ethereum. Hyperliquid uses USDC as quote / collateral. The process of depositing USDC on Hyperliquid requires a couple of steps, see [how to start trading](https://hyperliquid.gitbook.io/hyperliquid-docs/onboarding/how-to-start-trading) for details on what steps are needed.
|
|
||||||
|
|
||||||
!!! Note "Hyperliquid general usage Notes"
|
|
||||||
Hyperliquid does not support market orders, however ccxt will simulate market orders by placing limit orders with a maximum slippage of 5%.
|
|
||||||
Unfortunately, hyperliquid only offers 5000 historic candles, so backtesting will either need to build candles historically (by waiting and downloading the data incrementally over time) - or will be limited to the last 5000 candles.
|
|
||||||
|
|
||||||
!!! Info "Some general best practices (non exhaustive)"
|
|
||||||
* Beware of supply chain attacks, like pip package poisoning etcetera. Whenever you use your private key, make sure your environment is safe.
|
|
||||||
* Don't use your actual wallet private key for trading. Use the Hyperliquid [API generator](https://app.hyperliquid.xyz/API) to create a separate API wallet.
|
|
||||||
* Don't store your actual wallet private key on the server you use for freqtrade. Use the API wallet private key instead. This key won't allow withdrawals, only trading.
|
|
||||||
* Always keep your mnemonic phrase and private key private.
|
|
||||||
* Don't use the same mnemonic as the one you had to backup when initializing a hardware wallet, using the same mnemonic basically deletes the security of your hardware wallet.
|
|
||||||
* Create a different software wallet, only transfer the funds you want to trade with to that wallet, and use that wallet to trade on Hyperliquid.
|
|
||||||
* If you have funds you don't want to use for trading (after making a profit for example), transfer them back to your hardware wallet.
|
|
||||||
|
|
||||||
## All exchanges
|
## All exchanges
|
||||||
|
|
||||||
Should you experience constant errors with Nonce (like `InvalidNonce`), it is best to regenerate the API keys. Resetting Nonce is difficult and it's usually easier to regenerate the API keys.
|
Should you experience constant errors with Nonce (like `InvalidNonce`), it is best to regenerate the API keys. Resetting Nonce is difficult and it's usually easier to regenerate the API keys.
|
||||||
@@ -351,7 +262,7 @@ Should you experience constant errors with Nonce (like `InvalidNonce`), it is be
|
|||||||
* The Ocean (exchange id: `theocean`) exchange uses Web3 functionality and requires `web3` python package to be installed:
|
* The Ocean (exchange id: `theocean`) exchange uses Web3 functionality and requires `web3` python package to be installed:
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
pip3 install web3
|
$ pip3 install web3
|
||||||
```
|
```
|
||||||
|
|
||||||
### Getting latest price / Incomplete candles
|
### Getting latest price / Incomplete candles
|
||||||
@@ -359,7 +270,7 @@ pip3 install web3
|
|||||||
Most exchanges return current incomplete candle via their OHLCV/klines API interface.
|
Most exchanges return current incomplete candle via their OHLCV/klines API interface.
|
||||||
By default, Freqtrade assumes that incomplete candle is fetched from the exchange and removes the last candle assuming it's the incomplete candle.
|
By default, Freqtrade assumes that incomplete candle is fetched from the exchange and removes the last candle assuming it's the incomplete candle.
|
||||||
|
|
||||||
Whether your exchange returns incomplete candles or not can be checked using [the helper script](developer.md#incomplete-candles) from the Contributor documentation.
|
Whether your exchange returns incomplete candles or not can be checked using [the helper script](developer.md#incomplete-candles) from the Contributor documentation.
|
||||||
|
|
||||||
Due to the danger of repainting, Freqtrade does not allow you to use this incomplete candle.
|
Due to the danger of repainting, Freqtrade does not allow you to use this incomplete candle.
|
||||||
|
|
||||||
|
|||||||
80
docs/faq.md
@@ -2,7 +2,7 @@
|
|||||||
|
|
||||||
## Supported Markets
|
## Supported Markets
|
||||||
|
|
||||||
Freqtrade supports spot trading, as well as (isolated) futures trading for some selected exchanges. Please refer to the [documentation start page](index.md#supported-futures-exchanges-experimental) for an up-to-date list of supported exchanges.
|
Freqtrade supports spot trading only.
|
||||||
|
|
||||||
### Can my bot open short positions?
|
### Can my bot open short positions?
|
||||||
|
|
||||||
@@ -14,13 +14,13 @@ In spot markets, you can in some cases use leveraged spot tokens, which reflect
|
|||||||
|
|
||||||
### Can my bot trade options or futures?
|
### Can my bot trade options or futures?
|
||||||
|
|
||||||
Futures trading is supported for selected exchanges. Please refer to the [documentation start page](index.md#supported-futures-exchanges-experimental) for an up-to-date list of supported exchanges.
|
Futures trading is supported for selected exchanges. Please refer to the [documentation start page](index.md#supported-futures-exchanges-experimental) for an up-to-date list of supported exchanges.
|
||||||
|
|
||||||
## Beginner Tips & Tricks
|
## Beginner Tips & Tricks
|
||||||
|
|
||||||
* When you work with your strategy & hyperopt file you should use a proper code editor like VSCode or PyCharm. A good code editor will provide syntax highlighting as well as line numbers, making it easy to find syntax errors (most likely pointed out by Freqtrade during startup).
|
* When you work with your strategy & hyperopt file you should use a proper code editor like VSCode or PyCharm. A good code editor will provide syntax highlighting as well as line numbers, making it easy to find syntax errors (most likely pointed out by Freqtrade during startup).
|
||||||
|
|
||||||
## Freqtrade common questions
|
## Freqtrade common issues
|
||||||
|
|
||||||
### Can freqtrade open multiple positions on the same pair in parallel?
|
### Can freqtrade open multiple positions on the same pair in parallel?
|
||||||
|
|
||||||
@@ -36,14 +36,10 @@ Running the bot with `freqtrade trade --config config.json` shows the output `fr
|
|||||||
This could be caused by the following reasons:
|
This could be caused by the following reasons:
|
||||||
|
|
||||||
* The virtual environment is not active.
|
* The virtual environment is not active.
|
||||||
* Run `source .venv/bin/activate` to activate the virtual environment.
|
* Run `source .env/bin/activate` to activate the virtual environment.
|
||||||
* The installation did not complete successfully.
|
* The installation did not complete successfully.
|
||||||
* Please check the [Installation documentation](installation.md).
|
* Please check the [Installation documentation](installation.md).
|
||||||
|
|
||||||
### The bot starts, but in STOPPED mode
|
|
||||||
|
|
||||||
Make sure you set the `initial_state` config option to `"running"` in your config.json
|
|
||||||
|
|
||||||
### I have waited 5 minutes, why hasn't the bot made any trades yet?
|
### I have waited 5 minutes, why hasn't the bot made any trades yet?
|
||||||
|
|
||||||
* Depending on the buy strategy, the amount of whitelisted coins, the
|
* Depending on the buy strategy, the amount of whitelisted coins, the
|
||||||
@@ -82,14 +78,6 @@ Where possible (e.g. on binance), the use of the exchange's dedicated fee curren
|
|||||||
On binance, it's sufficient to have BNB in your account, and have "Pay fees in BNB" enabled in your profile. Your BNB balance will slowly decline (as it's used to pay fees) - but you'll no longer encounter dust (Freqtrade will include the fees in the profit calculations).
|
On binance, it's sufficient to have BNB in your account, and have "Pay fees in BNB" enabled in your profile. Your BNB balance will slowly decline (as it's used to pay fees) - but you'll no longer encounter dust (Freqtrade will include the fees in the profit calculations).
|
||||||
Other exchanges don't offer such possibilities, where it's simply something you'll have to accept or move to a different exchange.
|
Other exchanges don't offer such possibilities, where it's simply something you'll have to accept or move to a different exchange.
|
||||||
|
|
||||||
### I deposited more funds to the exchange, but my bot doesn't recognize this
|
|
||||||
|
|
||||||
Freqtrade will update the exchange balance when necessary (Before placing an order).
|
|
||||||
RPC calls (Telegram's `/balance`, API calls to `/balance`) can trigger an update at max. once per hour.
|
|
||||||
|
|
||||||
If `adjust_trade_position` is enabled (and the bot has open trades eligible for position adjustments) - then the wallets will be refreshed once per hour.
|
|
||||||
To force an immediate update, you can use `/reload_config` - which will restart the bot.
|
|
||||||
|
|
||||||
### I want to use incomplete candles
|
### I want to use incomplete candles
|
||||||
|
|
||||||
Freqtrade will not provide incomplete candles to strategies. Using incomplete candles will lead to repainting and consequently to strategies with "ghost" buys, which are impossible to both backtest, and verify after they happened.
|
Freqtrade will not provide incomplete candles to strategies. Using incomplete candles will lead to repainting and consequently to strategies with "ghost" buys, which are impossible to both backtest, and verify after they happened.
|
||||||
@@ -104,19 +92,6 @@ You can use the `/stopentry` command in Telegram to prevent future trade entry,
|
|||||||
|
|
||||||
Please look at the [advanced setup documentation Page](advanced-setup.md#running-multiple-instances-of-freqtrade).
|
Please look at the [advanced setup documentation Page](advanced-setup.md#running-multiple-instances-of-freqtrade).
|
||||||
|
|
||||||
### I'm getting "Impossible to load Strategy" when starting the bot
|
|
||||||
|
|
||||||
This error message is shown when the bot cannot load the strategy.
|
|
||||||
Usually, you can use `freqtrade list-strategies` to list all available strategies.
|
|
||||||
The output of this command will also include a status column, showing if the strategy can be loaded.
|
|
||||||
|
|
||||||
Please check the following:
|
|
||||||
|
|
||||||
* Are you using the correct strategy name? The strategy name is case-sensitive and must correspond to the Strategy class name (not the filename!).
|
|
||||||
* Is the strategy in the `user_data/strategies` directory, and has the file-ending `.py`?
|
|
||||||
* Does the bot show other warnings before this error? Maybe you're missing some dependencies for the strategy - which would be highlighted in the log.
|
|
||||||
* In case of docker - is the strategy directory mounted correctly (check the volumes part of the docker-compose file)?
|
|
||||||
|
|
||||||
### I'm getting "Missing data fillup" messages in the log
|
### I'm getting "Missing data fillup" messages in the log
|
||||||
|
|
||||||
This message is just a warning that the latest candles had missing candles in them.
|
This message is just a warning that the latest candles had missing candles in them.
|
||||||
@@ -127,16 +102,6 @@ If this happens for all pairs in the pairlist, this might indicate a recent exch
|
|||||||
|
|
||||||
Irrespectively of the reason, Freqtrade will fill up these candles with "empty" candles, where open, high, low and close are set to the previous candle close - and volume is empty. In a chart, this will look like a `_` - and is aligned with how exchanges usually represent 0 volume candles.
|
Irrespectively of the reason, Freqtrade will fill up these candles with "empty" candles, where open, high, low and close are set to the previous candle close - and volume is empty. In a chart, this will look like a `_` - and is aligned with how exchanges usually represent 0 volume candles.
|
||||||
|
|
||||||
### I'm getting "Price jump between 2 candles detected"
|
|
||||||
|
|
||||||
This message is a warning that the candles had a price jump of > 30%.
|
|
||||||
This might be a sign that the pair stopped trading, and some token exchange took place (e.g. COCOS in 2021 - where price jumped from 0.0000154 to 0.01621).
|
|
||||||
This message is often accompanied by ["Missing data fillup"](#im-getting-missing-data-fillup-messages-in-the-log) - as trading on such pairs is often stopped for some time.
|
|
||||||
|
|
||||||
### I want to reset the bot's database
|
|
||||||
|
|
||||||
To reset the bot's database, you can either delete the database (by default `tradesv3.sqlite` or `tradesv3.dryrun.sqlite`), or use a different database url via `--db-url` (e.g. `sqlite:///mynewdatabase.sqlite`).
|
|
||||||
|
|
||||||
### I'm getting "Outdated history for pair xxx" in the log
|
### I'm getting "Outdated history for pair xxx" in the log
|
||||||
|
|
||||||
The bot is trying to tell you that it got an outdated last candle (not the last complete candle).
|
The bot is trying to tell you that it got an outdated last candle (not the last complete candle).
|
||||||
@@ -149,9 +114,15 @@ This warning can point to one of the below problems:
|
|||||||
* Barely traded pair -> Check the pair on the exchange webpage, look at the timeframe your strategy uses. If the pair does not have any volume in some candles (usually visualized with a "volume 0" bar, and a "_" as candle), this pair did not have any trades in this timeframe. These pairs should ideally be avoided, as they can cause problems with order-filling.
|
* Barely traded pair -> Check the pair on the exchange webpage, look at the timeframe your strategy uses. If the pair does not have any volume in some candles (usually visualized with a "volume 0" bar, and a "_" as candle), this pair did not have any trades in this timeframe. These pairs should ideally be avoided, as they can cause problems with order-filling.
|
||||||
* API problem -> API returns wrong data (this only here for completeness, and should not happen with supported exchanges).
|
* API problem -> API returns wrong data (this only here for completeness, and should not happen with supported exchanges).
|
||||||
|
|
||||||
|
### I'm getting the "RESTRICTED_MARKET" message in the log
|
||||||
|
|
||||||
|
Currently known to happen for US Bittrex users.
|
||||||
|
|
||||||
|
Read [the Bittrex section about restricted markets](exchanges.md#restricted-markets) for more information.
|
||||||
|
|
||||||
### I'm getting the "Exchange XXX does not support market orders." message and cannot run my strategy
|
### I'm getting the "Exchange XXX does not support market orders." message and cannot run my strategy
|
||||||
|
|
||||||
As the message says, your exchange does not support market orders and you have one of the [order types](configuration.md/#understand-order_types) set to "market". Your strategy was probably written with other exchanges in mind and sets "market" orders for "stoploss" orders, which is correct and preferable for most of the exchanges supporting market orders (but not for Gate.io).
|
As the message says, your exchange does not support market orders and you have one of the [order types](configuration.md/#understand-order_types) set to "market". Your strategy was probably written with other exchanges in mind and sets "market" orders for "stoploss" orders, which is correct and preferable for most of the exchanges supporting market orders (but not for Bittrex and Gate.io).
|
||||||
|
|
||||||
To fix this, redefine order types in the strategy to use "limit" instead of "market":
|
To fix this, redefine order types in the strategy to use "limit" instead of "market":
|
||||||
|
|
||||||
@@ -165,13 +136,6 @@ To fix this, redefine order types in the strategy to use "limit" instead of "mar
|
|||||||
|
|
||||||
The same fix should be applied in the configuration file, if order types are defined in your custom config rather than in the strategy.
|
The same fix should be applied in the configuration file, if order types are defined in your custom config rather than in the strategy.
|
||||||
|
|
||||||
### I'm trying to start the bot live, but get an API permission error
|
|
||||||
|
|
||||||
Errors like `Invalid API-key, IP, or permissions for action` mean exactly what they actually say.
|
|
||||||
Your API key is either invalid (copy/paste error? check for leading/trailing spaces in the config), expired, or the IP you're running the bot from is not enabled in the Exchange's API console.
|
|
||||||
Usually, the permission "Spot Trading" (or the equivalent in the exchange you use) will be necessary.
|
|
||||||
Futures will usually have to be enabled specifically.
|
|
||||||
|
|
||||||
### How do I search the bot logs for something?
|
### How do I search the bot logs for something?
|
||||||
|
|
||||||
By default, the bot writes its log into stderr stream. This is implemented this way so that you can easily separate the bot's diagnostics messages from Backtesting, Edge and Hyperopt results, output from other various Freqtrade utility sub-commands, as well as from the output of your custom `print()`'s you may have inserted into your strategy. So if you need to search the log messages with the grep utility, you need to redirect stderr to stdout and disregard stdout.
|
By default, the bot writes its log into stderr stream. This is implemented this way so that you can easily separate the bot's diagnostics messages from Backtesting, Edge and Hyperopt results, output from other various Freqtrade utility sub-commands, as well as from the output of your custom `print()`'s you may have inserted into your strategy. So if you need to search the log messages with the grep utility, you need to redirect stderr to stdout and disregard stdout.
|
||||||
@@ -278,26 +242,8 @@ The Edge module is mostly a result of brainstorming of [@mishaker](https://githu
|
|||||||
You can find further info on expectancy, win rate, risk management and position size in the following sources:
|
You can find further info on expectancy, win rate, risk management and position size in the following sources:
|
||||||
|
|
||||||
- https://www.tradeciety.com/ultimate-math-guide-for-traders/
|
- https://www.tradeciety.com/ultimate-math-guide-for-traders/
|
||||||
|
- http://www.vantharp.com/tharp-concepts/expectancy.asp
|
||||||
- https://samuraitradingacademy.com/trading-expectancy/
|
- https://samuraitradingacademy.com/trading-expectancy/
|
||||||
- https://www.learningmarkets.com/determining-expectancy-in-your-trading/
|
- https://www.learningmarkets.com/determining-expectancy-in-your-trading/
|
||||||
- https://www.lonestocktrader.com/make-money-trading-positive-expectancy/
|
- http://www.lonestocktrader.com/make-money-trading-positive-expectancy/
|
||||||
- https://www.babypips.com/trading/trade-expectancy-matter
|
- https://www.babypips.com/trading/trade-expectancy-matter
|
||||||
|
|
||||||
## Official channels
|
|
||||||
|
|
||||||
Freqtrade is using exclusively the following official channels:
|
|
||||||
|
|
||||||
* [Freqtrade discord server](https://discord.gg/p7nuUNVfP7)
|
|
||||||
* [Freqtrade documentation (https://freqtrade.io)](https://freqtrade.io)
|
|
||||||
* [Freqtrade github organization](https://github.com/freqtrade)
|
|
||||||
|
|
||||||
Nobody affiliated with the freqtrade project will ask you about your exchange keys or anything else exposing your funds to exploitation.
|
|
||||||
Should you be asked to expose your exchange keys or send funds to some random wallet, then please don't follow these instructions.
|
|
||||||
|
|
||||||
Failing to follow these guidelines will not be responsibility of freqtrade.
|
|
||||||
|
|
||||||
## "Freqtrade token"
|
|
||||||
|
|
||||||
Freqtrade does not have a Crypto token offering.
|
|
||||||
|
|
||||||
Token offerings you find on the internet referring Freqtrade, FreqAI or freqUI must be considered to be a scam, trying to exploit freqtrade's popularity for their own, nefarious gains.
|
|
||||||
|
|||||||
@@ -1,84 +0,0 @@
|
|||||||
# FreqUI
|
|
||||||
|
|
||||||
Freqtrade provides a builtin webserver, which can serve [FreqUI](https://github.com/freqtrade/frequi), the freqtrade frontend.
|
|
||||||
|
|
||||||
By default, the UI is automatically installed as part of the installation (script, docker).
|
|
||||||
freqUI can also be manually installed by using the `freqtrade install-ui` command.
|
|
||||||
This same command can also be used to update freqUI to new releases.
|
|
||||||
|
|
||||||
Once the bot is started in trade / dry-run mode (with `freqtrade trade`) - the UI will be available under the configured API port (by default `http://127.0.0.1:8080`).
|
|
||||||
|
|
||||||
??? Note "Looking to contribute to freqUI?"
|
|
||||||
    Developers should not use this method, but instead clone the [freqUI repository](https://github.com/freqtrade/frequi) and follow the method described there to get the source-code of freqUI. A working installation of node will be required to build the frontend.
|
|
||||||
|
|
||||||
!!! tip "freqUI is not required to run freqtrade"
|
|
||||||
freqUI is an optional component of freqtrade, and is not required to run the bot.
|
|
||||||
It is a frontend that can be used to monitor the bot and to interact with it - but freqtrade itself will work perfectly fine without it.
|
|
||||||
|
|
||||||
## Configuration
|
|
||||||
|
|
||||||
FreqUI does not have its own configuration file - but assumes a working setup for the [rest-api](rest-api.md) is available.
|
|
||||||
Please refer to the corresponding documentation page to get set up with freqUI
|
|
||||||
|
|
||||||
## UI
|
|
||||||
|
|
||||||
FreqUI is a modern, responsive web application that can be used to monitor and interact with your bot.
|
|
||||||
|
|
||||||
FreqUI provides a light, as well as a dark theme.
|
|
||||||
Themes can be easily switched via a prominent button at the top of the page.
|
|
||||||
The theme of the screenshots on this page will adapt to the selected documentation Theme, so to see the dark (or light) version, please switch the theme of the Documentation.
|
|
||||||
|
|
||||||
### Login
|
|
||||||
|
|
||||||
The below screenshot shows the login screen of freqUI.
|
|
||||||
|
|
||||||

|
|
||||||

|
|
||||||
|
|
||||||
!!! Hint "CORS"
|
|
||||||
The Cors error shown in this screenshot is due to the fact that the UI is running on a different port than the API, and [CORS](#cors) has not been setup correctly yet.
|
|
||||||
|
|
||||||
### Trade view
|
|
||||||
|
|
||||||
The trade view allows you to visualize the trades that the bot is making and to interact with the bot.
|
|
||||||
On this page, you can also interact with the bot by starting and stopping it and - if configured - force trade entries and exits.
|
|
||||||
|
|
||||||

|
|
||||||

|
|
||||||
|
|
||||||
### Plot Configurator
|
|
||||||
|
|
||||||
FreqUI Plots can be configured either via a `plot_config` configuration object in the strategy (which can be loaded via "from strategy" button) or via the UI.
|
|
||||||
Multiple plot configurations can be created and switched at will - allowing for flexible, different views into your charts.
|
|
||||||
|
|
||||||
The plot configuration can be accessed via the "Plot Configurator" (Cog icon) button in the top right corner of the trade view.
|
|
||||||
|
|
||||||

|
|
||||||

|
|
||||||
|
|
||||||
### Settings
|
|
||||||
|
|
||||||
Several UI related settings can be changed by accessing the settings page.
|
|
||||||
|
|
||||||
Things you can change (among others):
|
|
||||||
|
|
||||||
* Timezone of the UI
|
|
||||||
* Visualization of open trades as part of the favicon (browser tab)
|
|
||||||
* Candle colors (up/down -> red/green)
|
|
||||||
* Enable / disable in-app notification types
|
|
||||||
|
|
||||||

|
|
||||||

|
|
||||||
|
|
||||||
## Backtesting
|
|
||||||
|
|
||||||
When freqtrade is started in [webserver mode](utils.md#webserver-mode) (freqtrade started with `freqtrade webserver`), the backtesting view becomes available.
|
|
||||||
This view allows you to backtest strategies and visualize the results.
|
|
||||||
|
|
||||||
You can also load and visualize previous backtest results, as well as compare the results with each other.
|
|
||||||
|
|
||||||

|
|
||||||

|
|
||||||
|
|
||||||
|
|
||||||
--8<-- "includes/cors.md"
|
|
||||||
@@ -9,7 +9,7 @@ FreqAI is configured through the typical [Freqtrade config file](configuration.m
|
|||||||
```json
|
```json
|
||||||
"freqai": {
|
"freqai": {
|
||||||
"enabled": true,
|
"enabled": true,
|
||||||
"purge_old_models": 2,
|
"purge_old_models": true,
|
||||||
"train_period_days": 30,
|
"train_period_days": 30,
|
||||||
"backtest_period_days": 7,
|
"backtest_period_days": 7,
|
||||||
"identifier" : "unique-id",
|
"identifier" : "unique-id",
|
||||||
@@ -26,15 +26,15 @@ FreqAI is configured through the typical [Freqtrade config file](configuration.m
|
|||||||
},
|
},
|
||||||
"data_split_parameters" : {
|
"data_split_parameters" : {
|
||||||
"test_size": 0.25
|
"test_size": 0.25
|
||||||
}
|
},
|
||||||
|
"model_training_parameters" : {
|
||||||
|
"n_estimators": 100
|
||||||
|
},
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
A full example config is available in `config_examples/config_freqai.example.json`.
|
A full example config is available in `config_examples/config_freqai.example.json`.
|
||||||
|
|
||||||
!!! Note
|
|
||||||
The `identifier` is commonly overlooked by newcomers, however, this value plays an important role in your configuration. This value is a unique ID that you choose to describe one of your runs. Keeping it the same allows you to maintain crash resilience as well as faster backtesting. As soon as you want to try a new run (new features, new model, etc.), you should change this value (or delete the `user_data/models/unique-id` folder. More details available in the [parameter table](freqai-parameter-table.md#feature-parameters).
|
|
||||||
|
|
||||||
## Building a FreqAI strategy
|
## Building a FreqAI strategy
|
||||||
|
|
||||||
The FreqAI strategy requires including the following lines of code in the standard [Freqtrade strategy](strategy-customization.md):
|
The FreqAI strategy requires including the following lines of code in the standard [Freqtrade strategy](strategy-customization.md):
|
||||||
@@ -46,114 +46,119 @@ The FreqAI strategy requires including the following lines of code in the standa
|
|||||||
|
|
||||||
def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
|
def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
|
||||||
|
|
||||||
# the model will return all labels created by user in `set_freqai_targets()`
|
# the model will return all labels created by user in `populate_any_indicators`
|
||||||
# (& appended targets), an indication of whether or not the prediction should be accepted,
|
# (& appended targets), an indication of whether or not the prediction should be accepted,
|
||||||
# the target mean/std values for each of the labels created by user in
|
# the target mean/std values for each of the labels created by user in
|
||||||
# `set_freqai_targets()` for each training period.
|
# `populate_any_indicators()` for each training period.
|
||||||
|
|
||||||
dataframe = self.freqai.start(dataframe, metadata, self)
|
dataframe = self.freqai.start(dataframe, metadata, self)
|
||||||
|
|
||||||
return dataframe
|
return dataframe
|
||||||
|
|
||||||
def feature_engineering_expand_all(self, dataframe: DataFrame, period, **kwargs) -> DataFrame:
|
def populate_any_indicators(
|
||||||
|
self, pair, df, tf, informative=None, set_generalized_indicators=False
|
||||||
|
):
|
||||||
"""
|
"""
|
||||||
*Only functional with FreqAI enabled strategies*
|
Function designed to automatically generate, name and merge features
|
||||||
This function will automatically expand the defined features on the config defined
|
from user indicated timeframes in the configuration file. User controls the indicators
|
||||||
`indicator_periods_candles`, `include_timeframes`, `include_shifted_candles`, and
|
passed to the training/prediction by prepending indicators with `'%-' + coin `
|
||||||
`include_corr_pairs`. In other words, a single feature defined in this function
|
(see convention below). I.e. user should not prepend any supporting metrics
|
||||||
will automatically expand to a total of
|
(e.g. bb_lowerband below) with % unless they explicitly want to pass that metric to the
|
||||||
`indicator_periods_candles` * `include_timeframes` * `include_shifted_candles` *
|
model.
|
||||||
`include_corr_pairs` numbers of features added to the model.
|
:param pair: pair to be used as informative
|
||||||
|
:param df: strategy dataframe which will receive merges from informatives
|
||||||
All features must be prepended with `%` to be recognized by FreqAI internals.
|
:param tf: timeframe of the dataframe which will modify the feature names
|
||||||
|
:param informative: the dataframe associated with the informative pair
|
||||||
:param df: strategy dataframe which will receive the features
|
:param coin: the name of the coin which will modify the feature names.
|
||||||
:param period: period of the indicator - usage example:
|
|
||||||
dataframe["%-ema-period"] = ta.EMA(dataframe, timeperiod=period)
|
|
||||||
"""
|
"""
|
||||||
|
|
||||||
dataframe["%-rsi-period"] = ta.RSI(dataframe, timeperiod=period)
|
coin = pair.split('/')[0]
|
||||||
dataframe["%-mfi-period"] = ta.MFI(dataframe, timeperiod=period)
|
|
||||||
dataframe["%-adx-period"] = ta.ADX(dataframe, timeperiod=period)
|
|
||||||
dataframe["%-sma-period"] = ta.SMA(dataframe, timeperiod=period)
|
|
||||||
dataframe["%-ema-period"] = ta.EMA(dataframe, timeperiod=period)
|
|
||||||
|
|
||||||
return dataframe
|
if informative is None:
|
||||||
|
informative = self.dp.get_pair_dataframe(pair, tf)
|
||||||
|
|
||||||
def feature_engineering_expand_basic(self, dataframe: DataFrame, **kwargs) -> DataFrame:
|
# first loop is automatically duplicating indicators for time periods
|
||||||
"""
|
for t in self.freqai_info["feature_parameters"]["indicator_periods_candles"]:
|
||||||
*Only functional with FreqAI enabled strategies*
|
t = int(t)
|
||||||
This function will automatically expand the defined features on the config defined
|
informative[f"%-{coin}rsi-period_{t}"] = ta.RSI(informative, timeperiod=t)
|
||||||
`include_timeframes`, `include_shifted_candles`, and `include_corr_pairs`.
|
informative[f"%-{coin}mfi-period_{t}"] = ta.MFI(informative, timeperiod=t)
|
||||||
In other words, a single feature defined in this function
|
informative[f"%-{coin}adx-period_{t}"] = ta.ADX(informative, window=t)
|
||||||
will automatically expand to a total of
|
|
||||||
`include_timeframes` * `include_shifted_candles` * `include_corr_pairs`
|
|
||||||
numbers of features added to the model.
|
|
||||||
|
|
||||||
Features defined here will *not* be automatically duplicated on user defined
|
indicators = [col for col in informative if col.startswith("%")]
|
||||||
`indicator_periods_candles`
|
# This loop duplicates and shifts all indicators to add a sense of recency to data
|
||||||
|
for n in range(self.freqai_info["feature_parameters"]["include_shifted_candles"] + 1):
|
||||||
|
if n == 0:
|
||||||
|
continue
|
||||||
|
informative_shift = informative[indicators].shift(n)
|
||||||
|
informative_shift = informative_shift.add_suffix("_shift-" + str(n))
|
||||||
|
informative = pd.concat((informative, informative_shift), axis=1)
|
||||||
|
|
||||||
All features must be prepended with `%` to be recognized by FreqAI internals.
|
df = merge_informative_pair(df, informative, self.config["timeframe"], tf, ffill=True)
|
||||||
|
skip_columns = [
|
||||||
|
(s + "_" + tf) for s in ["date", "open", "high", "low", "close", "volume"]
|
||||||
|
]
|
||||||
|
df = df.drop(columns=skip_columns)
|
||||||
|
|
||||||
:param df: strategy dataframe which will receive the features
|
# Add generalized indicators here (because in live, it will call this
|
||||||
dataframe["%-pct-change"] = dataframe["close"].pct_change()
|
# function to populate indicators during training). Notice how we ensure not to
|
||||||
dataframe["%-ema-200"] = ta.EMA(dataframe, timeperiod=200)
|
# add them multiple times
|
||||||
"""
|
if set_generalized_indicators:
|
||||||
dataframe["%-pct-change"] = dataframe["close"].pct_change()
|
|
||||||
dataframe["%-raw_volume"] = dataframe["volume"]
|
|
||||||
dataframe["%-raw_price"] = dataframe["close"]
|
|
||||||
return dataframe
|
|
||||||
|
|
||||||
def feature_engineering_standard(self, dataframe: DataFrame, **kwargs) -> DataFrame:
|
# user adds targets here by prepending them with &- (see convention below)
|
||||||
"""
|
# If user wishes to use multiple targets, a multioutput prediction model
|
||||||
*Only functional with FreqAI enabled strategies*
|
# needs to be used such as templates/CatboostPredictionMultiModel.py
|
||||||
This optional function will be called once with the dataframe of the base timeframe.
|
df["&-s_close"] = (
|
||||||
This is the final function to be called, which means that the dataframe entering this
|
df["close"]
|
||||||
function will contain all the features and columns created by all other
|
|
||||||
freqai_feature_engineering_* functions.
|
|
||||||
|
|
||||||
This function is a good place to do custom exotic feature extractions (e.g. tsfresh).
|
|
||||||
This function is a good place for any feature that should not be auto-expanded upon
|
|
||||||
(e.g. day of the week).
|
|
||||||
|
|
||||||
All features must be prepended with `%` to be recognized by FreqAI internals.
|
|
||||||
|
|
||||||
:param df: strategy dataframe which will receive the features
|
|
||||||
usage example: dataframe["%-day_of_week"] = (dataframe["date"].dt.dayofweek + 1) / 7
|
|
||||||
"""
|
|
||||||
dataframe["%-day_of_week"] = (dataframe["date"].dt.dayofweek + 1) / 7
|
|
||||||
dataframe["%-hour_of_day"] = (dataframe["date"].dt.hour + 1) / 25
|
|
||||||
return dataframe
|
|
||||||
|
|
||||||
def set_freqai_targets(self, dataframe: DataFrame, **kwargs) -> DataFrame:
|
|
||||||
"""
|
|
||||||
*Only functional with FreqAI enabled strategies*
|
|
||||||
Required function to set the targets for the model.
|
|
||||||
All targets must be prepended with `&` to be recognized by the FreqAI internals.
|
|
||||||
|
|
||||||
:param df: strategy dataframe which will receive the targets
|
|
||||||
usage example: dataframe["&-target"] = dataframe["close"].shift(-1) / dataframe["close"]
|
|
||||||
"""
|
|
||||||
dataframe["&-s_close"] = (
|
|
||||||
dataframe["close"]
|
|
||||||
.shift(-self.freqai_info["feature_parameters"]["label_period_candles"])
|
.shift(-self.freqai_info["feature_parameters"]["label_period_candles"])
|
||||||
.rolling(self.freqai_info["feature_parameters"]["label_period_candles"])
|
.rolling(self.freqai_info["feature_parameters"]["label_period_candles"])
|
||||||
.mean()
|
.mean()
|
||||||
/ dataframe["close"]
|
/ df["close"]
|
||||||
- 1
|
- 1
|
||||||
)
|
)
|
||||||
return dataframe
|
|
||||||
|
return df
|
||||||
|
|
||||||
|
|
||||||
```
|
```
|
||||||
|
|
||||||
Notice how the `feature_engineering_*()` is where [features](freqai-feature-engineering.md#feature-engineering) are added. Meanwhile `set_freqai_targets()` adds the labels/targets. A full example strategy is available in `templates/FreqaiExampleStrategy.py`.
|
Notice how the `populate_any_indicators()` is where [features](freqai-feature-engineering.md#feature-engineering) and labels/targets are added. A full example strategy is available in `templates/FreqaiExampleStrategy.py`.
|
||||||
|
|
||||||
|
Notice also the location of the labels under `if set_generalized_indicators:` at the bottom of the example. This is where single features and labels/targets should be added to the feature set to avoid duplication of them from various configuration parameters that multiply the feature set, such as `include_timeframes`.
|
||||||
|
|
||||||
!!! Note
|
!!! Note
|
||||||
The `self.freqai.start()` function cannot be called outside the `populate_indicators()`.
|
The `self.freqai.start()` function cannot be called outside the `populate_indicators()`.
|
||||||
|
|
||||||
!!! Note
|
!!! Note
|
||||||
Features **must** be defined in `feature_engineering_*()`. Defining FreqAI features in `populate_indicators()`
|
Features **must** be defined in `populate_any_indicators()`. Defining FreqAI features in `populate_indicators()`
|
||||||
will cause the algorithm to fail in live/dry mode. In order to add generalized features that are not associated with a specific pair or timeframe, you should use `feature_engineering_standard()`
|
will cause the algorithm to fail in live/dry mode. In order to add generalized features that are not associated with a specific pair or timeframe, the following structure inside `populate_any_indicators()` should be used
|
||||||
(as exemplified in `freqtrade/templates/FreqaiExampleStrategy.py`).
|
(as exemplified in `freqtrade/templates/FreqaiExampleStrategy.py`):
|
||||||
|
|
||||||
|
```python
|
||||||
|
def populate_any_indicators(self, metadata, pair, df, tf, informative=None, coin="", set_generalized_indicators=False):
|
||||||
|
|
||||||
|
...
|
||||||
|
|
||||||
|
# Add generalized indicators here (because in live, it will call only this function to populate
|
||||||
|
# indicators for retraining). Notice how we ensure not to add them multiple times by associating
|
||||||
|
# these generalized indicators to the basepair/timeframe
|
||||||
|
if set_generalized_indicators:
|
||||||
|
df['%-day_of_week'] = (df["date"].dt.dayofweek + 1) / 7
|
||||||
|
df['%-hour_of_day'] = (df['date'].dt.hour + 1) / 25
|
||||||
|
|
||||||
|
# user adds targets here by prepending them with &- (see convention below)
|
||||||
|
# If user wishes to use multiple targets, a multioutput prediction model
|
||||||
|
# needs to be used such as templates/CatboostPredictionMultiModel.py
|
||||||
|
df["&-s_close"] = (
|
||||||
|
df["close"]
|
||||||
|
.shift(-self.freqai_info["feature_parameters"]["label_period_candles"])
|
||||||
|
.rolling(self.freqai_info["feature_parameters"]["label_period_candles"])
|
||||||
|
.mean()
|
||||||
|
/ df["close"]
|
||||||
|
- 1
|
||||||
|
)
|
||||||
|
```
|
||||||
|
|
||||||
|
Please see the example script located in `freqtrade/templates/FreqaiExampleStrategy.py` for a full example of `populate_any_indicators()`.
|
||||||
|
|
||||||
## Important dataframe key patterns
|
## Important dataframe key patterns
|
||||||
|
|
||||||
@@ -161,19 +166,18 @@ Below are the values you can expect to include/use inside a typical strategy dat
|
|||||||
|
|
||||||
| DataFrame Key | Description |
|
| DataFrame Key | Description |
|
||||||
|------------|-------------|
|
|------------|-------------|
|
||||||
| `df['&*']` | Any dataframe column prepended with `&` in `set_freqai_targets()` is treated as a training target (label) inside FreqAI (typically following the naming convention `&-s*`). For example, to predict the close price 40 candles into the future, you would set `df['&-s_close'] = df['close'].shift(-self.freqai_info["feature_parameters"]["label_period_candles"])` with `"label_period_candles": 40` in the config. FreqAI makes the predictions and gives them back under the same key (`df['&-s_close']`) to be used in `populate_entry/exit_trend()`. <br> **Datatype:** Depends on the output of the model.
|
| `df['&*']` | Any dataframe column prepended with `&` in `populate_any_indicators()` is treated as a training target (label) inside FreqAI (typically following the naming convention `&-s*`). For example, to predict the close price 40 candles into the future, you would set `df['&-s_close'] = df['close'].shift(-self.freqai_info["feature_parameters"]["label_period_candles"])` with `"label_period_candles": 40` in the config. FreqAI makes the predictions and gives them back under the same key (`df['&-s_close']`) to be used in `populate_entry/exit_trend()`. <br> **Datatype:** Depends on the output of the model.
|
||||||
| `df['&*_std/mean']` | Standard deviation and mean values of the defined labels during training (or live tracking with `fit_live_predictions_candles`). Commonly used to understand the rarity of a prediction (use the z-score as shown in `templates/FreqaiExampleStrategy.py` and explained [here](#creating-a-dynamic-target-threshold) to evaluate how often a particular prediction was observed during training or historically with `fit_live_predictions_candles`). <br> **Datatype:** Float.
|
| `df['&*_std/mean']` | Standard deviation and mean values of the defined labels during training (or live tracking with `fit_live_predictions_candles`). Commonly used to understand the rarity of a prediction (use the z-score as shown in `templates/FreqaiExampleStrategy.py` and explained [here](#creating-a-dynamic-target-threshold) to evaluate how often a particular prediction was observed during training or historically with `fit_live_predictions_candles`). <br> **Datatype:** Float.
|
||||||
| `df['do_predict']` | Indication of an outlier data point. The return value is integer between -2 and 2, which lets you know if the prediction is trustworthy or not. `do_predict==1` means that the prediction is trustworthy. If the Dissimilarity Index (DI, see details [here](freqai-feature-engineering.md#identifying-outliers-with-the-dissimilarity-index-di)) of the input data point is above the threshold defined in the config, FreqAI will subtract 1 from `do_predict`, resulting in `do_predict==0`. If `use_SVM_to_remove_outliers` is active, the Support Vector Machine (SVM, see details [here](freqai-feature-engineering.md#identifying-outliers-using-a-support-vector-machine-svm)) may also detect outliers in training and prediction data. In this case, the SVM will also subtract 1 from `do_predict`. If the input data point was considered an outlier by the SVM but not by the DI, or vice versa, the result will be `do_predict==0`. If both the DI and the SVM considers the input data point to be an outlier, the result will be `do_predict==-1`. As with the SVM, if `use_DBSCAN_to_remove_outliers` is active, DBSCAN (see details [here](freqai-feature-engineering.md#identifying-outliers-with-dbscan)) may also detect outliers and subtract 1 from `do_predict`. Hence, if both the SVM and DBSCAN are active and identify a datapoint that was above the DI threshold as an outlier, the result will be `do_predict==-2`. A particular case is when `do_predict == 2`, which means that the model has expired due to exceeding `expired_hours`. <br> **Datatype:** Integer between -2 and 2.
|
| `df['do_predict']` | Indication of an outlier data point. The return value is integer between -2 and 2, which lets you know if the prediction is trustworthy or not. `do_predict==1` means that the prediction is trustworthy. If the Dissimilarity Index (DI, see details [here](freqai-feature-engineering.md#identifying-outliers-with-the-dissimilarity-index-di)) of the input data point is above the threshold defined in the config, FreqAI will subtract 1 from `do_predict`, resulting in `do_predict==0`. If `use_SVM_to_remove_outliers()` is active, the Support Vector Machine (SVM, see details [here](freqai-feature-engineering.md#identifying-outliers-using-a-support-vector-machine-svm)) may also detect outliers in training and prediction data. In this case, the SVM will also subtract 1 from `do_predict`. If the input data point was considered an outlier by the SVM but not by the DI, or vice versa, the result will be `do_predict==0`. If both the DI and the SVM considers the input data point to be an outlier, the result will be `do_predict==-1`. As with the SVM, if `use_DBSCAN_to_remove_outliers` is active, DBSCAN (see details [here](freqai-feature-engineering.md#identifying-outliers-with-dbscan)) may also detect outliers and subtract 1 from `do_predict`. Hence, if both the SVM and DBSCAN are active and identify a datapoint that was above the DI threshold as an outlier, the result will be `do_predict==-2`. A particular case is when `do_predict == 2`, which means that the model has expired due to exceeding `expired_hours`. <br> **Datatype:** Integer between -2 and 2.
|
||||||
| `df['DI_values']` | Dissimilarity Index (DI) values are proxies for the level of confidence FreqAI has in the prediction. A lower DI means the prediction is close to the training data, i.e., higher prediction confidence. See details about the DI [here](freqai-feature-engineering.md#identifying-outliers-with-the-dissimilarity-index-di). <br> **Datatype:** Float.
|
| `df['DI_values']` | Dissimilarity Index (DI) values are proxies for the level of confidence FreqAI has in the prediction. A lower DI means the prediction is close to the training data, i.e., higher prediction confidence. See details about the DI [here](freqai-feature-engineering.md#identifying-outliers-with-the-dissimilarity-index-di). <br> **Datatype:** Float.
|
||||||
| `df['%*']` | Any dataframe column prepended with `%` in `feature_engineering_*()` is treated as a training feature. For example, you can include the RSI in the training feature set (similar to in `templates/FreqaiExampleStrategy.py`) by setting `df['%-rsi']`. See more details on how this is done [here](freqai-feature-engineering.md). <br> **Note:** Since the number of features prepended with `%` can multiply very quickly (10s of thousands of features are easily engineered using the multiplicative functionality of, e.g., `include_shifted_candles` and `include_timeframes` as described in the [parameter table](freqai-parameter-table.md)), these features are removed from the dataframe that is returned from FreqAI to the strategy. To keep a particular type of feature for plotting purposes, you would prepend it with `%%` (see details below). <br> **Datatype:** Depends on the feature created by the user.
|
| `df['%*']` | Any dataframe column prepended with `%` in `populate_any_indicators()` is treated as a training feature. For example, you can include the RSI in the training feature set (similar to in `templates/FreqaiExampleStrategy.py`) by setting `df['%-rsi']`. See more details on how this is done [here](freqai-feature-engineering.md). <br> **Note:** Since the number of features prepended with `%` can multiply very quickly (10s of thousands of features are easily engineered using the multiplictative functionality of, e.g., `include_shifted_candles` and `include_timeframes` as described in the [parameter table](freqai-parameter-table.md)), these features are removed from the dataframe that is returned from FreqAI to the strategy. To keep a particular type of feature for plotting purposes, you would prepend it with `%%`. <br> **Datatype:** Depends on the output of the model.
|
||||||
| `df['%%*']` | Any dataframe column prepended with `%%` in `feature_engineering_*()` is treated as a training feature, just the same as the above `%` prepend. However, in this case, the features are returned back to the strategy for FreqUI/plot-dataframe plotting and monitoring in Dry/Live/Backtesting <br> **Datatype:** Depends on the feature created by the user. Please note that features created in `feature_engineering_expand()` will have automatic FreqAI naming schemas depending on the expansions that you configured (i.e. `include_timeframes`, `include_corr_pairlist`, `indicators_periods_candles`, `include_shifted_candles`). So if you want to plot `%%-rsi` from `feature_engineering_expand_all()`, the final naming scheme for your plotting config would be: `%%-rsi-period_10_ETH/USDT:USDT_1h` for the `rsi` feature with `period=10`, `timeframe=1h`, and `pair=ETH/USDT:USDT` (the `:USDT` is added if you are using futures pairs). It is useful to simply add `print(dataframe.columns)` in your `populate_indicators()` after `self.freqai.start()` to see the full list of available features that are returned to the strategy for plotting purposes.
|
|
||||||
|
|
||||||
## Setting the `startup_candle_count`
|
## Setting the `startup_candle_count`
|
||||||
|
|
||||||
The `startup_candle_count` in the FreqAI strategy needs to be set up in the same way as in the standard Freqtrade strategy (see details [here](strategy-customization.md#strategy-startup-period)). This value is used by Freqtrade to ensure that a sufficient amount of data is provided when calling the `dataprovider`, to avoid any NaNs at the beginning of the first training. You can easily set this value by identifying the longest period (in candle units) which is passed to the indicator creation functions (e.g., TA-Lib functions). In the presented example, `startup_candle_count` is 20 since this is the maximum value in `indicators_periods_candles`.
|
The `startup_candle_count` in the FreqAI strategy needs to be set up in the same way as in the standard Freqtrade strategy (see details [here](strategy-customization.md#strategy-startup-period)). This value is used by Freqtrade to ensure that a sufficient amount of data is provided when calling the `dataprovider`, to avoid any NaNs at the beginning of the first training. You can easily set this value by identifying the longest period (in candle units) which is passed to the indicator creation functions (e.g., Ta-Lib functions). In the presented example, `startup_candle_count` is 20 since this is the maximum value in `indicators_periods_candles`.
|
||||||
|
|
||||||
!!! Note
|
!!! Note
|
||||||
There are instances where the TA-Lib functions actually require more data than just the passed `period` or else the feature dataset gets populated with NaNs. Anecdotally, multiplying the `startup_candle_count` by 2 always leads to a fully NaN free training dataset. Hence, it is typically safest to multiply the expected `startup_candle_count` by 2. Look out for this log message to confirm that the data is clean:
|
There are instances where the Ta-Lib functions actually require more data than just the passed `period` or else the feature dataset gets populated with NaNs. Anecdotally, multiplying the `startup_candle_count` by 2 always leads to a fully NaN free training dataset. Hence, it is typically safest to multiply the expected `startup_candle_count` by 2. Look out for this log message to confirm that the data is clean:
|
||||||
|
|
||||||
```
|
```
|
||||||
2022-08-31 15:14:04 - freqtrade.freqai.data_kitchen - INFO - dropped 0 training points due to NaNs in populated dataset 4319.
|
2022-08-31 15:14:04 - freqtrade.freqai.data_kitchen - INFO - dropped 0 training points due to NaNs in populated dataset 4319.
|
||||||
@@ -188,11 +192,11 @@ dataframe["target_roi"] = dataframe["&-s_close_mean"] + dataframe["&-s_close_std
|
|||||||
dataframe["sell_roi"] = dataframe["&-s_close_mean"] - dataframe["&-s_close_std"] * 1.25
|
dataframe["sell_roi"] = dataframe["&-s_close_mean"] - dataframe["&-s_close_std"] * 1.25
|
||||||
```
|
```
|
||||||
|
|
||||||
To consider the population of *historical predictions* for creating the dynamic target instead of information from the training as discussed above, you would set `fit_live_predictions_candles` in the config to the number of historical prediction candles you wish to use to generate target statistics.
|
To consider the population of *historical predictions* for creating the dynamic target instead of information from the training as discussed above, you would set `fit_live_prediction_candles` in the config to the number of historical prediction candles you wish to use to generate target statistics.
|
||||||
|
|
||||||
```json
|
```json
|
||||||
"freqai": {
|
"freqai": {
|
||||||
"fit_live_predictions_candles": 300,
|
"fit_live_prediction_candles": 300,
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -200,222 +204,14 @@ If this value is set, FreqAI will initially use the predictions from the trainin
|
|||||||
|
|
||||||
## Using different prediction models
|
## Using different prediction models
|
||||||
|
|
||||||
FreqAI has multiple example prediction model libraries that are ready to be used as is via the flag `--freqaimodel`. These libraries include `CatBoost`, `LightGBM`, and `XGBoost` regression, classification, and multi-target models, and can be found in `freqai/prediction_models/`.
|
FreqAI has multiple example prediction model libraries that are ready to be used as is via the flag `--freqaimodel`. These libraries include `Catboost`, `LightGBM`, and `XGBoost` regression, classification, and multi-target models, and can be found in `freqai/prediction_models/`. However, it is possible to customize and create your own prediction models using the `IFreqaiModel` class. You are encouraged to inherit `fit()`, `train()`, and `predict()` to let these customize various aspects of the training procedures.
|
||||||
|
|
||||||
Regression and classification models differ in what targets they predict - a regression model will predict a target of continuous values, for example what price BTC will be at tomorrow, whilst a classifier will predict a target of discrete values, for example if the price of BTC will go up tomorrow or not. This means that you have to specify your targets differently depending on which model type you are using (see details [below](#setting-model-targets)).
|
### Setting classifier targets
|
||||||
|
|
||||||
All of the aforementioned model libraries implement gradient boosted decision tree algorithms. They all work on the principle of ensemble learning, where predictions from multiple simple learners are combined to get a final prediction that is more stable and generalized. The simple learners in this case are decision trees. Gradient boosting refers to the method of learning, where each simple learner is built in sequence - the subsequent learner is used to improve on the error from the previous learner. If you want to learn more about the different model libraries you can find the information in their respective docs:
|
FreqAI includes a variety of classifiers, such as the `CatboostClassifier` via the flag `--freqaimodel CatboostClassifier`. If you elects to use a classifier, the classes need to be set using strings. For example:
|
||||||
|
|
||||||
* CatBoost: https://catboost.ai/en/docs/
|
|
||||||
* LightGBM: https://lightgbm.readthedocs.io/en/v3.3.2/#
|
|
||||||
* XGBoost: https://xgboost.readthedocs.io/en/stable/#
|
|
||||||
|
|
||||||
There are also numerous online articles describing and comparing the algorithms. Some relatively lightweight examples would be [CatBoost vs. LightGBM vs. XGBoost — Which is the best algorithm?](https://towardsdatascience.com/catboost-vs-lightgbm-vs-xgboost-c80f40662924#:~:text=In%20CatBoost%2C%20symmetric%20trees%2C%20or,the%20same%20depth%20can%20differ.) and [XGBoost, LightGBM or CatBoost — which boosting algorithm should I use?](https://medium.com/riskified-technology/xgboost-lightgbm-or-catboost-which-boosting-algorithm-should-i-use-e7fda7bb36bc). Keep in mind that the performance of each model is highly dependent on the application and so any reported metrics might not be true for your particular use of the model.
|
|
||||||
|
|
||||||
Apart from the models already available in FreqAI, it is also possible to customize and create your own prediction models using the `IFreqaiModel` class. You are encouraged to inherit `fit()`, `train()`, and `predict()` to customize various aspects of the training procedures. You can place custom FreqAI models in `user_data/freqaimodels` - and freqtrade will pick them up from there based on the provided `--freqaimodel` name - which has to correspond to the class name of your custom model.
|
|
||||||
Make sure to use unique names to avoid overriding built-in models.
|
|
||||||
|
|
||||||
### Setting model targets
|
|
||||||
|
|
||||||
#### Regressors
|
|
||||||
|
|
||||||
If you are using a regressor, you need to specify a target that has continuous values. FreqAI includes a variety of regressors, such as the `CatboostRegressor` via the flag `--freqaimodel CatboostRegressor`. An example of how you could set a regression target for predicting the price 100 candles into the future would be
|
|
||||||
|
|
||||||
```python
|
|
||||||
df['&s-close_price'] = df['close'].shift(-100)
|
|
||||||
```
|
|
||||||
|
|
||||||
If you want to predict multiple targets, you need to define multiple labels using the same syntax as shown above.
|
|
||||||
|
|
||||||
#### Classifiers
|
|
||||||
|
|
||||||
If you are using a classifier, you need to specify a target that has discrete values. FreqAI includes a variety of classifiers, such as the `CatboostClassifier` via the flag `--freqaimodel CatboostClassifier`. If you elect to use a classifier, the classes need to be set using strings. For example, if you want to predict if the price 100 candles into the future goes up or down, you would set
|
|
||||||
|
|
||||||
```python
|
```python
|
||||||
df['&s-up_or_down'] = np.where( df["close"].shift(-100) > df["close"], 'up', 'down')
|
df['&s-up_or_down'] = np.where( df["close"].shift(-100) > df["close"], 'up', 'down')
|
||||||
```
|
```
|
||||||
|
|
||||||
If you want to predict multiple targets you must specify all labels in the same label column. You could, for example, add the label `same` to define where the price was unchanged by setting
|
Additionally, the example classifier models do not accommodate multiple labels, but they do allow multi-class classification within a single label column.
|
||||||
|
|
||||||
```python
|
|
||||||
df['&s-up_or_down'] = np.where( df["close"].shift(-100) > df["close"], 'up', 'down')
|
|
||||||
df['&s-up_or_down'] = np.where( df["close"].shift(-100) == df["close"], 'same', df['&s-up_or_down'])
|
|
||||||
```
|
|
||||||
|
|
||||||
## PyTorch Module
|
|
||||||
|
|
||||||
### Quick start
|
|
||||||
|
|
||||||
The easiest way to quickly run a pytorch model is with the following command (for regression task):
|
|
||||||
|
|
||||||
```bash
|
|
||||||
freqtrade trade --config config_examples/config_freqai.example.json --strategy FreqaiExampleStrategy --freqaimodel PyTorchMLPRegressor --strategy-path freqtrade/templates
|
|
||||||
```
|
|
||||||
|
|
||||||
!!! Note "Installation/docker"
|
|
||||||
The PyTorch module requires large packages such as `torch`, which should be explicitly requested during `./setup.sh -i` by answering "y" to the question "Do you also want dependencies for freqai-rl or PyTorch (~700mb additional space required) [y/N]?".
|
|
||||||
Users who prefer docker should ensure they use the docker image appended with `_freqaitorch`.
|
|
||||||
We do provide an explicit docker-compose file for this in `docker/docker-compose-freqai.yml` - which can be used via `docker compose -f docker/docker-compose-freqai.yml run ...` - or can be copied to replace the original docker file.
|
|
||||||
This docker-compose file also contains a (disabled) section to enable GPU resources within docker containers. This obviously assumes the system has GPU resources available.
|
|
||||||
|
|
||||||
### Structure
|
|
||||||
|
|
||||||
#### Model
|
|
||||||
|
|
||||||
You can construct your own Neural Network architecture in PyTorch by simply defining your `nn.Module` class inside your custom [`IFreqaiModel` file](#using-different-prediction-models) and then using that class in your `def train()` function. Here is an example of logistic regression model implementation using PyTorch (should be used with nn.BCELoss criterion) for classification tasks.
|
|
||||||
|
|
||||||
```python
|
|
||||||
|
|
||||||
class LogisticRegression(nn.Module):
|
|
||||||
def __init__(self, input_size: int):
|
|
||||||
super().__init__()
|
|
||||||
# Define your layers
|
|
||||||
self.linear = nn.Linear(input_size, 1)
|
|
||||||
self.activation = nn.Sigmoid()
|
|
||||||
|
|
||||||
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
|
||||||
# Define the forward pass
|
|
||||||
out = self.linear(x)
|
|
||||||
out = self.activation(out)
|
|
||||||
return out
|
|
||||||
|
|
||||||
class MyCoolPyTorchClassifier(BasePyTorchClassifier):
|
|
||||||
"""
|
|
||||||
This is a custom IFreqaiModel showing how a user might setup their own
|
|
||||||
custom Neural Network architecture for their training.
|
|
||||||
"""
|
|
||||||
|
|
||||||
@property
|
|
||||||
def data_convertor(self) -> PyTorchDataConvertor:
|
|
||||||
return DefaultPyTorchDataConvertor(target_tensor_type=torch.float)
|
|
||||||
|
|
||||||
def __init__(self, **kwargs) -> None:
|
|
||||||
super().__init__(**kwargs)
|
|
||||||
config = self.freqai_info.get("model_training_parameters", {})
|
|
||||||
self.learning_rate: float = config.get("learning_rate", 3e-4)
|
|
||||||
self.model_kwargs: dict[str, Any] = config.get("model_kwargs", {})
|
|
||||||
self.trainer_kwargs: dict[str, Any] = config.get("trainer_kwargs", {})
|
|
||||||
|
|
||||||
def fit(self, data_dictionary: dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
|
|
||||||
"""
|
|
||||||
User sets up the training and test data to fit their desired model here
|
|
||||||
:param data_dictionary: the dictionary holding all data for train, test,
|
|
||||||
labels, weights
|
|
||||||
:param dk: The datakitchen object for the current coin/model
|
|
||||||
"""
|
|
||||||
|
|
||||||
class_names = self.get_class_names()
|
|
||||||
self.convert_label_column_to_int(data_dictionary, dk, class_names)
|
|
||||||
n_features = data_dictionary["train_features"].shape[-1]
|
|
||||||
model = LogisticRegression(
|
|
||||||
input_dim=n_features
|
|
||||||
)
|
|
||||||
model.to(self.device)
|
|
||||||
optimizer = torch.optim.AdamW(model.parameters(), lr=self.learning_rate)
|
|
||||||
criterion = torch.nn.CrossEntropyLoss()
|
|
||||||
init_model = self.get_init_model(dk.pair)
|
|
||||||
trainer = PyTorchModelTrainer(
|
|
||||||
model=model,
|
|
||||||
optimizer=optimizer,
|
|
||||||
criterion=criterion,
|
|
||||||
model_meta_data={"class_names": class_names},
|
|
||||||
device=self.device,
|
|
||||||
init_model=init_model,
|
|
||||||
data_convertor=self.data_convertor,
|
|
||||||
**self.trainer_kwargs,
|
|
||||||
)
|
|
||||||
trainer.fit(data_dictionary, self.splits)
|
|
||||||
return trainer
|
|
||||||
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Trainer
|
|
||||||
|
|
||||||
The `PyTorchModelTrainer` performs the idiomatic PyTorch train loop:
|
|
||||||
Define our model, loss function, and optimizer, and then move them to the appropriate device (GPU or CPU). Inside the loop, we iterate through the batches in the dataloader, move the data to the device, compute the prediction and loss, backpropagate, and update the model parameters using the optimizer.
|
|
||||||
|
|
||||||
In addition, the trainer is responsible for the following:
|
|
||||||
- saving and loading the model
|
|
||||||
- converting the data from `pandas.DataFrame` to `torch.Tensor`.
|
|
||||||
|
|
||||||
#### Integration with Freqai module
|
|
||||||
|
|
||||||
Like all freqai models, PyTorch models inherit `IFreqaiModel`. `IFreqaiModel` declares three abstract methods: `train`, `fit`, and `predict`. we implement these methods in three levels of hierarchy.
|
|
||||||
From top to bottom:
|
|
||||||
|
|
||||||
1. `BasePyTorchModel` - Implements the `train` method. all `BasePyTorch*` inherit it. responsible for general data preparation (e.g., data normalization) and calling the `fit` method. Sets `device` attribute used by children classes. Sets `model_type` attribute used by the parent class.
|
|
||||||
2. `BasePyTorch*` - Implements the `predict` method. Here, the `*` represents a group of algorithms, such as classifiers or regressors. responsible for data preprocessing, predicting, and postprocessing if needed.
|
|
||||||
3. `PyTorch*Classifier` / `PyTorch*Regressor` - implements the `fit` method. responsible for the main train flow, where we initialize the trainer and model objects.
|
|
||||||
|
|
||||||

|
|
||||||
|
|
||||||
#### Full example
|
|
||||||
|
|
||||||
Building a PyTorch regressor using MLP (multilayer perceptron) model, MSELoss criterion, and AdamW optimizer.
|
|
||||||
|
|
||||||
```python
|
|
||||||
class PyTorchMLPRegressor(BasePyTorchRegressor):
|
|
||||||
def __init__(self, **kwargs) -> None:
|
|
||||||
super().__init__(**kwargs)
|
|
||||||
config = self.freqai_info.get("model_training_parameters", {})
|
|
||||||
self.learning_rate: float = config.get("learning_rate", 3e-4)
|
|
||||||
self.model_kwargs: dict[str, Any] = config.get("model_kwargs", {})
|
|
||||||
self.trainer_kwargs: dict[str, Any] = config.get("trainer_kwargs", {})
|
|
||||||
|
|
||||||
def fit(self, data_dictionary: dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
|
|
||||||
n_features = data_dictionary["train_features"].shape[-1]
|
|
||||||
model = PyTorchMLPModel(
|
|
||||||
input_dim=n_features,
|
|
||||||
output_dim=1,
|
|
||||||
**self.model_kwargs
|
|
||||||
)
|
|
||||||
model.to(self.device)
|
|
||||||
optimizer = torch.optim.AdamW(model.parameters(), lr=self.learning_rate)
|
|
||||||
criterion = torch.nn.MSELoss()
|
|
||||||
init_model = self.get_init_model(dk.pair)
|
|
||||||
trainer = PyTorchModelTrainer(
|
|
||||||
model=model,
|
|
||||||
optimizer=optimizer,
|
|
||||||
criterion=criterion,
|
|
||||||
device=self.device,
|
|
||||||
init_model=init_model,
|
|
||||||
target_tensor_type=torch.float,
|
|
||||||
**self.trainer_kwargs,
|
|
||||||
)
|
|
||||||
trainer.fit(data_dictionary)
|
|
||||||
return trainer
|
|
||||||
```
|
|
||||||
|
|
||||||
Here we create a `PyTorchMLPRegressor` class that implements the `fit` method. The `fit` method specifies the training building blocks: model, optimizer, criterion, and trainer. We inherit both `BasePyTorchRegressor` and `BasePyTorchModel`, where the former implements the `predict` method that is suitable for our regression task, and the latter implements the train method.
|
|
||||||
|
|
||||||
??? Note "Setting Class Names for Classifiers"
|
|
||||||
When using classifiers, the user must declare the class names (or targets) by overriding the `IFreqaiModel.class_names` attribute. This is achieved by setting `self.freqai.class_names` in the FreqAI strategy inside the `set_freqai_targets` method.
|
|
||||||
|
|
||||||
For example, if you are using a binary classifier to predict price movements as up or down, you can set the class names as follows:
|
|
||||||
```python
|
|
||||||
def set_freqai_targets(self, dataframe: DataFrame, metadata: dict, **kwargs) -> DataFrame:
|
|
||||||
self.freqai.class_names = ["down", "up"]
|
|
||||||
dataframe['&s-up_or_down'] = np.where(dataframe["close"].shift(-100) >
|
|
||||||
dataframe["close"], 'up', 'down')
|
|
||||||
|
|
||||||
return dataframe
|
|
||||||
```
|
|
||||||
To see a full example, you can refer to the [classifier test strategy class](https://github.com/freqtrade/freqtrade/blob/develop/tests/strategy/strats/freqai_test_classifier.py).
|
|
||||||
|
|
||||||
|
|
||||||
#### Improving performance with `torch.compile()`
|
|
||||||
|
|
||||||
Torch provides a `torch.compile()` method that can be used to improve performance for specific GPU hardware. More details can be found [here](https://pytorch.org/tutorials/intermediate/torch_compile_tutorial.html). In brief, you simply wrap your `model` in `torch.compile()`:
|
|
||||||
|
|
||||||
|
|
||||||
```python
|
|
||||||
model = PyTorchMLPModel(
|
|
||||||
input_dim=n_features,
|
|
||||||
output_dim=1,
|
|
||||||
**self.model_kwargs
|
|
||||||
)
|
|
||||||
model.to(self.device)
|
|
||||||
model = torch.compile(model)
|
|
||||||
```
|
|
||||||
|
|
||||||
Then proceed to use the model as normal. Keep in mind that doing this will remove eager execution, which means errors and tracebacks will not be informative.
|
|
||||||
|
|||||||
@@ -2,150 +2,96 @@
|
|||||||
|
|
||||||
## Defining the features
|
## Defining the features
|
||||||
|
|
||||||
Low level feature engineering is performed in the user strategy within a set of functions called `feature_engineering_*`. These function set the `base features` such as, `RSI`, `MFI`, `EMA`, `SMA`, time of day, volume, etc. The `base features` can be custom indicators or they can be imported from any technical-analysis library that you can find. FreqAI is equipped with a set of functions to simplify rapid large-scale feature engineering:
|
Low level feature engineering is performed in the user strategy within a function called `populate_any_indicators()`. That function sets the `base features` such as, `RSI`, `MFI`, `EMA`, `SMA`, time of day, volume, etc. The `base features` can be custom indicators or they can be imported from any technical-analysis library that you can find. One important syntax rule is that all `base features` string names are prepended with `%`, while labels/targets are prepended with `&`.
|
||||||
|
|
||||||
| Function | Description |
|
|
||||||
|---------------|-------------|
|
|
||||||
| `feature_engineering_expand_all()` | This optional function will automatically expand the defined features on the config defined `indicator_periods_candles`, `include_timeframes`, `include_shifted_candles`, and `include_corr_pairs`.
|
|
||||||
| `feature_engineering_expand_basic()` | This optional function will automatically expand the defined features on the config defined `include_timeframes`, `include_shifted_candles`, and `include_corr_pairs`. Note: this function does *not* expand across `indicator_periods_candles`.
|
|
||||||
| `feature_engineering_standard()` | This optional function will be called once with the dataframe of the base timeframe. This is the final function to be called, which means that the dataframe entering this function will contain all the features and columns from the base asset created by the other `feature_engineering_expand` functions. This function is a good place to do custom exotic feature extractions (e.g. tsfresh). This function is also a good place for any feature that should not be auto-expanded upon (e.g., day of the week).
|
|
||||||
| `set_freqai_targets()` | Required function to set the targets for the model. All targets must be prepended with `&` to be recognized by the FreqAI internals.
|
|
||||||
|
|
||||||
Meanwhile, high level feature engineering is handled within `"feature_parameters":{}` in the FreqAI config. Within this file, it is possible to decide large scale feature expansions on top of the `base_features` such as "including correlated pairs" or "including informative timeframes" or even "including recent candles."
|
Meanwhile, high level feature engineering is handled within `"feature_parameters":{}` in the FreqAI config. Within this file, it is possible to decide large scale feature expansions on top of the `base_features` such as "including correlated pairs" or "including informative timeframes" or even "including recent candles."
|
||||||
|
|
||||||
It is advisable to start from the template `feature_engineering_*` functions in the source provided example strategy (found in `templates/FreqaiExampleStrategy.py`) to ensure that the feature definitions are following the correct conventions. Here is an example of how to set the indicators and labels in the strategy:
|
It is advisable to start from the template `populate_any_indicators()` in the source provided example strategy (found in `templates/FreqaiExampleStrategy.py`) to ensure that the feature definitions are following the correct conventions. Here is an example of how to set the indicators and labels in the strategy:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
def feature_engineering_expand_all(self, dataframe: DataFrame, period, metadata, **kwargs) -> DataFrame:
|
def populate_any_indicators(
|
||||||
|
self, pair, df, tf, informative=None, set_generalized_indicators=False
|
||||||
|
):
|
||||||
"""
|
"""
|
||||||
*Only functional with FreqAI enabled strategies*
|
Function designed to automatically generate, name, and merge features
|
||||||
This function will automatically expand the defined features on the config defined
|
from user-indicated timeframes in the configuration file. The user controls the indicators
|
||||||
`indicator_periods_candles`, `include_timeframes`, `include_shifted_candles`, and
|
passed to the training/prediction by prepending indicators with `'%-' + coin `
|
||||||
`include_corr_pairs`. In other words, a single feature defined in this function
|
(see convention below). I.e., the user should not prepend any supporting metrics
|
||||||
will automatically expand to a total of
|
(e.g., bb_lowerband below) with % unless they explicitly want to pass that metric to the
|
||||||
`indicator_periods_candles` * `include_timeframes` * `include_shifted_candles` *
|
model.
|
||||||
`include_corr_pairs` numbers of features added to the model.
|
:param pair: pair to be used as informative
|
||||||
|
:param df: strategy dataframe which will receive merges from informatives
|
||||||
All features must be prepended with `%` to be recognized by FreqAI internals.
|
:param tf: timeframe of the dataframe which will modify the feature names
|
||||||
|
:param informative: the dataframe associated with the informative pair
|
||||||
Access metadata such as the current pair/timeframe/period with:
|
:param coin: the name of the coin which will modify the feature names.
|
||||||
|
|
||||||
`metadata["pair"]` `metadata["tf"]` `metadata["period"]`
|
|
||||||
|
|
||||||
:param df: strategy dataframe which will receive the features
|
|
||||||
:param period: period of the indicator - usage example:
|
|
||||||
:param metadata: metadata of current pair
|
|
||||||
dataframe["%-ema-period"] = ta.EMA(dataframe, timeperiod=period)
|
|
||||||
"""
|
"""
|
||||||
|
|
||||||
dataframe["%-rsi-period"] = ta.RSI(dataframe, timeperiod=period)
|
coin = pair.split('/')[0]
|
||||||
dataframe["%-mfi-period"] = ta.MFI(dataframe, timeperiod=period)
|
|
||||||
dataframe["%-adx-period"] = ta.ADX(dataframe, timeperiod=period)
|
if informative is None:
|
||||||
dataframe["%-sma-period"] = ta.SMA(dataframe, timeperiod=period)
|
informative = self.dp.get_pair_dataframe(pair, tf)
|
||||||
dataframe["%-ema-period"] = ta.EMA(dataframe, timeperiod=period)
|
|
||||||
|
# first loop is automatically duplicating indicators for time periods
|
||||||
|
for t in self.freqai_info["feature_parameters"]["indicator_periods_candles"]:
|
||||||
|
t = int(t)
|
||||||
|
informative[f"%-{coin}rsi-period_{t}"] = ta.RSI(informative, timeperiod=t)
|
||||||
|
informative[f"%-{coin}mfi-period_{t}"] = ta.MFI(informative, timeperiod=t)
|
||||||
|
informative[f"%-{coin}adx-period_{t}"] = ta.ADX(informative, window=t)
|
||||||
|
|
||||||
bollinger = qtpylib.bollinger_bands(
|
bollinger = qtpylib.bollinger_bands(
|
||||||
qtpylib.typical_price(dataframe), window=period, stds=2.2
|
qtpylib.typical_price(informative), window=t, stds=2.2
|
||||||
)
|
)
|
||||||
dataframe["bb_lowerband-period"] = bollinger["lower"]
|
informative[f"{coin}bb_lowerband-period_{t}"] = bollinger["lower"]
|
||||||
dataframe["bb_middleband-period"] = bollinger["mid"]
|
informative[f"{coin}bb_middleband-period_{t}"] = bollinger["mid"]
|
||||||
dataframe["bb_upperband-period"] = bollinger["upper"]
|
informative[f"{coin}bb_upperband-period_{t}"] = bollinger["upper"]
|
||||||
|
|
||||||
dataframe["%-bb_width-period"] = (
|
informative[f"%-{coin}bb_width-period_{t}"] = (
|
||||||
dataframe["bb_upperband-period"]
|
informative[f"{coin}bb_upperband-period_{t}"]
|
||||||
- dataframe["bb_lowerband-period"]
|
- informative[f"{coin}bb_lowerband-period_{t}"]
|
||||||
) / dataframe["bb_middleband-period"]
|
) / informative[f"{coin}bb_middleband-period_{t}"]
|
||||||
dataframe["%-close-bb_lower-period"] = (
|
informative[f"%-{coin}close-bb_lower-period_{t}"] = (
|
||||||
dataframe["close"] / dataframe["bb_lowerband-period"]
|
informative["close"] / informative[f"{coin}bb_lowerband-period_{t}"]
|
||||||
)
|
)
|
||||||
|
|
||||||
dataframe["%-roc-period"] = ta.ROC(dataframe, timeperiod=period)
|
informative[f"%-{coin}relative_volume-period_{t}"] = (
|
||||||
|
informative["volume"] / informative["volume"].rolling(t).mean()
|
||||||
dataframe["%-relative_volume-period"] = (
|
|
||||||
dataframe["volume"] / dataframe["volume"].rolling(period).mean()
|
|
||||||
)
|
)
|
||||||
|
|
||||||
return dataframe
|
indicators = [col for col in informative if col.startswith("%")]
|
||||||
|
# This loop duplicates and shifts all indicators to add a sense of recency to data
|
||||||
|
for n in range(self.freqai_info["feature_parameters"]["include_shifted_candles"] + 1):
|
||||||
|
if n == 0:
|
||||||
|
continue
|
||||||
|
informative_shift = informative[indicators].shift(n)
|
||||||
|
informative_shift = informative_shift.add_suffix("_shift-" + str(n))
|
||||||
|
informative = pd.concat((informative, informative_shift), axis=1)
|
||||||
|
|
||||||
def feature_engineering_expand_basic(self, dataframe: DataFrame, metadata, **kwargs) -> DataFrame:
|
df = merge_informative_pair(df, informative, self.config["timeframe"], tf, ffill=True)
|
||||||
"""
|
skip_columns = [
|
||||||
*Only functional with FreqAI enabled strategies*
|
(s + "_" + tf) for s in ["date", "open", "high", "low", "close", "volume"]
|
||||||
This function will automatically expand the defined features on the config defined
|
]
|
||||||
`include_timeframes`, `include_shifted_candles`, and `include_corr_pairs`.
|
df = df.drop(columns=skip_columns)
|
||||||
In other words, a single feature defined in this function
|
|
||||||
will automatically expand to a total of
|
|
||||||
`include_timeframes` * `include_shifted_candles` * `include_corr_pairs`
|
|
||||||
numbers of features added to the model.
|
|
||||||
|
|
||||||
Features defined here will *not* be automatically duplicated on user defined
|
# Add generalized indicators here (because in live, it will call this
|
||||||
`indicator_periods_candles`
|
# function to populate indicators during training). Notice how we ensure not to
|
||||||
|
# add them multiple times
|
||||||
|
if set_generalized_indicators:
|
||||||
|
df["%-day_of_week"] = (df["date"].dt.dayofweek + 1) / 7
|
||||||
|
df["%-hour_of_day"] = (df["date"].dt.hour + 1) / 25
|
||||||
|
|
||||||
Access metadata such as the current pair/timeframe with:
|
# user adds targets here by prepending them with &- (see convention below)
|
||||||
|
# If user wishes to use multiple targets, a multioutput prediction model
|
||||||
`metadata["pair"]` `metadata["tf"]`
|
# needs to be used such as templates/CatboostPredictionMultiModel.py
|
||||||
|
df["&-s_close"] = (
|
||||||
All features must be prepended with `%` to be recognized by FreqAI internals.
|
df["close"]
|
||||||
|
|
||||||
:param df: strategy dataframe which will receive the features
|
|
||||||
:param metadata: metadata of current pair
|
|
||||||
dataframe["%-pct-change"] = dataframe["close"].pct_change()
|
|
||||||
dataframe["%-ema-200"] = ta.EMA(dataframe, timeperiod=200)
|
|
||||||
"""
|
|
||||||
dataframe["%-pct-change"] = dataframe["close"].pct_change()
|
|
||||||
dataframe["%-raw_volume"] = dataframe["volume"]
|
|
||||||
dataframe["%-raw_price"] = dataframe["close"]
|
|
||||||
return dataframe
|
|
||||||
|
|
||||||
def feature_engineering_standard(self, dataframe: DataFrame, metadata, **kwargs) -> DataFrame:
|
|
||||||
"""
|
|
||||||
*Only functional with FreqAI enabled strategies*
|
|
||||||
This optional function will be called once with the dataframe of the base timeframe.
|
|
||||||
This is the final function to be called, which means that the dataframe entering this
|
|
||||||
function will contain all the features and columns created by all other
|
|
||||||
freqai_feature_engineering_* functions.
|
|
||||||
|
|
||||||
This function is a good place to do custom exotic feature extractions (e.g. tsfresh).
|
|
||||||
This function is a good place for any feature that should not be auto-expanded upon
|
|
||||||
(e.g. day of the week).
|
|
||||||
|
|
||||||
Access metadata such as the current pair with:
|
|
||||||
|
|
||||||
`metadata["pair"]`
|
|
||||||
|
|
||||||
All features must be prepended with `%` to be recognized by FreqAI internals.
|
|
||||||
|
|
||||||
:param df: strategy dataframe which will receive the features
|
|
||||||
:param metadata: metadata of current pair
|
|
||||||
usage example: dataframe["%-day_of_week"] = (dataframe["date"].dt.dayofweek + 1) / 7
|
|
||||||
"""
|
|
||||||
dataframe["%-day_of_week"] = (dataframe["date"].dt.dayofweek + 1) / 7
|
|
||||||
dataframe["%-hour_of_day"] = (dataframe["date"].dt.hour + 1) / 25
|
|
||||||
return dataframe
|
|
||||||
|
|
||||||
def set_freqai_targets(self, dataframe: DataFrame, metadata, **kwargs) -> DataFrame:
|
|
||||||
"""
|
|
||||||
*Only functional with FreqAI enabled strategies*
|
|
||||||
Required function to set the targets for the model.
|
|
||||||
All targets must be prepended with `&` to be recognized by the FreqAI internals.
|
|
||||||
|
|
||||||
Access metadata such as the current pair with:
|
|
||||||
|
|
||||||
`metadata["pair"]`
|
|
||||||
|
|
||||||
:param df: strategy dataframe which will receive the targets
|
|
||||||
:param metadata: metadata of current pair
|
|
||||||
usage example: dataframe["&-target"] = dataframe["close"].shift(-1) / dataframe["close"]
|
|
||||||
"""
|
|
||||||
dataframe["&-s_close"] = (
|
|
||||||
dataframe["close"]
|
|
||||||
.shift(-self.freqai_info["feature_parameters"]["label_period_candles"])
|
.shift(-self.freqai_info["feature_parameters"]["label_period_candles"])
|
||||||
.rolling(self.freqai_info["feature_parameters"]["label_period_candles"])
|
.rolling(self.freqai_info["feature_parameters"]["label_period_candles"])
|
||||||
.mean()
|
.mean()
|
||||||
/ dataframe["close"]
|
/ df["close"]
|
||||||
- 1
|
- 1
|
||||||
)
|
)
|
||||||
|
|
||||||
return dataframe
|
return df
|
||||||
```
|
```
|
||||||
|
|
||||||
In the presented example, the user does not wish to pass the `bb_lowerband` as a feature to the model,
|
In the presented example, the user does not wish to pass the `bb_lowerband` as a feature to the model,
|
||||||
@@ -172,30 +118,15 @@ After having defined the `base features`, the next step is to expand upon them u
|
|||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
The `include_timeframes` in the config above are the timeframes (`tf`) of each call to `feature_engineering_expand_*()` in the strategy. In the presented case, the user is asking for the `5m`, `15m`, and `4h` timeframes of the `rsi`, `mfi`, `roc`, and `bb_width` to be included in the feature set.
|
The `include_timeframes` in the config above are the timeframes (`tf`) of each call to `populate_any_indicators()` in the strategy. In the presented case, the user is asking for the `5m`, `15m`, and `4h` timeframes of the `rsi`, `mfi`, `roc`, and `bb_width` to be included in the feature set.
|
||||||
|
|
||||||
You can ask for each of the defined features to be included also for informative pairs using the `include_corr_pairlist`. This means that the feature set will include all the features from `feature_engineering_expand_*()` on all the `include_timeframes` for each of the correlated pairs defined in the config (`ETH/USD`, `LINK/USD`, and `BNB/USD` in the presented example).
|
You can ask for each of the defined features to be included also for informative pairs using the `include_corr_pairlist`. This means that the feature set will include all the features from `populate_any_indicators` on all the `include_timeframes` for each of the correlated pairs defined in the config (`ETH/USD`, `LINK/USD`, and `BNB/USD` in the presented example).
|
||||||
|
|
||||||
`include_shifted_candles` indicates the number of previous candles to include in the feature set. For example, `include_shifted_candles: 2` tells FreqAI to include the past 2 candles for each of the features in the feature set.
|
`include_shifted_candles` indicates the number of previous candles to include in the feature set. For example, `include_shifted_candles: 2` tells FreqAI to include the past 2 candles for each of the features in the feature set.
|
||||||
|
|
||||||
In total, the number of features the user of the presented example strategy has created is: length of `include_timeframes` * no. features in `feature_engineering_expand_*()` * length of `include_corr_pairlist` * no. `include_shifted_candles` * length of `indicator_periods_candles`
|
In total, the number of features the user of the presented example strat has created is: length of `include_timeframes` * no. features in `populate_any_indicators()` * length of `include_corr_pairlist` * no. `include_shifted_candles` * length of `indicator_periods_candles`
|
||||||
$= 3 * 3 * 3 * 2 * 2 = 108$.
|
$= 3 * 3 * 3 * 2 * 2 = 108$.
|
||||||
|
|
||||||
!!! note "Learn more about creative feature engineering"
|
|
||||||
Check out our [medium article](https://emergentmethods.medium.com/freqai-from-price-to-prediction-6fadac18b665) geared toward helping users learn how to creatively engineer features.
|
|
||||||
|
|
||||||
### Gain finer control over `feature_engineering_*` functions with `metadata`
|
|
||||||
|
|
||||||
All `feature_engineering_*` and `set_freqai_targets()` functions are passed a `metadata` dictionary which contains information about the `pair`, `tf` (timeframe), and `period` that FreqAI is automating for feature building. As such, a user can use `metadata` inside `feature_engineering_*` functions as criteria for blocking/reserving features for certain timeframes, periods, pairs etc.
|
|
||||||
|
|
||||||
```python
|
|
||||||
def feature_engineering_expand_all(self, dataframe: DataFrame, period, metadata, **kwargs) -> DataFrame:
|
|
||||||
if metadata["tf"] == "1h":
|
|
||||||
dataframe["%-roc-period"] = ta.ROC(dataframe, timeperiod=period)
|
|
||||||
```
|
|
||||||
|
|
||||||
This will block `ta.ROC()` from being added to any timeframes other than `"1h"`.
|
|
||||||
|
|
||||||
### Returning additional info from training
|
### Returning additional info from training
|
||||||
|
|
||||||
Important metrics can be returned to the strategy at the end of each model training by assigning them to `dk.data['extra_returns_per_train']['my_new_value'] = XYZ` inside the custom prediction model class.
|
Important metrics can be returned to the strategy at the end of each model training by assigning them to `dk.data['extra_returns_per_train']['my_new_value'] = XYZ` inside the custom prediction model class.
|
||||||
@@ -212,7 +143,41 @@ Another example, where the user wants to use live metrics from the trade databas
|
|||||||
|
|
||||||
You need to set the standard dictionary in the config so that FreqAI can return proper dataframe shapes. These values will likely be overridden by the prediction model, but in the case where the model has yet to set them, or needs a default initial value, the pre-set values are what will be returned.
|
You need to set the standard dictionary in the config so that FreqAI can return proper dataframe shapes. These values will likely be overridden by the prediction model, but in the case where the model has yet to set them, or needs a default initial value, the pre-set values are what will be returned.
|
||||||
|
|
||||||
### Weighting features for temporal importance
|
## Feature normalization
|
||||||
|
|
||||||
|
FreqAI is strict when it comes to data normalization. The train features, $X^{train}$, are always normalized to [-1, 1] using a shifted min-max normalization:
|
||||||
|
|
||||||
|
$$X^{train}_{norm} = 2 * \frac{X^{train} - X^{train}.min()}{X^{train}.max() - X^{train}.min()} - 1$$
|
||||||
|
|
||||||
|
All other data (test data and unseen prediction data in dry/live/backtest) is always automatically normalized to the training feature space according to industry standards. FreqAI stores all the metadata required to ensure that test and prediction features will be properly normalized and that predictions are properly denormalized. For this reason, it is not recommended to eschew industry standards and modify FreqAI internals - however - advanced users can do so by inheriting `train()` in their custom `IFreqaiModel` and using their own normalization functions.
|
||||||
|
|
||||||
|
## Data dimensionality reduction with Principal Component Analysis
|
||||||
|
|
||||||
|
You can reduce the dimensionality of your features by activating the `principal_component_analysis` in the config:
|
||||||
|
|
||||||
|
```json
|
||||||
|
"freqai": {
|
||||||
|
"feature_parameters" : {
|
||||||
|
"principal_component_analysis": true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
This will perform PCA on the features and reduce their dimensionality so that the explained variance of the data set is >= 0.999. Reducing data dimensionality makes training the model faster and hence allows for more up-to-date models.
|
||||||
|
|
||||||
|
## Inlier metric
|
||||||
|
|
||||||
|
The `inlier_metric` is a metric aimed at quantifying how similar a the features of a data point are to the most recent historic data points.
|
||||||
|
|
||||||
|
You define the lookback window by setting `inlier_metric_window` and FreqAI computes the distance between the present time point and each of the previous `inlier_metric_window` lookback points. A Weibull function is fit to each of the lookback distributions and its cumulative distribution function (CDF) is used to produce a quantile for each lookback point. The `inlier_metric` is then computed for each time point as the average of the corresponding lookback quantiles. The figure below explains the concept for an `inlier_metric_window` of 5.
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
FreqAI adds the `inlier_metric` to the training features and hence gives the model access to a novel type of temporal information.
|
||||||
|
|
||||||
|
This function does **not** remove outliers from the data set.
|
||||||
|
|
||||||
|
## Weighting features for temporal importance
|
||||||
|
|
||||||
FreqAI allows you to set a `weight_factor` to weight recent data more strongly than past data via an exponential function:
|
FreqAI allows you to set a `weight_factor` to weight recent data more strongly than past data via an exponential function:
|
||||||
|
|
||||||
@@ -222,96 +187,6 @@ where $W_i$ is the weight of data point $i$ in a total set of $n$ data points. B
|
|||||||
|
|
||||||

|

|
||||||
|
|
||||||
## Building the data pipeline
|
|
||||||
|
|
||||||
By default, FreqAI builds a dynamic pipeline based on user configuration settings. The default settings are robust and designed to work with a variety of methods. These two steps are a `MinMaxScaler(-1,1)` and a `VarianceThreshold` which removes any column that has 0 variance. Users can activate other steps with more configuration parameters. For example if users add `use_SVM_to_remove_outliers: true` to the `freqai` config, then FreqAI will automatically add the [`SVMOutlierExtractor`](#identifying-outliers-using-a-support-vector-machine-svm) to the pipeline. Likewise, users can add `principal_component_analysis: true` to the `freqai` config to activate PCA. The [DissimilarityIndex](#identifying-outliers-with-the-dissimilarity-index-di) is activated with `DI_threshold: 1`. Finally, noise can also be added to the data with `noise_standard_deviation: 0.1`. Finally, users can add [DBSCAN](#identifying-outliers-with-dbscan) outlier removal with `use_DBSCAN_to_remove_outliers: true`.
|
|
||||||
|
|
||||||
!!! note "More information available"
|
|
||||||
Please review the [parameter table](freqai-parameter-table.md) for more information on these parameters.
|
|
||||||
|
|
||||||
|
|
||||||
### Customizing the pipeline
|
|
||||||
|
|
||||||
Users are encouraged to customize the data pipeline to their needs by building their own data pipeline. This can be done by simply setting `dk.feature_pipeline` to their desired `Pipeline` object inside their `IFreqaiModel` `train()` function, or if they prefer not to touch the `train()` function, they can override `define_data_pipeline`/`define_label_pipeline` functions in their `IFreqaiModel`:
|
|
||||||
|
|
||||||
!!! note "More information available"
|
|
||||||
FreqAI uses the [`DataSieve`](https://github.com/emergentmethods/datasieve) pipeline, which follows the SKlearn pipeline API, but adds, among other features, coherence between the X, y, and sample_weight vector point removals, feature removal, feature name following.
|
|
||||||
|
|
||||||
```python
|
|
||||||
from datasieve.transforms import SKLearnWrapper, DissimilarityIndex
|
|
||||||
from datasieve.pipeline import Pipeline
|
|
||||||
from sklearn.preprocessing import QuantileTransformer, StandardScaler
|
|
||||||
from freqai.base_models import BaseRegressionModel
|
|
||||||
|
|
||||||
|
|
||||||
class MyFreqaiModel(BaseRegressionModel):
|
|
||||||
"""
|
|
||||||
Some cool custom model
|
|
||||||
"""
|
|
||||||
def fit(self, data_dictionary: Dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
|
|
||||||
"""
|
|
||||||
My custom fit function
|
|
||||||
"""
|
|
||||||
model = cool_model.fit()
|
|
||||||
return model
|
|
||||||
|
|
||||||
def define_data_pipeline(self) -> Pipeline:
|
|
||||||
"""
|
|
||||||
User defines their custom feature pipeline here (if they wish)
|
|
||||||
"""
|
|
||||||
feature_pipeline = Pipeline([
|
|
||||||
('qt', SKLearnWrapper(QuantileTransformer(output_distribution='normal'))),
|
|
||||||
('di', ds.DissimilarityIndex(di_threshold=1))
|
|
||||||
])
|
|
||||||
|
|
||||||
return feature_pipeline
|
|
||||||
|
|
||||||
def define_label_pipeline(self) -> Pipeline:
|
|
||||||
"""
|
|
||||||
User defines their custom label pipeline here (if they wish)
|
|
||||||
"""
|
|
||||||
label_pipeline = Pipeline([
|
|
||||||
('qt', SKLearnWrapper(StandardScaler())),
|
|
||||||
])
|
|
||||||
|
|
||||||
return label_pipeline
|
|
||||||
```
|
|
||||||
|
|
||||||
Here, you are defining the exact pipeline that will be used for your feature set during training and prediction. You can use *most* SKLearn transformation steps by wrapping them in the `SKLearnWrapper` class as shown above. In addition, you can use any of the transformations available in the [`DataSieve` library](https://github.com/emergentmethods/datasieve).
|
|
||||||
|
|
||||||
You can easily add your own transformation by creating a class that inherits from the datasieve `BaseTransform` and implementing your `fit()`, `transform()` and `inverse_transform()` methods:
|
|
||||||
|
|
||||||
```python
|
|
||||||
from datasieve.transforms.base_transform import BaseTransform
|
|
||||||
# import whatever else you need
|
|
||||||
|
|
||||||
class MyCoolTransform(BaseTransform):
|
|
||||||
def __init__(self, **kwargs):
|
|
||||||
self.param1 = kwargs.get('param1', 1)
|
|
||||||
|
|
||||||
def fit(self, X, y=None, sample_weight=None, feature_list=None, **kwargs):
|
|
||||||
# do something with X, y, sample_weight, or/and feature_list
|
|
||||||
return X, y, sample_weight, feature_list
|
|
||||||
|
|
||||||
def transform(self, X, y=None, sample_weight=None,
|
|
||||||
feature_list=None, outlier_check=False, **kwargs):
|
|
||||||
# do something with X, y, sample_weight, or/and feature_list
|
|
||||||
return X, y, sample_weight, feature_list
|
|
||||||
|
|
||||||
def inverse_transform(self, X, y=None, sample_weight=None, feature_list=None, **kwargs):
|
|
||||||
# do/dont do something with X, y, sample_weight, or/and feature_list
|
|
||||||
return X, y, sample_weight, feature_list
|
|
||||||
```
|
|
||||||
|
|
||||||
!!! note "Hint"
|
|
||||||
You can define this custom class in the same file as your `IFreqaiModel`.
|
|
||||||
|
|
||||||
### Migrating a custom `IFreqaiModel` to the new Pipeline
|
|
||||||
|
|
||||||
If you have created your own custom `IFreqaiModel` with a custom `train()`/`predict()` function, *and* you still rely on `data_cleaning_train/predict()`, then you will need to migrate to the new pipeline. If your model does *not* rely on `data_cleaning_train/predict()`, then you do not need to worry about this migration.
|
|
||||||
|
|
||||||
More details about the migration can be found [here](strategy_migration.md#freqai---new-data-pipeline).
|
|
||||||
|
|
||||||
## Outlier detection
|
## Outlier detection
|
||||||
|
|
||||||
Equity and crypto markets suffer from a high level of non-patterned noise in the form of outlier data points. FreqAI implements a variety of methods to identify such outliers and hence mitigate risk.
|
Equity and crypto markets suffer from a high level of non-patterned noise in the form of outlier data points. FreqAI implements a variety of methods to identify such outliers and hence mitigate risk.
|
||||||
@@ -330,7 +205,7 @@ You can tell FreqAI to remove outlier data points from the training/test data se
|
|||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
Which will add `DissimilarityIndex` step to your `feature_pipeline` and set the threshold to 1. The DI allows predictions which are outliers (not existent in the model feature space) to be thrown out due to low levels of certainty. To do so, FreqAI measures the distance between each training data point (feature vector), $X_{a}$, and all other training data points:
|
The DI allows predictions which are outliers (not existent in the model feature space) to be thrown out due to low levels of certainty. To do so, FreqAI measures the distance between each training data point (feature vector), $X_{a}$, and all other training data points:
|
||||||
|
|
||||||
$$ d_{ab} = \sqrt{\sum_{j=1}^p(X_{a,j}-X_{b,j})^2} $$
|
$$ d_{ab} = \sqrt{\sum_{j=1}^p(X_{a,j}-X_{b,j})^2} $$
|
||||||
|
|
||||||
@@ -364,9 +239,9 @@ You can tell FreqAI to remove outlier data points from the training/test data se
|
|||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
Which will add `SVMOutlierExtractor` step to your `feature_pipeline`. The SVM will be trained on the training data and any data point that the SVM deems to be beyond the feature space will be removed.
|
The SVM will be trained on the training data and any data point that the SVM deems to be beyond the feature space will be removed.
|
||||||
|
|
||||||
You can elect to provide additional parameters for the SVM, such as `shuffle`, and `nu` via the `feature_parameters.svm_params` dictionary in the config.
|
FreqAI uses `sklearn.linear_model.SGDOneClassSVM` (details are available on scikit-learn's webpage [here](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.SGDOneClassSVM.html) (external website)) and you can elect to provide additional parameters for the SVM, such as `shuffle`, and `nu`.
|
||||||
|
|
||||||
The parameter `shuffle` is by default set to `False` to ensure consistent results. If it is set to `True`, running the SVM multiple times on the same data set might result in different outcomes due to `max_iter` being to low for the algorithm to reach the demanded `tol`. Increasing `max_iter` solves this issue but causes the procedure to take longer time.
|
The parameter `shuffle` is by default set to `False` to ensure consistent results. If it is set to `True`, running the SVM multiple times on the same data set might result in different outcomes due to `max_iter` being to low for the algorithm to reach the demanded `tol`. Increasing `max_iter` solves this issue but causes the procedure to take longer time.
|
||||||
|
|
||||||
@@ -384,25 +259,10 @@ You can configure FreqAI to use DBSCAN to cluster and remove outliers from the t
|
|||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
Which will add the `DataSieveDBSCAN` step to your `feature_pipeline`. This is an unsupervised machine learning algorithm that clusters data without needing to know how many clusters there should be.
|
DBSCAN is an unsupervised machine learning algorithm that clusters data without needing to know how many clusters there should be.
|
||||||
|
|
||||||
Given a number of data points $N$, and a distance $\varepsilon$, DBSCAN clusters the data set by setting all data points that have $N-1$ other data points within a distance of $\varepsilon$ as *core points*. A data point that is within a distance of $\varepsilon$ from a *core point* but that does not have $N-1$ other data points within a distance of $\varepsilon$ from itself is considered an *edge point*. A cluster is then the collection of *core points* and *edge points*. Data points that have no other data points at a distance $<\varepsilon$ are considered outliers. The figure below shows a cluster with $N = 3$.
|
Given a number of data points $N$, and a distance $\varepsilon$, DBSCAN clusters the data set by setting all data points that have $N-1$ other data points within a distance of $\varepsilon$ as *core points*. A data point that is within a distance of $\varepsilon$ from a *core point* but that does not have $N-1$ other data points within a distance of $\varepsilon$ from itself is considered an *edge point*. A cluster is then the collection of *core points* and *edge points*. Data points that have no other data points at a distance $<\varepsilon$ are considered outliers. The figure below shows a cluster with $N = 3$.
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
FreqAI uses `sklearn.cluster.DBSCAN` (details are available on scikit-learn's webpage [here](https://scikit-learn.org/stable/modules/generated/sklearn.cluster.DBSCAN.html) (external website)) with `min_samples` ($N$) taken as 1/4 of the no. of time points (candles) in the feature set. `eps` ($\varepsilon$) is computed automatically as the elbow point in the *k-distance graph* computed from the nearest neighbors in the pairwise distances of all data points in the feature set.
|
FreqAI uses `sklearn.cluster.DBSCAN` (details are available on scikit-learn's webpage [here](https://scikit-learn.org/stable/modules/generated/sklearn.cluster.DBSCAN.html) (external website)) with `min_samples` ($N$) taken as 1/4 of the no. of time points (candles) in the feature set. `eps` ($\varepsilon$) is computed automatically as the elbow point in the *k-distance graph* computed from the nearest neighbors in the pairwise distances of all data points in the feature set.
|
||||||
|
|
||||||
|
|
||||||
### Data dimensionality reduction with Principal Component Analysis
|
|
||||||
|
|
||||||
You can reduce the dimensionality of your features by activating the principal_component_analysis in the config:
|
|
||||||
|
|
||||||
```json
|
|
||||||
"freqai": {
|
|
||||||
"feature_parameters" : {
|
|
||||||
"principal_component_analysis": true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
This will perform PCA on the features and reduce their dimensionality so that the explained variance of the data set is >= 0.999. Reducing data dimensionality makes training the model faster and hence allows for more up-to-date models.
|
|
||||||
|
|||||||
@@ -4,115 +4,58 @@ The table below will list all configuration parameters available for FreqAI. Som
|
|||||||
|
|
||||||
Mandatory parameters are marked as **Required** and have to be set in one of the suggested ways.
|
Mandatory parameters are marked as **Required** and have to be set in one of the suggested ways.
|
||||||
|
|
||||||
### General configuration parameters
|
|
||||||
|
|
||||||
| Parameter | Description |
|
| Parameter | Description |
|
||||||
|------------|-------------|
|
|------------|-------------|
|
||||||
| | **General configuration parameters within the `config.freqai` tree**
|
| | **General configuration parameters**
|
||||||
| `freqai` | **Required.** <br> The parent dictionary containing all the parameters for controlling FreqAI. <br> **Datatype:** Dictionary.
|
| `freqai` | **Required.** <br> The parent dictionary containing all the parameters for controlling FreqAI. <br> **Datatype:** Dictionary.
|
||||||
| `train_period_days` | **Required.** <br> Number of days to use for the training data (width of the sliding window). <br> **Datatype:** Positive integer.
|
| `train_period_days` | **Required.** <br> Number of days to use for the training data (width of the sliding window). <br> **Datatype:** Positive integer.
|
||||||
| `backtest_period_days` | **Required.** <br> Number of days to inference from the trained model before sliding the `train_period_days` window defined above, and retraining the model during backtesting (more info [here](freqai-running.md#backtesting)). This can be fractional days, but beware that the provided `timerange` will be divided by this number to yield the number of trainings necessary to complete the backtest. <br> **Datatype:** Float.
|
| `backtest_period_days` | **Required.** <br> Number of days to inference from the trained model before sliding the `train_period_days` window defined above, and retraining the model during backtesting (more info [here](freqai-running.md#backtesting)). This can be fractional days, but beware that the provided `timerange` will be divided by this number to yield the number of trainings necessary to complete the backtest. <br> **Datatype:** Float.
|
||||||
| `identifier` | **Required.** <br> A unique ID for the current model. If models are saved to disk, the `identifier` allows for reloading specific pre-trained models/data. <br> **Datatype:** String.
|
| `identifier` | **Required.** <br> A unique ID for the current model. If models are saved to disk, the `identifier` allows for reloading specific pre-trained models/data. <br> **Datatype:** String.
|
||||||
| `live_retrain_hours` | Frequency of retraining during dry/live runs. <br> **Datatype:** Float > 0. <br> Default: `0` (models retrain as often as possible).
|
| `live_retrain_hours` | Frequency of retraining during dry/live runs. <br> **Datatype:** Float > 0. <br> Default: `0` (models retrain as often as possible).
|
||||||
| `expiration_hours` | Avoid making predictions if a model is more than `expiration_hours` old. <br> **Datatype:** Positive integer. <br> Default: `0` (models never expire).
|
| `expiration_hours` | Avoid making predictions if a model is more than `expiration_hours` old. <br> **Datatype:** Positive integer. <br> Default: `0` (models never expire).
|
||||||
| `purge_old_models` | Number of models to keep on disk (not relevant to backtesting). Default is 2, which means that dry/live runs will keep the latest 2 models on disk. Setting to 0 keeps all models. This parameter also accepts a boolean to maintain backwards compatibility. <br> **Datatype:** Integer. <br> Default: `2`.
|
| `purge_old_models` | Delete obsolete models. <br> **Datatype:** Boolean. <br> Default: `False` (all historic models remain on disk).
|
||||||
| `save_backtest_models` | Save models to disk when running backtesting. Backtesting operates most efficiently by saving the prediction data and reusing them directly for subsequent runs (when you wish to tune entry/exit parameters). Saving backtesting models to disk also allows to use the same model files for starting a dry/live instance with the same model `identifier`. <br> **Datatype:** Boolean. <br> Default: `False` (no models are saved).
|
| `save_backtest_models` | Save models to disk when running backtesting. Backtesting operates most efficiently by saving the prediction data and reusing them directly for subsequent runs (when you wish to tune entry/exit parameters). Saving backtesting models to disk also allows to use the same model files for starting a dry/live instance with the same model `identifier`. <br> **Datatype:** Boolean. <br> Default: `False` (no models are saved).
|
||||||
| `fit_live_predictions_candles` | Number of historical candles to use for computing target (label) statistics from prediction data, instead of from the training dataset (more information can be found [here](freqai-configuration.md#creating-a-dynamic-target-threshold)). <br> **Datatype:** Positive integer.
|
| `fit_live_predictions_candles` | Number of historical candles to use for computing target (label) statistics from prediction data, instead of from the training dataset (more information can be found [here](freqai-configuration.md#creating-a-dynamic-target-threshold)). <br> **Datatype:** Positive integer.
|
||||||
| `continual_learning` | Use the final state of the most recently trained model as starting point for the new model, allowing for incremental learning (more information can be found [here](freqai-running.md#continual-learning)). Beware that this is currently a naive approach to incremental learning, and it has a high probability of overfitting/getting stuck in local minima while the market moves away from your model. We have the connections here primarily for experimental purposes and so that it is ready for more mature approaches to continual learning in chaotic systems like the crypto market. <br> **Datatype:** Boolean. <br> Default: `False`.
|
| `follow_mode` | Use a `follower` that will look for models associated with a specific `identifier` and load those for inferencing. A `follower` will **not** train new models. <br> **Datatype:** Boolean. <br> Default: `False`.
|
||||||
| `write_metrics_to_disk` | Collect train timings, inference timings and cpu usage in json file. <br> **Datatype:** Boolean. <br> Default: `False`
|
| `continual_learning` | Use the final state of the most recently trained model as starting point for the new model, allowing for incremental learning (more information can be found [here](freqai-running.md#continual-learning)). <br> **Datatype:** Boolean. <br> Default: `False`.
|
||||||
| `data_kitchen_thread_count` | <br> Designate the number of threads you want to use for data processing (outlier methods, normalization, etc.). This has no impact on the number of threads used for training. If the user does not set it (default), FreqAI will use the maximum number of threads - 2 (leaving 1 physical core available for the Freqtrade bot and FreqUI) <br> **Datatype:** Positive integer.
|
| | **Feature parameters**
|
||||||
| `activate_tensorboard` | <br> Indicate whether or not to activate tensorboard for the tensorboard enabled modules (currently Reinforcement Learning, XGBoost, Catboost, and PyTorch). Tensorboard needs Torch installed, which means you will need the torch/RL docker image or you need to answer "yes" to the install question about whether or not you wish to install Torch. <br> **Datatype:** Boolean. <br> Default: `True`.
|
|
||||||
| `wait_for_training_iteration_on_reload` | <br> When using /reload or ctrl-c, wait for the current training iteration to finish before completing graceful shutdown. If set to `False`, FreqAI will break the current training iteration, allowing you to shutdown gracefully more quickly, but you will lose your current training iteration. <br> **Datatype:** Boolean. <br> Default: `True`.
|
|
||||||
|
|
||||||
### Feature parameters
|
|
||||||
|
|
||||||
| Parameter | Description |
|
|
||||||
|------------|-------------|
|
|
||||||
| | **Feature parameters within the `freqai.feature_parameters` sub dictionary**
|
|
||||||
| `feature_parameters` | A dictionary containing the parameters used to engineer the feature set. Details and examples are shown [here](freqai-feature-engineering.md). <br> **Datatype:** Dictionary.
|
| `feature_parameters` | A dictionary containing the parameters used to engineer the feature set. Details and examples are shown [here](freqai-feature-engineering.md). <br> **Datatype:** Dictionary.
|
||||||
| `include_timeframes` | A list of timeframes that all indicators in `feature_engineering_expand_*()` will be created for. The list is added as features to the base indicators dataset. <br> **Datatype:** List of timeframes (strings).
|
| `include_timeframes` | A list of timeframes that all indicators in `populate_any_indicators` will be created for. The list is added as features to the base indicators dataset. <br> **Datatype:** List of timeframes (strings).
|
||||||
| `include_corr_pairlist` | A list of correlated coins that FreqAI will add as additional features to all `pair_whitelist` coins. All indicators set in `feature_engineering_expand_*()` during feature engineering (see details [here](freqai-feature-engineering.md)) will be created for each correlated coin. The correlated coins features are added to the base indicators dataset. <br> **Datatype:** List of assets (strings).
|
| `include_corr_pairlist` | A list of correlated coins that FreqAI will add as additional features to all `pair_whitelist` coins. All indicators set in `populate_any_indicators` during feature engineering (see details [here](freqai-feature-engineering.md)) will be created for each correlated coin. The correlated coins features are added to the base indicators dataset. <br> **Datatype:** List of assets (strings).
|
||||||
| `label_period_candles` | Number of candles into the future that the labels are created for. This can be used in `set_freqai_targets()` (see `templates/FreqaiExampleStrategy.py` for detailed usage). This parameter is not necessarily required, you can create custom labels and choose whether to make use of this parameter or not. Please see `templates/FreqaiExampleStrategy.py` to see the example usage. <br> **Datatype:** Positive integer.
|
| `label_period_candles` | Number of candles into the future that the labels are created for. This is used in `populate_any_indicators` (see `templates/FreqaiExampleStrategy.py` for detailed usage). You can create custom labels and choose whether to make use of this parameter or not. <br> **Datatype:** Positive integer.
|
||||||
| `include_shifted_candles` | Add features from previous candles to subsequent candles with the intent of adding historical information. If used, FreqAI will duplicate and shift all features from the `include_shifted_candles` previous candles so that the information is available for the subsequent candle. <br> **Datatype:** Positive integer.
|
| `include_shifted_candles` | Add features from previous candles to subsequent candles with the intent of adding historical information. If used, FreqAI will duplicate and shift all features from the `include_shifted_candles` previous candles so that the information is available for the subsequent candle. <br> **Datatype:** Positive integer.
|
||||||
| `weight_factor` | Weight training data points according to their recency (see details [here](freqai-feature-engineering.md#weighting-features-for-temporal-importance)). <br> **Datatype:** Positive float (typically < 1).
|
| `weight_factor` | Weight training data points according to their recency (see details [here](freqai-feature-engineering.md#weighting-features-for-temporal-importance)). <br> **Datatype:** Positive float (typically < 1).
|
||||||
| `indicator_max_period_candles` | **No longer used (#7325)**. Replaced by `startup_candle_count` which is set in the [strategy](freqai-configuration.md#building-a-freqai-strategy). `startup_candle_count` is timeframe independent and defines the maximum *period* used in `feature_engineering_*()` for indicator creation. FreqAI uses this parameter together with the maximum timeframe in `include_time_frames` to calculate how many data points to download such that the first data point does not include a NaN. <br> **Datatype:** Positive integer.
|
| `indicator_max_period_candles` | **No longer used (#7325)**. Replaced by `startup_candle_count` which is set in the [strategy](freqai-configuration.md#building-a-freqai-strategy). `startup_candle_count` is timeframe independent and defines the maximum *period* used in `populate_any_indicators()` for indicator creation. FreqAI uses this parameter together with the maximum timeframe in `include_time_frames` to calculate how many data points to download such that the first data point does not include a NaN. <br> **Datatype:** Positive integer.
|
||||||
| `indicator_periods_candles` | Time periods to calculate indicators for. The indicators are added to the base indicator dataset. <br> **Datatype:** List of positive integers.
|
| `indicator_periods_candles` | Time periods to calculate indicators for. The indicators are added to the base indicator dataset. <br> **Datatype:** List of positive integers.
|
||||||
| `principal_component_analysis` | Automatically reduce the dimensionality of the data set using Principal Component Analysis. See details about how it works [here](freqai-feature-engineering.md#data-dimensionality-reduction-with-principal-component-analysis) <br> **Datatype:** Boolean. <br> Default: `False`.
|
| `principal_component_analysis` | Automatically reduce the dimensionality of the data set using Principal Component Analysis. See details about how it works [here](#reducing-data-dimensionality-with-principal-component-analysis) <br> **Datatype:** Boolean. <br> Default: `False`.
|
||||||
| `plot_feature_importances` | Create a feature importance plot for each model for the top/bottom `plot_feature_importances` number of features. Plot is stored in `user_data/models/<identifier>/sub-train-<COIN>_<timestamp>.html`. <br> **Datatype:** Integer. <br> Default: `0`.
|
| `plot_feature_importances` | Create a feature importance plot for each model for the top/bottom `plot_feature_importances` number of features. <br> **Datatype:** Integer. <br> Default: `0`.
|
||||||
| `DI_threshold` | Activates the use of the Dissimilarity Index for outlier detection when set to > 0. See details about how it works [here](freqai-feature-engineering.md#identifying-outliers-with-the-dissimilarity-index-di). <br> **Datatype:** Positive float (typically < 1).
|
| `DI_threshold` | Activates the use of the Dissimilarity Index for outlier detection when set to > 0. See details about how it works [here](freqai-feature-engineering.md#identifying-outliers-with-the-dissimilarity-index-di). <br> **Datatype:** Positive float (typically < 1).
|
||||||
| `use_SVM_to_remove_outliers` | Train a support vector machine to detect and remove outliers from the training dataset, as well as from incoming data points. See details about how it works [here](freqai-feature-engineering.md#identifying-outliers-using-a-support-vector-machine-svm). <br> **Datatype:** Boolean.
|
| `use_SVM_to_remove_outliers` | Train a support vector machine to detect and remove outliers from the training dataset, as well as from incoming data points. See details about how it works [here](freqai-feature-engineering.md#identifying-outliers-using-a-support-vector-machine-svm). <br> **Datatype:** Boolean.
|
||||||
| `svm_params` | All parameters available in Sklearn's `SGDOneClassSVM()`. See details about some select parameters [here](freqai-feature-engineering.md#identifying-outliers-using-a-support-vector-machine-svm). <br> **Datatype:** Dictionary.
|
| `svm_params` | All parameters available in Sklearn's `SGDOneClassSVM()`. See details about some select parameters [here](freqai-feature-engineering.md#identifying-outliers-using-a-support-vector-machine-svm). <br> **Datatype:** Dictionary.
|
||||||
| `use_DBSCAN_to_remove_outliers` | Cluster data using the DBSCAN algorithm to identify and remove outliers from training and prediction data. See details about how it works [here](freqai-feature-engineering.md#identifying-outliers-with-dbscan). <br> **Datatype:** Boolean.
|
| `use_DBSCAN_to_remove_outliers` | Cluster data using the DBSCAN algorithm to identify and remove outliers from training and prediction data. See details about how it works [here](freqai-feature-engineering.md#identifying-outliers-with-dbscan). <br> **Datatype:** Boolean.
|
||||||
|
| `inlier_metric_window` | If set, FreqAI adds an `inlier_metric` to the training feature set and sets the lookback to be the `inlier_metric_window`, i.e., the number of previous time points to compare the current candle to. Details of how the `inlier_metric` is computed can be found [here](freqai-feature-engineering.md#inlier-metric). <br> **Datatype:** Integer. <br> Default: `0`.
|
||||||
| `noise_standard_deviation` | If set, FreqAI adds noise to the training features with the aim of preventing overfitting. FreqAI generates random deviates from a gaussian distribution with a standard deviation of `noise_standard_deviation` and adds them to all data points. `noise_standard_deviation` should be kept relative to the normalized space, i.e., between -1 and 1. In other words, since data in FreqAI is always normalized to be between -1 and 1, `noise_standard_deviation: 0.05` would result in 32% of the data being randomly increased/decreased by more than 2.5% (i.e., the percent of data falling within the first standard deviation). <br> **Datatype:** Integer. <br> Default: `0`.
|
| `noise_standard_deviation` | If set, FreqAI adds noise to the training features with the aim of preventing overfitting. FreqAI generates random deviates from a gaussian distribution with a standard deviation of `noise_standard_deviation` and adds them to all data points. `noise_standard_deviation` should be kept relative to the normalized space, i.e., between -1 and 1. In other words, since data in FreqAI is always normalized to be between -1 and 1, `noise_standard_deviation: 0.05` would result in 32% of the data being randomly increased/decreased by more than 2.5% (i.e., the percent of data falling within the first standard deviation). <br> **Datatype:** Integer. <br> Default: `0`.
|
||||||
| `outlier_protection_percentage` | Enable to prevent outlier detection methods from discarding too much data. If more than `outlier_protection_percentage` % of points are detected as outliers by the SVM or DBSCAN, FreqAI will log a warning message and ignore outlier detection, i.e., the original dataset will be kept intact. If the outlier protection is triggered, no predictions will be made based on the training dataset. <br> **Datatype:** Float. <br> Default: `30`.
|
| `outlier_protection_percentage` | Enable to prevent outlier detection methods from discarding too much data. If more than `outlier_protection_percentage` % of points are detected as outliers by the SVM or DBSCAN, FreqAI will log a warning message and ignore outlier detection, i.e., the original dataset will be kept intact. If the outlier protection is triggered, no predictions will be made based on the training dataset. <br> **Datatype:** Float. <br> Default: `30`.
|
||||||
| `reverse_train_test_order` | Split the feature dataset (see below) and use the latest data split for training and test on historical split of the data. This allows the model to be trained up to the most recent data point, while avoiding overfitting. However, you should be careful to understand the unorthodox nature of this parameter before employing it. <br> **Datatype:** Boolean. <br> Default: `False` (no reversal).
|
| `reverse_train_test_order` | Split the feature dataset (see below) and use the latest data split for training and test on historical split of the data. This allows the model to be trained up to the most recent data point, while avoiding overfitting. However, you should be careful to understand the unorthodox nature of this parameter before employing it. <br> **Datatype:** Boolean. <br> Default: `False` (no reversal).
|
||||||
| `shuffle_after_split` | Split the data into train and test sets, and then shuffle both sets individually. <br> **Datatype:** Boolean. <br> Default: `False`.
|
| | **Data split parameters**
|
||||||
| `buffer_train_data_candles` | Cut `buffer_train_data_candles` off the beginning and end of the training data *after* the indicators were populated. The main example use is when predicting maxima and minima, the argrelextrema function cannot know the maxima/minima at the edges of the timerange. To improve model accuracy, it is best to compute argrelextrema on the full timerange and then use this function to cut off the edges (buffer) by the kernel. In another case, if the targets are set to a shifted price movement, this buffer is unnecessary because the shifted candles at the end of the timerange will be NaN and FreqAI will automatically cut those off of the training dataset.<br> **Datatype:** Integer. <br> Default: `0`.
|
| `data_split_parameters` | Include any additional parameters available from Scikit-learn `test_train_split()`, which are shown [here](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html) (external website). <br> **Datatype:** Dictionary.
|
||||||
|
|
||||||
### Data split parameters
|
|
||||||
|
|
||||||
| Parameter | Description |
|
|
||||||
|------------|-------------|
|
|
||||||
| | **Data split parameters within the `freqai.data_split_parameters` sub dictionary**
|
|
||||||
| `data_split_parameters` | Include any additional parameters available from scikit-learn `test_train_split()`, which are shown [here](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html) (external website). <br> **Datatype:** Dictionary.
|
|
||||||
| `test_size` | The fraction of data that should be used for testing instead of training. <br> **Datatype:** Positive float < 1.
|
| `test_size` | The fraction of data that should be used for testing instead of training. <br> **Datatype:** Positive float < 1.
|
||||||
| `shuffle` | Shuffle the training data points during training. Typically, to not remove the chronological order of data in time-series forecasting, this is set to `False`. <br> **Datatype:** Boolean. <br> Default: `False`.
|
| `shuffle` | Shuffle the training data points during training. Typically, to not remove the chronological order of data in time-series forecasting, this is set to `False`. <br> **Datatype:** Boolean. <br> Default: `False`.
|
||||||
|
| | **Model training parameters**
|
||||||
### Model training parameters
|
| `model_training_parameters` | A flexible dictionary that includes all parameters available by the selected model library. For example, if you use `LightGBMRegressor`, this dictionary can contain any parameter available by the `LightGBMRegressor` [here](https://lightgbm.readthedocs.io/en/latest/pythonapi/lightgbm.LGBMRegressor.html) (external website). If you select a different model, this dictionary can contain any parameter from that model. <br> **Datatype:** Dictionary.
|
||||||
|
|
||||||
| Parameter | Description |
|
|
||||||
|------------|-------------|
|
|
||||||
| | **Model training parameters within the `freqai.model_training_parameters` sub dictionary**
|
|
||||||
| `model_training_parameters` | A flexible dictionary that includes all parameters available by the selected model library. For example, if you use `LightGBMRegressor`, this dictionary can contain any parameter available by the `LightGBMRegressor` [here](https://lightgbm.readthedocs.io/en/latest/pythonapi/lightgbm.LGBMRegressor.html) (external website). If you select a different model, this dictionary can contain any parameter from that model. A list of the currently available models can be found [here](freqai-configuration.md#using-different-prediction-models). <br> **Datatype:** Dictionary.
|
|
||||||
| `n_estimators` | The number of boosted trees to fit in the training of the model. <br> **Datatype:** Integer.
|
| `n_estimators` | The number of boosted trees to fit in the training of the model. <br> **Datatype:** Integer.
|
||||||
| `learning_rate` | Boosting learning rate during training of the model. <br> **Datatype:** Float.
|
| `learning_rate` | Boosting learning rate during training of the model. <br> **Datatype:** Float.
|
||||||
| `n_jobs`, `thread_count`, `task_type` | Set the number of threads for parallel processing and the `task_type` (`gpu` or `cpu`). Different model libraries use different parameter names. <br> **Datatype:** Float.
|
| `n_jobs`, `thread_count`, `task_type` | Set the number of threads for parallel processing and the `task_type` (`gpu` or `cpu`). Different model libraries use different parameter names. <br> **Datatype:** Float.
|
||||||
|
| | **Reinforcement Learning Parameters**
|
||||||
### Reinforcement Learning parameters
|
|
||||||
|
|
||||||
| Parameter | Description |
|
|
||||||
|------------|-------------|
|
|
||||||
| | **Reinforcement Learning Parameters within the `freqai.rl_config` sub dictionary**
|
|
||||||
| `rl_config` | A dictionary containing the control parameters for a Reinforcement Learning model. <br> **Datatype:** Dictionary.
|
| `rl_config` | A dictionary containing the control parameters for a Reinforcement Learning model. <br> **Datatype:** Dictionary.
|
||||||
| `train_cycles` | Training time steps will be set based on `train_cycles` * the number of training data points. <br> **Datatype:** Integer.
|
| `train_cycles` | Training time steps will be set based on `train_cycles` * the number of training data points. <br> **Datatype:** Integer.
|
||||||
| `max_trade_duration_candles`| Guides the agent training to keep trades below desired length. Example usage shown in `prediction_models/ReinforcementLearner.py` within the customizable `calculate_reward()` function. <br> **Datatype:** int.
|
| `cpu_count` | Number of processors to dedicate to the Reinforcement Learning training process. <br> **Datatype:** int.
|
||||||
| `model_type` | Model string from stable_baselines3 or SBcontrib. Available strings include: `'TRPO', 'ARS', 'RecurrentPPO', 'MaskablePPO', 'PPO', 'A2C', 'DQN'`. User should ensure that `model_training_parameters` match those available to the corresponding stable_baselines3 model by visiting their documentation. [PPO doc](https://stable-baselines3.readthedocs.io/en/master/modules/ppo.html) (external website) <br> **Datatype:** string.
|
| `max_trade_duration_candles`| Guides the agent training to keep trades below desired length. Example usage shown in `prediction_models/ReinforcementLearner.py` within the user customizable `calculate_reward()` <br> **Datatype:** int.
|
||||||
|
| `model_type` | Model string from stable_baselines3 or SBcontrib. Available strings include: `'TRPO', 'ARS', 'RecurrentPPO', 'MaskablePPO', 'PPO', 'A2C', 'DQN'`. User should ensure that `model_training_parameters` match those available to the corresponding stable_baselines3 model by visiting their documentation. [PPO doc](https://stable-baselines3.readthedocs.io/en/master/modules/ppo.html) (external website) <br> **Datatype:** string.
|
||||||
| `policy_type` | One of the available policy types from stable_baselines3 <br> **Datatype:** string.
|
| `policy_type` | One of the available policy types from stable_baselines3 <br> **Datatype:** string.
|
||||||
| `max_training_drawdown_pct` | The maximum drawdown that the agent is allowed to experience during training. <br> **Datatype:** float. <br> Default: 0.8
|
| `max_training_drawdown_pct` | The maximum drawdown that the agent is allowed to experience during training. <br> **Datatype:** float. <br> Default: 0.8
|
||||||
| `cpu_count` | Number of threads/cpus to dedicate to the Reinforcement Learning training process (depending on if `ReinforcementLearning_multiproc` is selected or not). Recommended to leave this untouched, by default, this value is set to the total number of physical cores minus 1. <br> **Datatype:** int.
|
| `cpu_count` | Number of threads/cpus to dedicate to the Reinforcement Learning training process (depending on if `ReinforcementLearning_multiproc` is selected or not). <br> **Datatype:** int.
|
||||||
| `model_reward_parameters` | Parameters used inside the customizable `calculate_reward()` function in `ReinforcementLearner.py` <br> **Datatype:** int.
|
| `model_reward_parameters` | Parameters used inside the user customizable `calculate_reward()` function in `ReinforcementLearner.py` <br> **Datatype:** int.
|
||||||
| `add_state_info` | Tell FreqAI to include state information in the feature set for training and inferencing. The current state variables include trade duration, current profit, trade position. This is only available in dry/live runs, and is automatically switched to false for backtesting. <br> **Datatype:** bool. <br> Default: `False`.
|
|
||||||
| `net_arch` | Network architecture which is well described in [`stable_baselines3` doc](https://stable-baselines3.readthedocs.io/en/master/guide/custom_policy.html#examples). In summary: `[<shared layers>, dict(vf=[<non-shared value network layers>], pi=[<non-shared policy network layers>])]`. By default this is set to `[128, 128]`, which defines 2 shared hidden layers with 128 units each.
|
|
||||||
| `randomize_starting_position` | Randomize the starting point of each episode to avoid overfitting. <br> **Datatype:** bool. <br> Default: `False`.
|
|
||||||
| `drop_ohlc_from_features` | Do not include the normalized ohlc data in the feature set passed to the agent during training (ohlc will still be used for driving the environment in all cases) <br> **Datatype:** Boolean. <br> **Default:** `False`
|
|
||||||
| `progress_bar` | Display a progress bar with the current progress, elapsed time and estimated remaining time. <br> **Datatype:** Boolean. <br> Default: `False`.
|
|
||||||
|
|
||||||
### PyTorch parameters
|
|
||||||
|
|
||||||
#### general
|
|
||||||
|
|
||||||
| Parameter | Description |
|
|
||||||
|------------|-------------|
|
|
||||||
| | **Model training parameters within the `freqai.model_training_parameters` sub dictionary**
|
|
||||||
| `learning_rate` | Learning rate to be passed to the optimizer. <br> **Datatype:** float. <br> Default: `3e-4`.
|
|
||||||
| `model_kwargs` | Parameters to be passed to the model class. <br> **Datatype:** dict. <br> Default: `{}`.
|
|
||||||
| `trainer_kwargs` | Parameters to be passed to the trainer class. <br> **Datatype:** dict. <br> Default: `{}`.
|
|
||||||
|
|
||||||
#### trainer_kwargs
|
|
||||||
|
|
||||||
| Parameter | Description |
|
|
||||||
|--------------|-------------|
|
|
||||||
| | **Model training parameters within the `freqai.model_training_parameters.model_kwargs` sub dictionary**
|
|
||||||
| `n_epochs` | The `n_epochs` parameter is a crucial setting in the PyTorch training loop that determines the number of times the entire training dataset will be used to update the model's parameters. An epoch represents one full pass through the entire training dataset. Overrides `n_steps`. Either `n_epochs` or `n_steps` must be set. <br><br> **Datatype:** int. optional. <br> Default: `10`.
|
|
||||||
| `n_steps` | An alternative way of setting `n_epochs` - the number of training iterations to run. Iteration here refer to the number of times we call `optimizer.step()`. Ignored if `n_epochs` is set. A simplified version of the function: <br><br> n_epochs = n_steps / (n_obs / batch_size) <br><br> The motivation here is that `n_steps` is easier to optimize and keep stable across different n_obs - the number of data points. <br> <br> **Datatype:** int. optional. <br> Default: `None`.
|
|
||||||
| `batch_size` | The size of the batches to use during training. <br><br> **Datatype:** int. <br> Default: `64`.
|
|
||||||
|
|
||||||
|
|
||||||
### Additional parameters
|
|
||||||
|
|
||||||
| Parameter | Description |
|
|
||||||
|------------|-------------|
|
|
||||||
| | **Extraneous parameters**
|
| | **Extraneous parameters**
|
||||||
| `freqai.keras` | If the selected model makes use of Keras (typical for TensorFlow-based prediction models), this flag needs to be activated so that the model save/loading follows Keras standards. <br> **Datatype:** Boolean. <br> Default: `False`.
|
| `keras` | If the selected model makes use of Keras (typical for Tensorflow-based prediction models), this flag needs to be activated so that the model save/loading follows Keras standards. <br> **Datatype:** Boolean. <br> Default: `False`.
|
||||||
| `freqai.conv_width` | The width of a neural network input tensor. This replaces the need for shifting candles (`include_shifted_candles`) by feeding in historical data points as the second dimension of the tensor. Technically, this parameter can also be used for regressors, but it only adds computational overhead and does not change the model training/prediction. <br> **Datatype:** Integer. <br> Default: `2`.
|
| `conv_width` | The width of a convolutional neural network input tensor. This replaces the need for shifting candles (`include_shifted_candles`) by feeding in historical data points as the second dimension of the tensor. Technically, this parameter can also be used for regressors, but it only adds computational overhead and does not change the model training/prediction. <br> **Datatype:** Integer. <br> Default: `2`.
|
||||||
| `freqai.reduce_df_footprint` | Recast all numeric columns to float32/int32, with the objective of reducing ram/disk usage and decreasing train/inference timing. This parameter is set in the main level of the Freqtrade configuration file (not inside FreqAI). <br> **Datatype:** Boolean. <br> Default: `False`.
|
|
||||||
@@ -1,32 +1,7 @@
|
|||||||
# Reinforcement Learning
|
# Reinforcement Learning
|
||||||
|
|
||||||
!!! Note "Installation size"
|
!!! Note
|
||||||
Reinforcement learning dependencies include large packages such as `torch`, which should be explicitly requested during `./setup.sh -i` by answering "y" to the question "Do you also want dependencies for freqai-rl (~700mb additional space required) [y/N]?".
|
Reinforcement learning dependencies include large packages such as `torch`, which should be explicitly requested during `./setup.sh -i` by answering "y" to the question "Do you also want dependencies for freqai-rl (~700mb additional space required) [y/N]?" Users who prefer docker should ensure they use the docker image appended with `_freqaiRL`.
|
||||||
Users who prefer docker should ensure they use the docker image appended with `_freqairl`.
|
|
||||||
|
|
||||||
## Background and terminology
|
|
||||||
|
|
||||||
### What is RL and why does FreqAI need it?
|
|
||||||
|
|
||||||
Reinforcement learning involves two important components, the *agent* and the training *environment*. During agent training, the agent moves through historical data candle by candle, always making 1 of a set of actions: Long entry, long exit, short entry, short exit, neutral). During this training process, the environment tracks the performance of these actions and rewards the agent according to a custom user made `calculate_reward()` (here we offer a default reward for users to build on if they wish [details here](#creating-a-custom-reward-function)). The reward is used to train weights in a neural network.
|
|
||||||
|
|
||||||
A second important component of the FreqAI RL implementation is the use of *state* information. State information is fed into the network at each step, including current profit, current position, and current trade duration. These are used to train the agent in the training environment, and to reinforce the agent in dry/live (this functionality is not available in backtesting). *FreqAI + Freqtrade is a perfect match for this reinforcing mechanism since this information is readily available in live deployments.*
|
|
||||||
|
|
||||||
Reinforcement learning is a natural progression for FreqAI, since it adds a new layer of adaptivity and market reactivity that Classifiers and Regressors cannot match. However, Classifiers and Regressors have strengths that RL does not have such as robust predictions. Improperly trained RL agents may find "cheats" and "tricks" to maximize reward without actually winning any trades. For this reason, RL is more complex and demands a higher level of understanding than typical Classifiers and Regressors.
|
|
||||||
|
|
||||||
### The RL interface
|
|
||||||
|
|
||||||
With the current framework, we aim to expose the training environment via the common "prediction model" file, which is a user inherited `BaseReinforcementLearner` object (e.g. `freqai/prediction_models/ReinforcementLearner`). Inside this user class, the RL environment is available and customized via `MyRLEnv` as [shown below](#creating-a-custom-reward-function).
|
|
||||||
|
|
||||||
We envision the majority of users focusing their effort on creative design of the `calculate_reward()` function [details here](#creating-a-custom-reward-function), while leaving the rest of the environment untouched. Other users may not touch the environment at all, and they will only play with the configuration settings and the powerful feature engineering that already exists in FreqAI. Meanwhile, we enable advanced users to create their own model classes entirely.
|
|
||||||
|
|
||||||
The framework is built on stable_baselines3 (torch) and OpenAI gym for the base environment class. But generally speaking, the model class is well isolated. Thus, the addition of competing libraries can be easily integrated into the existing framework. For the environment, it is inheriting from `gym.Env` which means that it is necessary to write an entirely new environment in order to switch to a different library.
|
|
||||||
|
|
||||||
### Important considerations
|
|
||||||
|
|
||||||
As explained above, the agent is "trained" in an artificial trading "environment". In our case, that environment may seem quite similar to a real Freqtrade backtesting environment, but it is *NOT*. In fact, the RL training environment is much more simplified. It does not incorporate any of the complicated strategy logic, such as callbacks like `custom_exit`, `custom_stoploss`, leverage controls, etc. The RL environment is instead a very "raw" representation of the true market, where the agent has free will to learn the policy (read: stoploss, take profit, etc.) which is enforced by the `calculate_reward()`. Thus, it is important to consider that the agent training environment is not identical to the real world.
|
|
||||||
|
|
||||||
## Running Reinforcement Learning
|
|
||||||
|
|
||||||
Setting up and running a Reinforcement Learning model is the same as running a Regressor or Classifier. The same two flags, `--freqaimodel` and `--strategy`, must be defined on the command line:
|
Setting up and running a Reinforcement Learning model is the same as running a Regressor or Classifier. The same two flags, `--freqaimodel` and `--strategy`, must be defined on the command line:
|
||||||
|
|
||||||
@@ -34,41 +9,70 @@ Setting up and running a Reinforcement Learning model is the same as running a R
|
|||||||
freqtrade trade --freqaimodel ReinforcementLearner --strategy MyRLStrategy --config config.json
|
freqtrade trade --freqaimodel ReinforcementLearner --strategy MyRLStrategy --config config.json
|
||||||
```
|
```
|
||||||
|
|
||||||
where `ReinforcementLearner` will use the templated `ReinforcementLearner` from `freqai/prediction_models/ReinforcementLearner` (or a custom user defined one located in `user_data/freqaimodels`). The strategy, on the other hand, follows the same base [feature engineering](freqai-feature-engineering.md) with `feature_engineering_*` as a typical Regressor. The difference lies in the creation of the targets, Reinforcement Learning doesn't require them. However, FreqAI requires a default (neutral) value to be set in the action column:
|
where `ReinforcementLearner` will use the templated `ReinforcementLearner` from `freqai/prediction_models/ReinforcementLearner`. The strategy, on the other hand, follows the same base [feature engineering](freqai-feature-engineering.md) with `populate_any_indicators` as a typical Regressor:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
def set_freqai_targets(self, dataframe, **kwargs) -> DataFrame:
|
def populate_any_indicators(
|
||||||
"""
|
self, pair, df, tf, informative=None, set_generalized_indicators=False
|
||||||
*Only functional with FreqAI enabled strategies*
|
):
|
||||||
Required function to set the targets for the model.
|
|
||||||
All targets must be prepended with `&` to be recognized by the FreqAI internals.
|
|
||||||
|
|
||||||
More details about feature engineering available:
|
coin = pair.split('/')[0]
|
||||||
|
|
||||||
https://www.freqtrade.io/en/latest/freqai-feature-engineering
|
if informative is None:
|
||||||
|
informative = self.dp.get_pair_dataframe(pair, tf)
|
||||||
|
|
||||||
|
# first loop is automatically duplicating indicators for time periods
|
||||||
|
for t in self.freqai_info["feature_parameters"]["indicator_periods_candles"]:
|
||||||
|
|
||||||
|
t = int(t)
|
||||||
|
informative[f"%-{coin}rsi-period_{t}"] = ta.RSI(informative, timeperiod=t)
|
||||||
|
informative[f"%-{coin}mfi-period_{t}"] = ta.MFI(informative, timeperiod=t)
|
||||||
|
informative[f"%-{coin}adx-period_{t}"] = ta.ADX(informative, window=t)
|
||||||
|
|
||||||
|
# The following features are necessary for RL models
|
||||||
|
informative[f"%-{coin}raw_close"] = informative["close"]
|
||||||
|
informative[f"%-{coin}raw_open"] = informative["open"]
|
||||||
|
informative[f"%-{coin}raw_high"] = informative["high"]
|
||||||
|
informative[f"%-{coin}raw_low"] = informative["low"]
|
||||||
|
|
||||||
|
indicators = [col for col in informative if col.startswith("%")]
|
||||||
|
# This loop duplicates and shifts all indicators to add a sense of recency to data
|
||||||
|
for n in range(self.freqai_info["feature_parameters"]["include_shifted_candles"] + 1):
|
||||||
|
if n == 0:
|
||||||
|
continue
|
||||||
|
informative_shift = informative[indicators].shift(n)
|
||||||
|
informative_shift = informative_shift.add_suffix("_shift-" + str(n))
|
||||||
|
informative = pd.concat((informative, informative_shift), axis=1)
|
||||||
|
|
||||||
|
df = merge_informative_pair(df, informative, self.config["timeframe"], tf, ffill=True)
|
||||||
|
skip_columns = [
|
||||||
|
(s + "_" + tf) for s in ["date", "open", "high", "low", "close", "volume"]
|
||||||
|
]
|
||||||
|
df = df.drop(columns=skip_columns)
|
||||||
|
|
||||||
|
# Add generalized indicators here (because in live, it will call this
|
||||||
|
# function to populate indicators during training). Notice how we ensure not to
|
||||||
|
# add them multiple times
|
||||||
|
if set_generalized_indicators:
|
||||||
|
|
||||||
:param df: strategy dataframe which will receive the targets
|
|
||||||
usage example: dataframe["&-target"] = dataframe["close"].shift(-1) / dataframe["close"]
|
|
||||||
"""
|
|
||||||
# For RL, there are no direct targets to set. This is filler (neutral)
|
# For RL, there are no direct targets to set. This is filler (neutral)
|
||||||
# until the agent sends an action.
|
# until the agent sends an action.
|
||||||
dataframe["&-action"] = 0
|
df["&-action"] = 0
|
||||||
return dataframe
|
|
||||||
|
return df
|
||||||
```
|
```
|
||||||
|
|
||||||
Most of the function remains the same as for typical Regressors, however, the function below shows how the strategy must pass the raw price data to the agent so that it has access to raw OHLCV in the training environment:
|
Most of the function remains the same as for typical Regressors, however, the function above shows how the strategy must pass the raw price data to the agent so that it has access to raw OHLCV in the training environment:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
def feature_engineering_standard(self, dataframe: DataFrame, **kwargs) -> DataFrame:
|
|
||||||
# The following features are necessary for RL models
|
# The following features are necessary for RL models
|
||||||
dataframe[f"%-raw_close"] = dataframe["close"]
|
informative[f"%-{coin}raw_close"] = informative["close"]
|
||||||
dataframe[f"%-raw_open"] = dataframe["open"]
|
informative[f"%-{coin}raw_open"] = informative["open"]
|
||||||
dataframe[f"%-raw_high"] = dataframe["high"]
|
informative[f"%-{coin}raw_high"] = informative["high"]
|
||||||
dataframe[f"%-raw_low"] = dataframe["low"]
|
informative[f"%-{coin}raw_low"] = informative["low"]
|
||||||
return dataframe
|
|
||||||
```
|
```
|
||||||
|
|
||||||
Finally, there is no explicit "label" to make - instead it is necessary to assign the `&-action` column which will contain the agent's actions when accessed in `populate_entry/exit_trends()`. In the present example, the neutral action to 0. This value should align with the environment used. FreqAI provides two environments, both use 0 as the neutral action.
|
Finally, there is no explicit "label" to make - instead you need to assign the `&-action` column which will contain the agent's actions when accessed in `populate_entry/exit_trends()`. In the present example, the user sets the neutral action to 0. This value should align with the environment used. FreqAI provides two environments, both use 0 as the neutral action.
|
||||||
|
|
||||||
After users realize there are no labels to set, they will soon understand that the agent is making its "own" entry and exit decisions. This makes strategy construction rather simple. The entry and exit signals come from the agent in the form of an integer - which are used directly to decide entries and exits in the strategy:
|
After users realize there are no labels to set, they will soon understand that the agent is making its "own" entry and exit decisions. This makes strategy construction rather simple. The entry and exit signals come from the agent in the form of an integer - which are used directly to decide entries and exits in the strategy:
|
||||||
|
|
||||||
@@ -107,12 +111,11 @@ It is important to consider that `&-action` depends on which environment they ch
|
|||||||
|
|
||||||
## Configuring the Reinforcement Learner
|
## Configuring the Reinforcement Learner
|
||||||
|
|
||||||
In order to configure the `Reinforcement Learner` the following dictionary must exist in the `freqai` config:
|
In order to configure the `Reinforcement Learner`, the following dictionary must be added to the `freqai` config:
|
||||||
|
|
||||||
```json
|
```json
|
||||||
"rl_config": {
|
"rl_config": {
|
||||||
"train_cycles": 25,
|
"train_cycles": 25,
|
||||||
"add_state_info": true,
|
|
||||||
"max_trade_duration_candles": 300,
|
"max_trade_duration_candles": 300,
|
||||||
"max_training_drawdown_pct": 0.02,
|
"max_training_drawdown_pct": 0.02,
|
||||||
"cpu_count": 8,
|
"cpu_count": 8,
|
||||||
@@ -125,87 +128,30 @@ In order to configure the `Reinforcement Learner` the following dictionary must
|
|||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
Parameter details can be found [here](freqai-parameter-table.md), but in general the `train_cycles` decides how many times the agent should cycle through the candle data in its artificial environment to train weights in the model. `model_type` is a string which selects one of the available models in [stable_baselines](https://stable-baselines3.readthedocs.io/en/master/)(external link).
|
Parameter details can be found [here](freqai-parameter-table.md), but in general the `train_cycles` decides how many times the agent should cycle through the candle data in its artificial environment to train weights in the model. `model_type` is a string which selects one of the available models in [stable_baselines](https://stable-baselines3.readthedocs.io/en/master/)(external link).
|
||||||
|
|
||||||
!!! Note
|
## Creating the reward
|
||||||
If you would like to experiment with `continual_learning`, then you should set that value to `true` in the main `freqai` configuration dictionary. This will tell the Reinforcement Learning library to continue training new models from the final state of previous models, instead of retraining new models from scratch each time a retrain is initiated.
|
|
||||||
|
|
||||||
!!! Note
|
    As users begin to modify the strategy and the prediction model, they will quickly realize some important differences between the Reinforcement Learner and the Regressors/Classifiers. Firstly, the strategy does not set a target value (no labels!). Instead, the user sets a `calculate_reward()` function inside their custom `ReinforcementLearner.py` file. A default `calculate_reward()` is provided inside `prediction_models/ReinforcementLearner.py` to give users the necessary building blocks to start their own models. It is inside the `calculate_reward()` where users express their creative theories about the market. For example, the user wants to reward their agent when it makes a winning trade, and penalize the agent when it makes a losing trade. Or perhaps, the user wishes to reward the agent for entering trades, and penalize the agent for sitting in trades too long. Below we show examples of how these rewards are all calculated:
|
||||||
Remember that the general `model_training_parameters` dictionary should contain all the model hyperparameter customizations for the particular `model_type`. For example, `PPO` parameters can be found [here](https://stable-baselines3.readthedocs.io/en/master/modules/ppo.html).
|
|
||||||
|
|
||||||
## Creating a custom reward function
|
|
||||||
|
|
||||||
!!! danger "Not for production"
|
|
||||||
Warning!
|
|
||||||
    The reward function provided with the Freqtrade source code is a showcase of functionality designed to show/test as many possible environment control features as possible. It is also designed to run quickly on small computers. This is a benchmark, it is *not* for live production. Please beware that you will need to create your own `calculate_reward()` function or use a template built by other users outside of the Freqtrade source code.
|
|
||||||
|
|
||||||
As you begin to modify the strategy and the prediction model, you will quickly realize some important differences between the Reinforcement Learner and the Regressors/Classifiers. Firstly, the strategy does not set a target value (no labels!). Instead, you set the `calculate_reward()` function inside the `MyRLEnv` class (see below). A default `calculate_reward()` is provided inside `prediction_models/ReinforcementLearner.py` to demonstrate the necessary building blocks for creating rewards, but this is *not* designed for production. Users *must* create their own custom reinforcement learning model class or use a pre-built one from outside the Freqtrade source code and save it to `user_data/freqaimodels`. It is inside the `calculate_reward()` where creative theories about the market can be expressed. For example, you can reward your agent when it makes a winning trade, and penalize the agent when it makes a losing trade. Or perhaps, you wish to reward the agent for entering trades, and penalize the agent for sitting in trades too long. Below we show examples of how these rewards are all calculated:
|
|
||||||
|
|
||||||
!!! note "Hint"
|
|
||||||
The best reward functions are ones that are continuously differentiable, and well scaled. In other words, adding a single large negative penalty to a rare event is not a good idea, and the neural net will not be able to learn that function. Instead, it is better to add a small negative penalty to a common event. This will help the agent learn faster. Not only this, but you can help improve the continuity of your rewards/penalties by having them scale with severity according to some linear/exponential functions. In other words, you'd slowly scale the penalty as the duration of the trade increases. This is better than a single large penalty occurring at a single point in time.
|
|
||||||
|
|
||||||
```python
|
```python
|
||||||
from freqtrade.freqai.prediction_models.ReinforcementLearner import ReinforcementLearner
|
|
||||||
from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Base5ActionRLEnv, Positions
|
|
||||||
|
|
||||||
|
|
||||||
class MyCoolRLModel(ReinforcementLearner):
|
|
||||||
"""
|
|
||||||
User created RL prediction model.
|
|
||||||
|
|
||||||
Save this file to `freqtrade/user_data/freqaimodels`
|
|
||||||
|
|
||||||
then use it with:
|
|
||||||
|
|
||||||
freqtrade trade --freqaimodel MyCoolRLModel --config config.json --strategy SomeCoolStrat
|
|
||||||
|
|
||||||
Here the users can override any of the functions
|
|
||||||
available in the `IFreqaiModel` inheritance tree. Most importantly for RL, this
|
|
||||||
is where the user overrides `MyRLEnv` (see below), to define custom
|
|
||||||
`calculate_reward()` function, or to override any other parts of the environment.
|
|
||||||
|
|
||||||
This class also allows users to override any other part of the IFreqaiModel tree.
|
|
||||||
For example, the user can override `def fit()` or `def train()` or `def predict()`
|
|
||||||
to take fine-tuned control over these processes.
|
|
||||||
|
|
||||||
Another common override may be `def data_cleaning_predict()` where the user can
|
|
||||||
take fine-tuned control over the data handling pipeline.
|
|
||||||
"""
|
|
||||||
class MyRLEnv(Base5ActionRLEnv):
|
class MyRLEnv(Base5ActionRLEnv):
|
||||||
"""
|
"""
|
||||||
User made custom environment. This class inherits from BaseEnvironment and gym.Env.
|
        User made custom environment. This class inherits from BaseEnvironment and gym.Env.
|
||||||
Users can override any functions from those parent classes. Here is an example
|
Users can override any functions from those parent classes. Here is an example
|
||||||
of a user customized `calculate_reward()` function.
|
of a user customized `calculate_reward()` function.
|
||||||
|
|
||||||
Warning!
|
|
||||||
This is function is a showcase of functionality designed to show as many possible
|
|
||||||
environment control features as possible. It is also designed to run quickly
|
|
||||||
on small computers. This is a benchmark, it is *not* for live production.
|
|
||||||
"""
|
"""
|
||||||
def calculate_reward(self, action: int) -> float:
|
def calculate_reward(self, action):
|
||||||
# first, penalize if the action is not valid
|
# first, penalize if the action is not valid
|
||||||
if not self._is_valid(action):
|
if not self._is_valid(action):
|
||||||
return -2
|
return -2
|
||||||
pnl = self.get_unrealized_profit()
|
pnl = self.get_unrealized_profit()
|
||||||
|
|
||||||
factor = 100
|
factor = 100
|
||||||
|
|
||||||
pair = self.pair.replace(':', '')
|
|
||||||
|
|
||||||
# you can use feature values from dataframe
|
|
||||||
# Assumes the shifted RSI indicator has been generated in the strategy.
|
|
||||||
rsi_now = self.raw_features[f"%-rsi-period_10_shift-1_{pair}_"
|
|
||||||
f"{self.config['timeframe']}"].iloc[self._current_tick]
|
|
||||||
|
|
||||||
# reward agent for entering trades
|
# reward agent for entering trades
|
||||||
if (action in (Actions.Long_enter.value, Actions.Short_enter.value)
|
if action in (Actions.Long_enter.value, Actions.Short_enter.value) \
|
||||||
and self._position == Positions.Neutral):
|
and self._position == Positions.Neutral:
|
||||||
if rsi_now < 40:
|
return 25
|
||||||
factor = 40 / rsi_now
|
|
||||||
else:
|
|
||||||
factor = 1
|
|
||||||
return 25 * factor
|
|
||||||
|
|
||||||
# discourage agent from not entering trades
|
# discourage agent from not entering trades
|
||||||
if action == Actions.Neutral.value and self._position == Positions.Neutral:
|
if action == Actions.Neutral.value and self._position == Positions.Neutral:
|
||||||
return -1
|
return -1
|
||||||
@@ -232,49 +178,25 @@ class MyCoolRLModel(ReinforcementLearner):
|
|||||||
return 0.
|
return 0.
|
||||||
```
|
```
|
||||||
|
|
||||||
## Using Tensorboard
|
### Creating a custom agent
|
||||||
|
|
||||||
Reinforcement Learning models benefit from tracking training metrics. FreqAI has integrated Tensorboard to allow users to track training and evaluation performance across all coins and across all retrainings. Tensorboard is activated via the following command:
|
Users can inherit from `stable_baselines3` and customize anything they wish about their agent. Doing this is for advanced users only, an example is presented in `freqai/RL/ReinforcementLearnerCustomAgent.py`
|
||||||
|
|
||||||
|
### Using Tensorboard
|
||||||
|
|
||||||
|
Reinforcement Learning models benefit from tracking training metrics. FreqAI has integrated Tensorboard to allow users to track training and evaluation performance across all coins and across all retrainings. To start, the user should ensure Tensorboard is installed on their computer:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
|
pip3 install tensorboard
|
||||||
|
```
|
||||||
|
|
||||||
|
Next, the user can activate Tensorboard with the following command:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cd freqtrade
|
||||||
tensorboard --logdir user_data/models/unique-id
|
tensorboard --logdir user_data/models/unique-id
|
||||||
```
|
```
|
||||||
|
|
||||||
where `unique-id` is the `identifier` set in the `freqai` configuration file. This command must be run in a separate shell to view the output in the browser at 127.0.0.1:6006 (6006 is the default port used by Tensorboard).
|
where `unique-id` is the `identifier` set in the `freqai` configuration file. This command must be run in a separate shell if the user wishes to view the output in their browser at 127.0.0.1:6006 (6006 is the default port used by Tensorboard).
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
## Custom logging
|
|
||||||
|
|
||||||
FreqAI also provides a built in episodic summary logger called `self.tensorboard_log` for adding custom information to the Tensorboard log. By default, this function is already called once per step inside the environment to record the agent actions. All values accumulated for all steps in a single episode are reported at the conclusion of each episode, followed by a full reset of all metrics to 0 in preparation for the subsequent episode.
|
|
||||||
|
|
||||||
`self.tensorboard_log` can also be used anywhere inside the environment, for example, it can be added to the `calculate_reward` function to collect more detailed information about how often various parts of the reward were called:
|
|
||||||
|
|
||||||
```python
|
|
||||||
class MyRLEnv(Base5ActionRLEnv):
|
|
||||||
"""
|
|
||||||
User made custom environment. This class inherits from BaseEnvironment and gym.Env.
|
|
||||||
Users can override any functions from those parent classes. Here is an example
|
|
||||||
of a user customized `calculate_reward()` function.
|
|
||||||
"""
|
|
||||||
def calculate_reward(self, action: int) -> float:
|
|
||||||
if not self._is_valid(action):
|
|
||||||
self.tensorboard_log("invalid")
|
|
||||||
return -2
|
|
||||||
|
|
||||||
```
|
|
||||||
|
|
||||||
!!! Note
|
|
||||||
The `self.tensorboard_log()` function is designed for tracking incremented objects only i.e. events, actions inside the training environment. If the event of interest is a float, the float can be passed as the second argument e.g. `self.tensorboard_log("float_metric1", 0.23)`. In this case the metric values are not incremented.
|
|
||||||
|
|
||||||
## Choosing a base environment
|
|
||||||
|
|
||||||
FreqAI provides three base environments, `Base3ActionRLEnvironment`, `Base4ActionEnvironment` and `Base5ActionEnvironment`. As the names imply, the environments are customized for agents that can select from 3, 4 or 5 actions. The `Base3ActionEnvironment` is the simplest, the agent can select from hold, long, or short. This environment can also be used for long-only bots (it automatically follows the `can_short` flag from the strategy), where long is the enter condition and short is the exit condition. Meanwhile, in the `Base4ActionEnvironment`, the agent can enter long, enter short, hold neutral, or exit position. Finally, in the `Base5ActionEnvironment`, the agent has the same actions as Base4, but instead of a single exit action, it separates exit long and exit short. The main changes stemming from the environment selection include:
|
|
||||||
|
|
||||||
* the actions available in the `calculate_reward`
|
|
||||||
* the actions consumed by the user strategy
|
|
||||||
|
|
||||||
All of the FreqAI provided environments inherit from an action/position agnostic environment object called the `BaseEnvironment`, which contains all shared logic. The architecture is designed to be easily customized. The simplest customization is the `calculate_reward()` (see details [here](#creating-a-custom-reward-function)). However, the customizations can be further extended into any of the functions inside the environment. You can do this by simply overriding those functions inside your `MyRLEnv` in the prediction model file. Or for more advanced customizations, it is encouraged to create an entirely new environment inherited from `BaseEnvironment`.
|
|
||||||
|
|
||||||
!!! Note
|
|
||||||
Only the `Base3ActionRLEnv` can do long-only training/trading (set the user strategy attribute `can_short = False`).
|
|
||||||
|
|||||||
@@ -41,11 +41,11 @@ FreqAI stores new model files after each successful training. These files become
|
|||||||
|
|
||||||
```json
|
```json
|
||||||
"freqai": {
|
"freqai": {
|
||||||
"purge_old_models": 4,
|
"purge_old_models": true,
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
This will automatically purge all models older than the four most recently trained ones to save disk space. Inputting "0" will never purge any models.
|
This will automatically purge all models older than the two most recently trained ones to save disk space.
|
||||||
|
|
||||||
## Backtesting
|
## Backtesting
|
||||||
|
|
||||||
@@ -67,32 +67,18 @@ Backtesting mode requires [downloading the necessary data](#downloading-data-to-
|
|||||||
*want* to retrain a new model with the same config file, you should simply change the `identifier`.
|
*want* to retrain a new model with the same config file, you should simply change the `identifier`.
|
||||||
This way, you can return to using any model you wish by simply specifying the `identifier`.
|
This way, you can return to using any model you wish by simply specifying the `identifier`.
|
||||||
|
|
||||||
!!! Note
|
|
||||||
Backtesting calls `set_freqai_targets()` one time for each backtest window (where the number of windows is the full backtest timerange divided by the `backtest_period_days` parameter). Doing this means that the targets simulate dry/live behavior without look ahead bias. However, the definition of the features in `feature_engineering_*()` is performed once on the entire training timerange. This means that you should be sure that features do not look-ahead into the future.
|
|
||||||
More details about look-ahead bias can be found in [Common Mistakes](strategy-customization.md#common-mistakes-when-developing-strategies).
|
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
### Saving backtesting prediction data
|
### Saving prediction data
|
||||||
|
|
||||||
To allow for tweaking your strategy (**not** the features!), FreqAI will automatically save the predictions during backtesting so that they can be reused for future backtests and live runs using the same `identifier` model. This provides a performance enhancement geared towards enabling **high-level hyperopting** of entry/exit criteria.
|
To allow for tweaking your strategy (**not** the features!), FreqAI will automatically save the predictions during backtesting so that they can be reused for future backtests and live runs using the same `identifier` model. This provides a performance enhancement geared towards enabling **high-level hyperopting** of entry/exit criteria.
|
||||||
|
|
||||||
An additional directory called `backtesting_predictions`, which contains all the predictions stored in `feather` format, will be created in the `unique-id` folder.
|
An additional directory called `predictions`, which contains all the predictions stored in `hdf` format, will be created in the `unique-id` folder.
|
||||||
|
|
||||||
To change your **features**, you **must** set a new `identifier` in the config to signal to FreqAI to train new models.
|
To change your **features**, you **must** set a new `identifier` in the config to signal to FreqAI to train new models.
|
||||||
|
|
||||||
To save the models generated during a particular backtest so that you can start a live deployment from one of them instead of training a new model, you must set `save_backtest_models` to `True` in the config.
|
To save the models generated during a particular backtest so that you can start a live deployment from one of them instead of training a new model, you must set `save_backtest_models` to `True` in the config.
|
||||||
|
|
||||||
!!! Note
|
|
||||||
To ensure that the model can be reused, FreqAI will call your strategy with a dataframe of length 1.
|
|
||||||
If your strategy requires more data than this to generate the same features, you can't reuse backtest predictions for live deployment and need to update your `identifier` for each new backtest.
|
|
||||||
|
|
||||||
### Backtest live collected predictions
|
|
||||||
|
|
||||||
FreqAI allows you to reuse live historic predictions through the backtest parameter `--freqai-backtest-live-models`. This can be useful when you want to reuse predictions generated during a dry/live run for comparison or other studies.
|
|
||||||
|
|
||||||
The `--timerange` parameter must not be provided, as it will be automatically calculated from the data in the historic predictions file.
|
|
||||||
|
|
||||||
### Downloading data to cover the full backtest period
|
### Downloading data to cover the full backtest period
|
||||||
|
|
||||||
For live/dry deployments, FreqAI will download the necessary data automatically. However, to use backtesting functionality, you need to download the necessary data using `download-data` (details [here](data-download.md#data-downloading)). You need to pay careful attention to understanding how much *additional* data needs to be downloaded to ensure that there is a sufficient amount of training data *before* the start of the backtesting time range. The amount of additional data can be roughly estimated by moving the start date of the time range backwards by `train_period_days` and the `startup_candle_count` (see the [parameter table](freqai-parameter-table.md) for detailed descriptions of these parameters) from the beginning of the desired backtesting time range.
|
For live/dry deployments, FreqAI will download the necessary data automatically. However, to use backtesting functionality, you need to download the necessary data using `download-data` (details [here](data-download.md#data-downloading)). You need to pay careful attention to understanding how much *additional* data needs to be downloaded to ensure that there is a sufficient amount of training data *before* the start of the backtesting time range. The amount of additional data can be roughly estimated by moving the start date of the time range backwards by `train_period_days` and the `startup_candle_count` (see the [parameter table](freqai-parameter-table.md) for detailed descriptions of these parameters) from the beginning of the desired backtesting time range.
|
||||||
@@ -123,7 +109,7 @@ In the presented example config, the user will only allow predictions on models
|
|||||||
|
|
||||||
Model training parameters are unique to the selected machine learning library. FreqAI allows you to set any parameter for any library using the `model_training_parameters` dictionary in the config. The example config (found in `config_examples/config_freqai.example.json`) shows some of the example parameters associated with `Catboost` and `LightGBM`, but you can add any parameters available in those libraries or any other machine learning library you choose to implement.
|
Model training parameters are unique to the selected machine learning library. FreqAI allows you to set any parameter for any library using the `model_training_parameters` dictionary in the config. The example config (found in `config_examples/config_freqai.example.json`) shows some of the example parameters associated with `Catboost` and `LightGBM`, but you can add any parameters available in those libraries or any other machine learning library you choose to implement.
|
||||||
|
|
||||||
Data split parameters are defined in `data_split_parameters` which can be any parameters associated with scikit-learn's `train_test_split()` function. `train_test_split()` has a parameter called `shuffle` which allows you to shuffle the data or keep it unshuffled. This is particularly useful to avoid biasing training with temporally auto-correlated data. More details about these parameters can be found on the [scikit-learn website](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html) (external website).
|
Data split parameters are defined in `data_split_parameters` which can be any parameters associated with Scikit-learn's `train_test_split()` function. `train_test_split()` has a parameters called `shuffle` which allows to shuffle the data or keep it unshuffled. This is particularly useful to avoid biasing training with temporally auto-correlated data. More details about these parameters can be found the [Scikit-learn website](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html) (external website).
|
||||||
|
|
||||||
The FreqAI specific parameter `label_period_candles` defines the offset (number of candles into the future) used for the `labels`. In the presented [example config](freqai-configuration.md#setting-up-the-configuration-file), the user is asking for `labels` that are 24 candles in the future.
|
The FreqAI specific parameter `label_period_candles` defines the offset (number of candles into the future) used for the `labels`. In the presented [example config](freqai-configuration.md#setting-up-the-configuration-file), the user is asking for `labels` that are 24 candles in the future.
|
||||||
|
|
||||||
@@ -131,12 +117,6 @@ The FreqAI specific parameter `label_period_candles` defines the offset (number
|
|||||||
|
|
||||||
You can choose to adopt a continual learning scheme by setting `"continual_learning": true` in the config. By enabling `continual_learning`, after training an initial model from scratch, subsequent trainings will start from the final model state of the preceding training. This gives the new model a "memory" of the previous state. By default, this is set to `False` which means that all new models are trained from scratch, without input from previous models.
|
You can choose to adopt a continual learning scheme by setting `"continual_learning": true` in the config. By enabling `continual_learning`, after training an initial model from scratch, subsequent trainings will start from the final model state of the preceding training. This gives the new model a "memory" of the previous state. By default, this is set to `False` which means that all new models are trained from scratch, without input from previous models.
|
||||||
|
|
||||||
???+ danger "Continual learning enforces a constant parameter space"
|
|
||||||
Since `continual_learning` means that the model parameter space *cannot* change between trainings, `principal_component_analysis` is automatically disabled when `continual_learning` is enabled. Hint: PCA changes the parameter space and the number of features, learn more about PCA [here](freqai-feature-engineering.md#data-dimensionality-reduction-with-principal-component-analysis).
|
|
||||||
|
|
||||||
???+ danger "Experimental functionality"
|
|
||||||
Beware that this is currently a naive approach to incremental learning, and it has a high probability of overfitting/getting stuck in local minima while the market moves away from your model. We have the mechanics available in FreqAI primarily for experimental purposes and so that it is ready for more mature approaches to continual learning in chaotic systems like the crypto market.
|
|
||||||
|
|
||||||
## Hyperopt
|
## Hyperopt
|
||||||
|
|
||||||
You can hyperopt using the same command as for [typical Freqtrade hyperopt](hyperopt.md):
|
You can hyperopt using the same command as for [typical Freqtrade hyperopt](hyperopt.md):
|
||||||
@@ -148,7 +128,7 @@ freqtrade hyperopt --hyperopt-loss SharpeHyperOptLoss --strategy FreqaiExampleSt
|
|||||||
`hyperopt` requires you to have the data pre-downloaded in the same fashion as if you were doing [backtesting](#backtesting). In addition, you must consider some restrictions when trying to hyperopt FreqAI strategies:
|
`hyperopt` requires you to have the data pre-downloaded in the same fashion as if you were doing [backtesting](#backtesting). In addition, you must consider some restrictions when trying to hyperopt FreqAI strategies:
|
||||||
|
|
||||||
- The `--analyze-per-epoch` hyperopt parameter is not compatible with FreqAI.
|
- The `--analyze-per-epoch` hyperopt parameter is not compatible with FreqAI.
|
||||||
- It's not possible to hyperopt indicators in the `feature_engineering_*()` and `set_freqai_targets()` functions. This means that you cannot optimize model parameters using hyperopt. Apart from this exception, it is possible to optimize all other [spaces](hyperopt.md#running-hyperopt-with-smaller-search-space).
|
- It's not possible to hyperopt indicators in the `populate_any_indicators()` function. This means that you cannot optimize model parameters using hyperopt. Apart from this exception, it is possible to optimize all other [spaces](hyperopt.md#running-hyperopt-with-smaller-search-space).
|
||||||
- The backtesting instructions also apply to hyperopt.
|
- The backtesting instructions also apply to hyperopt.
|
||||||
|
|
||||||
The best method for combining hyperopt and FreqAI is to focus on hyperopting entry/exit thresholds/criteria. You need to focus on hyperopting parameters that are not used in your features. For example, you should not try to hyperopt rolling window lengths in the feature creation, or any part of the FreqAI config which changes predictions. In order to efficiently hyperopt the FreqAI strategy, FreqAI stores predictions as dataframes and reuses them. Hence the requirement to hyperopt entry/exit thresholds/criteria only.
|
The best method for combining hyperopt and FreqAI is to focus on hyperopting entry/exit thresholds/criteria. You need to focus on hyperopting parameters that are not used in your features. For example, you should not try to hyperopt rolling window lengths in the feature creation, or any part of the FreqAI config which changes predictions. In order to efficiently hyperopt the FreqAI strategy, FreqAI stores predictions as dataframes and reuses them. Hence the requirement to hyperopt entry/exit thresholds/criteria only.
|
||||||
@@ -162,26 +142,15 @@ dataframe['outlier'] = np.where(dataframe['DI_values'] > self.di_max.value/10, 1
|
|||||||
|
|
||||||
This specific hyperopt would help you understand the appropriate `DI_values` for your particular parameter space.
|
This specific hyperopt would help you understand the appropriate `DI_values` for your particular parameter space.
|
||||||
|
|
||||||
## Using Tensorboard
|
## Setting up a follower
|
||||||
|
|
||||||
!!! note "Availability"
|
You can indicate to the bot that it should not train models, but instead should look for models trained by a leader with a specific `identifier` by defining:
|
||||||
FreqAI includes tensorboard for a variety of models, including XGBoost, all PyTorch models, Reinforcement Learning, and Catboost. If you would like to see Tensorboard integrated into another model type, please open an issue on the [Freqtrade GitHub](https://github.com/freqtrade/freqtrade/issues)
|
|
||||||
|
|
||||||
!!! danger "Requirements"
|
```json
|
||||||
Tensorboard logging requires the FreqAI torch installation/docker image.
|
"freqai": {
|
||||||
|
"follow_mode": true,
|
||||||
|
"identifier": "example"
|
||||||
The easiest way to use tensorboard is to ensure `freqai.activate_tensorboard` is set to `True` (default setting) in your configuration file, run FreqAI, then open a separate shell and run:
|
}
|
||||||
|
|
||||||
```bash
|
|
||||||
cd freqtrade
|
|
||||||
tensorboard --logdir user_data/models/unique-id
|
|
||||||
```
|
```
|
||||||
|
|
||||||
where `unique-id` is the `identifier` set in the `freqai` configuration file. This command must be run in a separate shell if you wish to view the output in your browser at 127.0.0.1:6006 (6006 is the default port used by Tensorboard).
|
In this example, the user has a leader bot with the `"identifier": "example"`. The leader bot is already running or is launched simultaneously with the follower. The follower will load models created by the leader and inference them to obtain predictions instead of training its own models.
|
||||||
|
|
||||||

|
|
||||||
|
|
||||||
|
|
||||||
!!! note "Deactivate for improved performance"
|
|
||||||
Tensorboard logging can slow down training and should be deactivated for production use.
|
|
||||||
|
|||||||
@@ -4,10 +4,7 @@
|
|||||||
|
|
||||||
## Introduction
|
## Introduction
|
||||||
|
|
||||||
FreqAI is a software designed to automate a variety of tasks associated with training a predictive machine learning model to generate market forecasts given a set of input signals. In general, FreqAI aims to be a sandbox for easily deploying robust machine learning libraries on real-time data ([details](#freqai-position-in-open-source-machine-learning-landscape)).
|
FreqAI is a software designed to automate a variety of tasks associated with training a predictive machine learning model to generate market forecasts given a set of input features.
|
||||||
|
|
||||||
!!! Note
|
|
||||||
FreqAI is, and always will be, a not-for-profit, open-source project. FreqAI does *not* have a crypto token, FreqAI does *not* sell signals, and FreqAI does not have a domain besides the present [freqtrade documentation](https://www.freqtrade.io/en/latest/freqai/).
|
|
||||||
|
|
||||||
Features include:
|
Features include:
|
||||||
|
|
||||||
@@ -22,7 +19,7 @@ Features include:
|
|||||||
* **Automatic data download** - Compute timeranges for data downloads and update historic data (in live deployments)
|
* **Automatic data download** - Compute timeranges for data downloads and update historic data (in live deployments)
|
||||||
* **Cleaning of incoming data** - Handle NaNs safely before training and model inferencing
|
* **Cleaning of incoming data** - Handle NaNs safely before training and model inferencing
|
||||||
* **Dimensionality reduction** - Reduce the size of the training data via [Principal Component Analysis](freqai-feature-engineering.md#data-dimensionality-reduction-with-principal-component-analysis)
|
* **Dimensionality reduction** - Reduce the size of the training data via [Principal Component Analysis](freqai-feature-engineering.md#data-dimensionality-reduction-with-principal-component-analysis)
|
||||||
* **Deploying bot fleets** - Set one bot to train models while a fleet of [consumers](producer-consumer.md) use signals.
|
* **Deploying bot fleets** - Set one bot to train models while a fleet of [follower bots](freqai-running.md#setting-up-a-follower) inference the models and handle trades
|
||||||
|
|
||||||
## Quick start
|
## Quick start
|
||||||
|
|
||||||
@@ -34,9 +31,6 @@ freqtrade trade --config config_examples/config_freqai.example.json --strategy F
|
|||||||
|
|
||||||
You will see the boot-up process of automatic data downloading, followed by simultaneous training and trading.
|
You will see the boot-up process of automatic data downloading, followed by simultaneous training and trading.
|
||||||
|
|
||||||
!!! danger "Not for production"
|
|
||||||
The example strategy provided with the Freqtrade source code is designed for showcasing/testing a wide variety of FreqAI features. It is also designed to run on small computers so that it can be used as a benchmark between developers and users. It is *not* designed to be run in production.
|
|
||||||
|
|
||||||
An example strategy, prediction model, and config to use as starting points can be found in
|
An example strategy, prediction model, and config to use as a starting points can be found in
|
||||||
`freqtrade/templates/FreqaiExampleStrategy.py`, `freqtrade/freqai/prediction_models/LightGBMRegressor.py`, and
|
`freqtrade/templates/FreqaiExampleStrategy.py`, `freqtrade/freqai/prediction_models/LightGBMRegressor.py`, and
|
||||||
`config_examples/config_freqai.example.json`, respectively.
|
`config_examples/config_freqai.example.json`, respectively.
|
||||||
@@ -72,33 +66,11 @@ pip install -r requirements-freqai.txt
|
|||||||
```
|
```
|
||||||
|
|
||||||
!!! Note
|
!!! Note
|
||||||
Catboost will not be installed on low-powered arm devices (raspberry), since it does not provide wheels for this platform.
|
Catboost will not be installed on arm devices (raspberry, Mac M1, ARM based VPS, ...), since it does not provide wheels for this platform.
|
||||||
|
|
||||||
### Usage with docker
|
### Usage with docker
|
||||||
|
|
||||||
If you are using docker, a dedicated tag with FreqAI dependencies is available as `:freqai`. As such - you can replace the image line in your docker compose file with `image: freqtradeorg/freqtrade:develop_freqai`. This image contains the regular FreqAI dependencies. Similar to native installs, Catboost will not be available on ARM based devices. If you would like to use PyTorch or Reinforcement learning, you should use the torch or RL tags, `image: freqtradeorg/freqtrade:develop_freqaitorch`, `image: freqtradeorg/freqtrade:develop_freqairl`.
|
If you are using docker, a dedicated tag with FreqAI dependencies is available as `:freqai`. As such - you can replace the image line in your docker-compose file with `image: freqtradeorg/freqtrade:develop_freqai`. This image contains the regular FreqAI dependencies. Similar to native installs, Catboost will not be available on ARM based devices.
|
||||||
|
|
||||||
!!! note "docker-compose-freqai.yml"
|
|
||||||
We do provide an explicit docker-compose file for this in `docker/docker-compose-freqai.yml` - which can be used via `docker compose -f docker/docker-compose-freqai.yml run ...` - or can be copied to replace the original docker file. This docker-compose file also contains a (disabled) section to enable GPU resources within docker containers. This obviously assumes the system has GPU resources available.
|
|
||||||
|
|
||||||
### FreqAI position in open-source machine learning landscape
|
|
||||||
|
|
||||||
Forecasting chaotic time-series based systems, such as equity/cryptocurrency markets, requires a broad set of tools geared toward testing a wide range of hypotheses. Fortunately, a recent maturation of robust machine learning libraries (e.g. `scikit-learn`) has opened up a wide range of research possibilities. Scientists from a diverse range of fields can now easily prototype their studies on an abundance of established machine learning algorithms. Similarly, these user-friendly libraries enable "citizen scientists" to use their basic Python skills for data exploration. However, leveraging these machine learning libraries on historical and live chaotic data sources can be logistically difficult and expensive. Additionally, robust data collection, storage, and handling presents a disparate challenge. [`FreqAI`](#freqai) aims to provide a generalized and extensible open-sourced framework geared toward live deployments of adaptive modeling for market forecasting. The `FreqAI` framework is effectively a sandbox for the rich world of open-source machine learning libraries. Inside the `FreqAI` sandbox, users find they can combine a wide variety of third-party libraries to test creative hypotheses on a free live 24/7 chaotic data source - cryptocurrency exchange data.
|
|
||||||
|
|
||||||
### Citing FreqAI
|
|
||||||
|
|
||||||
FreqAI is [published in the Journal of Open Source Software](https://joss.theoj.org/papers/10.21105/joss.04864). If you find FreqAI useful in your research, please use the following citation:
|
|
||||||
|
|
||||||
```bibtex
|
|
||||||
@article{Caulk2022,
|
|
||||||
doi = {10.21105/joss.04864},
|
|
||||||
url = {https://doi.org/10.21105/joss.04864},
|
|
||||||
year = {2022}, publisher = {The Open Journal},
|
|
||||||
volume = {7}, number = {80}, pages = {4864},
|
|
||||||
author = {Robert A. Caulk and Elin Törnquist and Matthias Voppichler and Andrew R. Lawless and Ryan McMullan and Wagner Costa Santos and Timothy C. Pogue and Johan van der Vlugt and Stefan P. Gehring and Pascal Schmidt},
|
|
||||||
title = {FreqAI: generalizing adaptive modeling for chaotic time-series market forecasts},
|
|
||||||
journal = {Journal of Open Source Software} }
|
|
||||||
```
|
|
||||||
|
|
||||||
## Common pitfalls
|
## Common pitfalls
|
||||||
|
|
||||||
@@ -107,18 +79,6 @@ This is for performance reasons - FreqAI relies on making quick predictions/retr
|
|||||||
it needs to download all the training data at the beginning of a dry/live instance. FreqAI stores and appends
|
it needs to download all the training data at the beginning of a dry/live instance. FreqAI stores and appends
|
||||||
new candles automatically for future retrains. This means that if new pairs arrive later in the dry run due to a volume pairlist, it will not have the data ready. However, FreqAI does work with the `ShufflePairlist` or a `VolumePairlist` which keeps the total pairlist constant (but reorders the pairs according to volume).
|
new candles automatically for future retrains. This means that if new pairs arrive later in the dry run due to a volume pairlist, it will not have the data ready. However, FreqAI does work with the `ShufflePairlist` or a `VolumePairlist` which keeps the total pairlist constant (but reorders the pairs according to volume).
|
||||||
|
|
||||||
## Additional learning materials
|
|
||||||
|
|
||||||
Here we compile some external materials that provide deeper looks into various components of FreqAI:
|
|
||||||
|
|
||||||
- [Real-time head-to-head: Adaptive modeling of financial market data using XGBoost and CatBoost](https://emergentmethods.medium.com/real-time-head-to-head-adaptive-modeling-of-financial-market-data-using-xgboost-and-catboost-995a115a7495)
|
|
||||||
- [FreqAI - from price to prediction](https://emergentmethods.medium.com/freqai-from-price-to-prediction-6fadac18b665)
|
|
||||||
|
|
||||||
|
|
||||||
## Support
|
|
||||||
|
|
||||||
You can find support for FreqAI in a variety of places, including the [Freqtrade discord](https://discord.gg/Jd8JYeWHc4), the dedicated [FreqAI discord](https://discord.gg/7AMWACmbjT), and in [github issues](https://github.com/freqtrade/freqtrade/issues).
|
|
||||||
|
|
||||||
## Credits
|
## Credits
|
||||||
|
|
||||||
FreqAI is developed by a group of individuals who all contribute specific skillsets to the project.
|
FreqAI is developed by a group of individuals who all contribute specific skillsets to the project.
|
||||||
@@ -134,8 +94,6 @@ Code review and software architecture brainstorming:
|
|||||||
|
|
||||||
Software development:
|
Software development:
|
||||||
Wagner Costa @wagnercosta
|
Wagner Costa @wagnercosta
|
||||||
Emre Suzen @aemr3
|
|
||||||
Timothy Pogue @wizrds
|
|
||||||
|
|
||||||
Beta testing and bug reporting:
|
Beta testing and bug reporting:
|
||||||
Stefan Gehring @bloodhunter4rc, @longyu, Andrew Lawless @paranoidandy, Pascal Schmidt @smidelis, Ryan McMullan @smarmau, Juha Nykänen @suikula, Johan van der Vlugt @jooopiert, Richárd Józsa @richardjosza
|
Stefan Gehring @bloodhunter4rc, @longyu, Andrew Lawless @paranoidandy, Pascal Schmidt @smidelis, Ryan McMullan @smarmau, Juha Nykänen @suikula, Johan van der Vlugt @jooopiert, Richárd Józsa @richardjosza, Timothy Pogue @wizrds
|
||||||
|
|||||||
118
docs/hyperopt.md
@@ -14,7 +14,8 @@ To learn how to get data for the pairs and exchange you're interested in, head o
|
|||||||
|
|
||||||
!!! Note
|
!!! Note
|
||||||
Since 2021.4 release you no longer have to write a separate hyperopt class, but can configure the parameters directly in the strategy.
|
Since 2021.4 release you no longer have to write a separate hyperopt class, but can configure the parameters directly in the strategy.
|
||||||
The legacy method was supported up to 2021.8 and has been removed in 2021.9.
|
The legacy method is still supported, but it is no longer the recommended way of setting up hyperopt.
|
||||||
|
The legacy documentation is available at [Legacy Hyperopt](advanced-hyperopt.md#legacy-hyperopt).
|
||||||
|
|
||||||
## Install hyperopt dependencies
|
## Install hyperopt dependencies
|
||||||
|
|
||||||
@@ -30,7 +31,7 @@ The docker-image includes hyperopt dependencies, no further action needed.
|
|||||||
### Easy installation script (setup.sh) / Manual installation
|
### Easy installation script (setup.sh) / Manual installation
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
source .venv/bin/activate
|
source .env/bin/activate
|
||||||
pip install -r requirements-hyperopt.txt
|
pip install -r requirements-hyperopt.txt
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -42,28 +43,28 @@ usage: freqtrade hyperopt [-h] [-v] [--logfile FILE] [-V] [-c PATH] [-d PATH]
|
|||||||
[--recursive-strategy-search] [--freqaimodel NAME]
|
[--recursive-strategy-search] [--freqaimodel NAME]
|
||||||
[--freqaimodel-path PATH] [-i TIMEFRAME]
|
[--freqaimodel-path PATH] [-i TIMEFRAME]
|
||||||
[--timerange TIMERANGE]
|
[--timerange TIMERANGE]
|
||||||
[--data-format-ohlcv {json,jsongz,hdf5,feather,parquet}]
|
[--data-format-ohlcv {json,jsongz,hdf5}]
|
||||||
[--max-open-trades INT]
|
[--max-open-trades INT]
|
||||||
[--stake-amount STAKE_AMOUNT] [--fee FLOAT]
|
[--stake-amount STAKE_AMOUNT] [--fee FLOAT]
|
||||||
[-p PAIRS [PAIRS ...]] [--hyperopt-path PATH]
|
[-p PAIRS [PAIRS ...]] [--hyperopt-path PATH]
|
||||||
[--eps] [--enable-protections]
|
[--eps] [--dmmp] [--enable-protections]
|
||||||
[--dry-run-wallet DRY_RUN_WALLET]
|
[--dry-run-wallet DRY_RUN_WALLET]
|
||||||
[--timeframe-detail TIMEFRAME_DETAIL] [-e INT]
|
[--timeframe-detail TIMEFRAME_DETAIL] [-e INT]
|
||||||
[--spaces {all,buy,sell,roi,stoploss,trailing,protection,trades,default} [{all,buy,sell,roi,stoploss,trailing,protection,trades,default} ...]]
|
[--spaces {all,buy,sell,roi,stoploss,trailing,protection,default} [{all,buy,sell,roi,stoploss,trailing,protection,default} ...]]
|
||||||
[--print-all] [--no-color] [--print-json] [-j JOBS]
|
[--print-all] [--no-color] [--print-json] [-j JOBS]
|
||||||
[--random-state INT] [--min-trades INT]
|
[--random-state INT] [--min-trades INT]
|
||||||
[--hyperopt-loss NAME] [--disable-param-export]
|
[--hyperopt-loss NAME] [--disable-param-export]
|
||||||
[--ignore-missing-spaces] [--analyze-per-epoch]
|
[--ignore-missing-spaces] [--analyze-per-epoch]
|
||||||
|
|
||||||
options:
|
optional arguments:
|
||||||
-h, --help show this help message and exit
|
-h, --help show this help message and exit
|
||||||
-i TIMEFRAME, --timeframe TIMEFRAME
|
-i TIMEFRAME, --timeframe TIMEFRAME
|
||||||
Specify timeframe (`1m`, `5m`, `30m`, `1h`, `1d`).
|
Specify timeframe (`1m`, `5m`, `30m`, `1h`, `1d`).
|
||||||
--timerange TIMERANGE
|
--timerange TIMERANGE
|
||||||
Specify what timerange of data to use.
|
Specify what timerange of data to use.
|
||||||
--data-format-ohlcv {json,jsongz,hdf5,feather,parquet}
|
--data-format-ohlcv {json,jsongz,hdf5}
|
||||||
Storage format for downloaded candle (OHLCV) data.
|
Storage format for downloaded candle (OHLCV) data.
|
||||||
(default: `feather`).
|
(default: `json`).
|
||||||
--max-open-trades INT
|
--max-open-trades INT
|
||||||
Override the value of the `max_open_trades`
|
Override the value of the `max_open_trades`
|
||||||
configuration setting.
|
configuration setting.
|
||||||
@@ -80,6 +81,10 @@ options:
|
|||||||
--eps, --enable-position-stacking
|
--eps, --enable-position-stacking
|
||||||
Allow buying the same pair multiple times (position
|
Allow buying the same pair multiple times (position
|
||||||
stacking).
|
stacking).
|
||||||
|
--dmmp, --disable-max-market-positions
|
||||||
|
Disable applying `max_open_trades` during backtest
|
||||||
|
(same as setting `max_open_trades` to a very high
|
||||||
|
number).
|
||||||
--enable-protections, --enableprotections
|
--enable-protections, --enableprotections
|
||||||
Enable protections for backtesting. Will slow
|
Enable protections for backtesting.Will slow
|
||||||
backtesting down by a considerable amount, but will
|
backtesting down by a considerable amount, but will
|
||||||
@@ -91,7 +96,7 @@ options:
|
|||||||
Specify detail timeframe for backtesting (`1m`, `5m`,
|
Specify detail timeframe for backtesting (`1m`, `5m`,
|
||||||
`30m`, `1h`, `1d`).
|
`30m`, `1h`, `1d`).
|
||||||
-e INT, --epochs INT Specify number of epochs (default: 100).
|
-e INT, --epochs INT Specify number of epochs (default: 100).
|
||||||
--spaces {all,buy,sell,roi,stoploss,trailing,protection,trades,default} [{all,buy,sell,roi,stoploss,trailing,protection,trades,default} ...]
|
--spaces {all,buy,sell,roi,stoploss,trailing,protection,default} [{all,buy,sell,roi,stoploss,trailing,protection,default} ...]
|
||||||
Specify which parameters to hyperopt. Space-separated
|
Specify which parameters to hyperopt. Space-separated
|
||||||
list.
|
list.
|
||||||
--print-all Print all results, not only the best ones.
|
--print-all Print all results, not only the best ones.
|
||||||
@@ -129,8 +134,7 @@ options:
|
|||||||
|
|
||||||
Common arguments:
|
Common arguments:
|
||||||
-v, --verbose Verbose mode (-vv for more, -vvv to get all messages).
|
-v, --verbose Verbose mode (-vv for more, -vvv to get all messages).
|
||||||
--logfile FILE, --log-file FILE
|
--logfile FILE Log to the file specified. Special values are:
|
||||||
Log to the file specified. Special values are:
|
|
||||||
'syslog', 'journald'. See the documentation for more
|
'syslog', 'journald'. See the documentation for more
|
||||||
details.
|
details.
|
||||||
-V, --version show program's version number and exit
|
-V, --version show program's version number and exit
|
||||||
@@ -139,7 +143,7 @@ Common arguments:
|
|||||||
`userdir/config.json` or `config.json` whichever
|
`userdir/config.json` or `config.json` whichever
|
||||||
exists). Multiple --config options may be used. Can be
|
exists). Multiple --config options may be used. Can be
|
||||||
set to `-` to read config from stdin.
|
set to `-` to read config from stdin.
|
||||||
-d PATH, --datadir PATH, --data-dir PATH
|
-d PATH, --datadir PATH
|
||||||
Path to directory with historical backtesting data.
|
Path to directory with historical backtesting data.
|
||||||
--userdir PATH, --user-data-dir PATH
|
--userdir PATH, --user-data-dir PATH
|
||||||
Path to userdata directory.
|
Path to userdata directory.
|
||||||
@@ -176,7 +180,6 @@ Rarely you may also need to create a [nested class](advanced-hyperopt.md#overrid
|
|||||||
* `generate_roi_table` - for custom ROI optimization (if you need the ranges for the values in the ROI table that differ from default or the number of entries (steps) in the ROI table which differs from the default 4 steps)
|
* `generate_roi_table` - for custom ROI optimization (if you need the ranges for the values in the ROI table that differ from default or the number of entries (steps) in the ROI table which differs from the default 4 steps)
|
||||||
* `stoploss_space` - for custom stoploss optimization (if you need the range for the stoploss parameter in the optimization hyperspace that differs from default)
|
* `stoploss_space` - for custom stoploss optimization (if you need the range for the stoploss parameter in the optimization hyperspace that differs from default)
|
||||||
* `trailing_space` - for custom trailing stop optimization (if you need the ranges for the trailing stop parameters in the optimization hyperspace that differ from default)
|
* `trailing_space` - for custom trailing stop optimization (if you need the ranges for the trailing stop parameters in the optimization hyperspace that differ from default)
|
||||||
* `max_open_trades_space` - for custom max_open_trades optimization (if you need the ranges for the max_open_trades parameter in the optimization hyperspace that differ from default)
|
|
||||||
|
|
||||||
!!! Tip "Quickly optimize ROI, stoploss and trailing stoploss"
|
!!! Tip "Quickly optimize ROI, stoploss and trailing stoploss"
|
||||||
You can quickly optimize the spaces `roi`, `stoploss` and `trailing` without changing anything in your strategy.
|
You can quickly optimize the spaces `roi`, `stoploss` and `trailing` without changing anything in your strategy.
|
||||||
@@ -333,15 +336,11 @@ There are four parameter types each suited for different purposes.
|
|||||||
* `CategoricalParameter` - defines a parameter with a predetermined number of choices.
|
* `CategoricalParameter` - defines a parameter with a predetermined number of choices.
|
||||||
* `BooleanParameter` - Shorthand for `CategoricalParameter([True, False])` - great for "enable" parameters.
|
* `BooleanParameter` - Shorthand for `CategoricalParameter([True, False])` - great for "enable" parameters.
|
||||||
|
|
||||||
### Parameter options
|
!!! Tip "Disabling parameter optimization"
|
||||||
|
Each parameter takes two boolean parameters:
|
||||||
There are two parameter options that can help you to quickly test various ideas:
|
* `load` - when set to `False` it will not load values configured in `buy_params` and `sell_params`.
|
||||||
|
* `optimize` - when set to `False` parameter will not be included in optimization process.
|
||||||
* `optimize` - when set to `False`, the parameter will not be included in optimization process. (Default: True)
|
Use these parameters to quickly prototype various ideas.
|
||||||
* `load` - when set to `False`, results of a previous hyperopt run (in `buy_params` and `sell_params` either in your strategy or the JSON output file) will not be used as the starting value for subsequent hyperopts. The default value specified in the parameter will be used instead. (Default: True)
|
|
||||||
|
|
||||||
!!! Tip "Effects of `load=False` on backtesting"
|
|
||||||
Be aware that setting the `load` option to `False` will mean backtesting will also use the default value specified in the parameter and *not* the value found through hyperoptimisation.
|
|
||||||
|
|
||||||
!!! Warning
|
!!! Warning
|
||||||
Hyperoptable parameters cannot be used in `populate_indicators` - as hyperopt does not recalculate indicators for each epoch, so the starting value would be used in this case.
|
Hyperoptable parameters cannot be used in `populate_indicators` - as hyperopt does not recalculate indicators for each epoch, so the starting value would be used in this case.
|
||||||
@@ -366,7 +365,7 @@ class MyAwesomeStrategy(IStrategy):
|
|||||||
timeframe = '15m'
|
timeframe = '15m'
|
||||||
minimal_roi = {
|
minimal_roi = {
|
||||||
"0": 0.10
|
"0": 0.10
|
||||||
}
|
},
|
||||||
# Define the parameter spaces
|
# Define the parameter spaces
|
||||||
buy_ema_short = IntParameter(3, 50, default=5)
|
buy_ema_short = IntParameter(3, 50, default=5)
|
||||||
buy_ema_long = IntParameter(15, 200, default=50)
|
buy_ema_long = IntParameter(15, 200, default=50)
|
||||||
@@ -433,14 +432,10 @@ While this strategy is most likely too simple to provide consistent profit, it s
|
|||||||
`range` property may also be used with `DecimalParameter` and `CategoricalParameter`. `RealParameter` does not provide this property due to infinite search space.
|
`range` property may also be used with `DecimalParameter` and `CategoricalParameter`. `RealParameter` does not provide this property due to infinite search space.
|
||||||
|
|
||||||
??? Hint "Performance tip"
|
??? Hint "Performance tip"
|
||||||
During normal hyperopting, indicators are calculated once and supplied to each epoch, linearly increasing RAM usage as a factor of increasing cores. As this also has performance implications, there are two alternatives to reduce RAM usage
|
During normal hyperopting, indicators are calculated once and supplied to each epoch, linearly increasing RAM usage as a factor of increasing cores. As this also has performance implications, hyperopt provides `--analyze-per-epoch` which will move the execution of `populate_indicators()` to the epoch process, calculating a single value per parameter per epoch instead of using the `.range` functionality. In this case, `.range` functionality will only return the actually used value. This will reduce RAM usage, but increase CPU usage. However, your hyperopting run will be less likely to fail due to Out Of Memory (OOM) issues.
|
||||||
|
|
||||||
* Move `ema_short` and `ema_long` calculations from `populate_indicators()` to `populate_entry_trend()`. Since `populate_entry_trend()` will be calculated every epoch, you don't need to use `.range` functionality.
|
In either case, you should try to use space ranges as small as possible this will improve CPU/RAM usage in both scenarios.
|
||||||
* hyperopt provides `--analyze-per-epoch` which will move the execution of `populate_indicators()` to the epoch process, calculating a single value per parameter per epoch instead of using the `.range` functionality. In this case, `.range` functionality will only return the actually used value.
|
|
||||||
|
|
||||||
These alternatives will reduce RAM usage, but increase CPU usage. However, your hyperopting run will be less likely to fail due to Out Of Memory (OOM) issues.
|
|
||||||
|
|
||||||
Whether you are using `.range` functionality or the alternatives above, you should try to use space ranges as small as possible since this will improve CPU/RAM usage.
|
|
||||||
|
|
||||||
## Optimizing protections
|
## Optimizing protections
|
||||||
|
|
||||||
@@ -585,15 +580,14 @@ Currently, the following loss functions are builtin:
|
|||||||
|
|
||||||
* `ShortTradeDurHyperOptLoss` - (default legacy Freqtrade hyperoptimization loss function) - Mostly for short trade duration and avoiding losses.
|
* `ShortTradeDurHyperOptLoss` - (default legacy Freqtrade hyperoptimization loss function) - Mostly for short trade duration and avoiding losses.
|
||||||
* `OnlyProfitHyperOptLoss` - takes only amount of profit into consideration.
|
* `OnlyProfitHyperOptLoss` - takes only amount of profit into consideration.
|
||||||
* `SharpeHyperOptLoss` - Optimizes Sharpe Ratio calculated on trade returns relative to standard deviation.
|
* `SharpeHyperOptLoss` - optimizes Sharpe Ratio calculated on trade returns relative to standard deviation.
|
||||||
* `SharpeHyperOptLossDaily` - Optimizes Sharpe Ratio calculated on **daily** trade returns relative to standard deviation.
|
* `SharpeHyperOptLossDaily` - optimizes Sharpe Ratio calculated on **daily** trade returns relative to standard deviation.
|
||||||
* `SortinoHyperOptLoss` - Optimizes Sortino Ratio calculated on trade returns relative to **downside** standard deviation.
|
* `SortinoHyperOptLoss` - optimizes Sortino Ratio calculated on trade returns relative to **downside** standard deviation.
|
||||||
* `SortinoHyperOptLossDaily` - optimizes Sortino Ratio calculated on **daily** trade returns relative to **downside** standard deviation.
|
* `SortinoHyperOptLossDaily` - optimizes Sortino Ratio calculated on **daily** trade returns relative to **downside** standard deviation.
|
||||||
* `MaxDrawDownHyperOptLoss` - Optimizes Maximum absolute drawdown.
|
* `MaxDrawDownHyperOptLoss` - Optimizes Maximum absolute drawdown.
|
||||||
* `MaxDrawDownRelativeHyperOptLoss` - Optimizes both maximum absolute drawdown while also adjusting for maximum relative drawdown.
|
* `MaxDrawDownRelativeHyperOptLoss` - Optimizes both maximum absolute drawdown while also adjusting for maximum relative drawdown.
|
||||||
* `CalmarHyperOptLoss` - Optimizes Calmar Ratio calculated on trade returns relative to max drawdown.
|
* `CalmarHyperOptLoss` - Optimizes Calmar Ratio calculated on trade returns relative to max drawdown.
|
||||||
* `ProfitDrawDownHyperOptLoss` - Optimizes by max Profit & min Drawdown objective. `DRAWDOWN_MULT` variable within the hyperoptloss file can be adjusted to be stricter or more flexible on drawdown purposes.
|
* `ProfitDrawDownHyperOptLoss` - Optimizes by max Profit & min Drawdown objective. `DRAWDOWN_MULT` variable within the hyperoptloss file can be adjusted to be stricter or more flexible on drawdown purposes.
|
||||||
* `MultiMetricHyperOptLoss` - Optimizes by several key metrics to achieve balanced performance. The primary focus is on maximizing Profit and minimizing Drawdown, while also considering additional metrics such as Profit Factor, Expectancy Ratio and Winrate. Moreover, it applies a penalty for epochs with a low number of trades, encouraging strategies with adequate trade frequency.
|
|
||||||
|
|
||||||
Creation of a custom loss function is covered in the [Advanced Hyperopt](advanced-hyperopt.md) part of the documentation.
|
Creation of a custom loss function is covered in the [Advanced Hyperopt](advanced-hyperopt.md) part of the documentation.
|
||||||
|
|
||||||
@@ -649,7 +643,6 @@ Legal values are:
|
|||||||
* `roi`: just optimize the minimal profit table for your strategy
|
* `roi`: just optimize the minimal profit table for your strategy
|
||||||
* `stoploss`: search for the best stoploss value
|
* `stoploss`: search for the best stoploss value
|
||||||
* `trailing`: search for the best trailing stop values
|
* `trailing`: search for the best trailing stop values
|
||||||
* `trades`: search for the best max open trades values
|
|
||||||
* `protection`: search for the best protection parameters (read the [protections section](#optimizing-protections) on how to properly define these)
|
* `protection`: search for the best protection parameters (read the [protections section](#optimizing-protections) on how to properly define these)
|
||||||
* `default`: `all` except `trailing` and `protection`
|
* `default`: `all` except `trailing` and `protection`
|
||||||
* space-separated list of any of the above values for example `--spaces roi stoploss`
|
* space-separated list of any of the above values for example `--spaces roi stoploss`
|
||||||
@@ -761,7 +754,7 @@ Override the `roi_space()` method if you need components of the ROI tables to va
|
|||||||
A sample for these methods can be found in the [overriding pre-defined spaces section](advanced-hyperopt.md#overriding-pre-defined-spaces).
|
A sample for these methods can be found in the [overriding pre-defined spaces section](advanced-hyperopt.md#overriding-pre-defined-spaces).
|
||||||
|
|
||||||
!!! Note "Reduced search space"
|
!!! Note "Reduced search space"
|
||||||
To limit the search space further, Decimals are limited to 3 decimal places (a precision of 0.001). This is usually sufficient, as every value more precise than this will usually result in overfitted results. You can however use [overriding pre-defined spaces](advanced-hyperopt.md#overriding-pre-defined-spaces) to change this to your needs.
|
To limit the search space further, Decimals are limited to 3 decimal places (a precision of 0.001). This is usually sufficient, as every value more precise than this will usually result in overfitted results. You can however use [overriding pre-defined spaces](advanced-hyperopt.md#overriding-pre-defined-spaces) to change this to your needs.
|
||||||
|
|
||||||
### Understand Hyperopt Stoploss results
|
### Understand Hyperopt Stoploss results
|
||||||
|
|
||||||
@@ -803,7 +796,7 @@ If you have the `stoploss_space()` method in your custom hyperopt file, remove i
|
|||||||
Override the `stoploss_space()` method and define the desired range in it if you need stoploss values to vary in other range during hyperoptimization. A sample for this method can be found in the [overriding pre-defined spaces section](advanced-hyperopt.md#overriding-pre-defined-spaces).
|
Override the `stoploss_space()` method and define the desired range in it if you need stoploss values to vary in other range during hyperoptimization. A sample for this method can be found in the [overriding pre-defined spaces section](advanced-hyperopt.md#overriding-pre-defined-spaces).
|
||||||
|
|
||||||
!!! Note "Reduced search space"
|
!!! Note "Reduced search space"
|
||||||
To limit the search space further, Decimals are limited to 3 decimal places (a precision of 0.001). This is usually sufficient, as every value more precise than this will usually result in overfitted results. You can however use [overriding pre-defined spaces](advanced-hyperopt.md#overriding-pre-defined-spaces) to change this to your needs.
|
To limit the search space further, Decimals are limited to 3 decimal places (a precision of 0.001). This is usually sufficient, as every value more precise than this will usually result in overfitted results. You can however use [overriding pre-defined spaces](advanced-hyperopt.md#overriding-pre-defined-spaces) to change this to your needs.
|
||||||
|
|
||||||
### Understand Hyperopt Trailing Stop results
|
### Understand Hyperopt Trailing Stop results
|
||||||
|
|
||||||
@@ -864,15 +857,18 @@ You can use the `--print-all` command line option if you would like to see all r
|
|||||||
|
|
||||||
## Position stacking and disabling max market positions
|
## Position stacking and disabling max market positions
|
||||||
|
|
||||||
In some situations, you may need to run Hyperopt (and Backtesting) with the `--eps`/`--enable-position-stacking` argument, or you may need to set `max_open_trades` to a very high number to disable the limit on the number of open trades.
|
In some situations, you may need to run Hyperopt (and Backtesting) with the
|
||||||
|
`--eps`/`--enable-position-stacking` and `--dmmp`/`--disable-max-market-positions` arguments.
|
||||||
|
|
||||||
By default, hyperopt emulates the behavior of the Freqtrade Live Run/Dry Run, where only one
|
By default, hyperopt emulates the behavior of the Freqtrade Live Run/Dry Run, where only one
|
||||||
open trade per pair is allowed. The total number of trades open for all pairs
|
open trade is allowed for every traded pair. The total number of trades open for all pairs
|
||||||
is also limited by the `max_open_trades` setting. During Hyperopt/Backtesting this may lead to
|
is also limited by the `max_open_trades` setting. During Hyperopt/Backtesting this may lead to
|
||||||
potential trades being hidden (or masked) by already open trades.
|
some potential trades to be hidden (or masked) by previously open trades.
|
||||||
|
|
||||||
The `--eps`/`--enable-position-stacking` argument allows emulation of buying the same pair multiple times.
|
The `--eps`/`--enable-position-stacking` argument allows emulation of buying the same pair multiple times,
|
||||||
Using `--max-open-trades` with a very high number will disable the limit on the number of open trades.
|
while `--dmmp`/`--disable-max-market-positions` disables applying `max_open_trades`
|
||||||
|
during Hyperopt/Backtesting (which is equal to setting `max_open_trades` to a very high
|
||||||
|
number).
|
||||||
|
|
||||||
!!! Note
|
!!! Note
|
||||||
Dry/live runs will **NOT** use position stacking - therefore it does make sense to also validate the strategy without this as it's closer to reality.
|
Dry/live runs will **NOT** use position stacking - therefore it does make sense to also validate the strategy without this as it's closer to reality.
|
||||||
@@ -913,44 +909,12 @@ Your epochs should therefore be aligned to the possible values - or you should b
|
|||||||
|
|
||||||
After you run Hyperopt for the desired number of epochs, you can later list all results for analysis, select only the best or profitable ones, and show the details for any of the epochs previously evaluated. This can be done with the `hyperopt-list` and `hyperopt-show` sub-commands. The usage of these sub-commands is described in the [Utils](utils.md#list-hyperopt-results) chapter.
|
After you run Hyperopt for the desired number of epochs, you can later list all results for analysis, select only the best or profitable ones, and show the details for any of the epochs previously evaluated. This can be done with the `hyperopt-list` and `hyperopt-show` sub-commands. The usage of these sub-commands is described in the [Utils](utils.md#list-hyperopt-results) chapter.
|
||||||
|
|
||||||
## Output debug messages from your strategy
|
|
||||||
|
|
||||||
If you want to output debug messages from your strategy, you can use the `logging` module. By default, Freqtrade will output all messages with a level of `INFO` or higher.
|
|
||||||
|
|
||||||
|
|
||||||
``` python
|
|
||||||
import logging
|
|
||||||
|
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
class MyAwesomeStrategy(IStrategy):
|
|
||||||
...
|
|
||||||
|
|
||||||
def populate_entry_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
|
|
||||||
logger.info("This is a debug message")
|
|
||||||
...
|
|
||||||
|
|
||||||
```
|
|
||||||
|
|
||||||
!!! Note "using print"
|
|
||||||
Messages printed via `print()` will not be shown in the hyperopt output unless parallelism is disabled (`-j 1`).
|
|
||||||
It is recommended to use the `logging` module instead.
|
|
||||||
|
|
||||||
## Validate backtesting results
|
## Validate backtesting results
|
||||||
|
|
||||||
Once the optimized strategy has been implemented into your strategy, you should backtest this strategy to make sure everything is working as expected.
|
Once the optimized strategy has been implemented into your strategy, you should backtest this strategy to make sure everything is working as expected.
|
||||||
|
|
||||||
To achieve the same results (number of trades, their durations, profit, etc.) as during Hyperopt, please use the same configuration and parameters (timerange, timeframe, ...) used for hyperopt for Backtesting.
|
To achieve the same results (number of trades, their durations, profit, etc.) as during Hyperopt, please use the same configuration and parameters (timerange, timeframe, ...) used for hyperopt `--dmmp`/`--disable-max-market-positions` and `--eps`/`--enable-position-stacking` for Backtesting.
|
||||||
|
|
||||||
### Why do my backtest results not match my hyperopt results?
|
Should results not match, please double-check to make sure you transferred all conditions correctly.
|
||||||
|
Pay special care to the stoploss (and trailing stoploss) parameters, as these are often set in configuration files, which override changes to the strategy.
|
||||||
Should results not match, check the following factors:
|
You should also carefully review the log of your backtest to ensure that there were no parameters inadvertently set by the configuration (like `stoploss` or `trailing_stop`).
|
||||||
|
|
||||||
* You may have added parameters to hyperopt in `populate_indicators()` where they will be calculated only once **for all epochs**. If you are, for example, trying to optimise multiple SMA timeperiod values, the hyperoptable timeperiod parameter should be placed in `populate_entry_trend()` which is calculated every epoch. See [Optimizing an indicator parameter](https://www.freqtrade.io/en/stable/hyperopt/#optimizing-an-indicator-parameter).
|
|
||||||
* If you have disabled the auto-export of hyperopt parameters into the JSON parameters file, double-check to make sure you transferred all hyperopted values into your strategy correctly.
|
|
||||||
* Check the logs to verify what parameters are being set and what values are being used.
|
|
||||||
* Pay special care to the stoploss, max_open_trades and trailing stoploss parameters, as these are often set in configuration files, which override changes to the strategy. Check the logs of your backtest to ensure that there were no parameters inadvertently set by the configuration (like `stoploss`, `max_open_trades` or `trailing_stop`).
|
|
||||||
* Verify that you do not have an unexpected parameters JSON file overriding the parameters or the default hyperopt settings in your strategy.
|
|
||||||
* Verify that any protections that are enabled in backtesting are also enabled when hyperopting, and vice versa. When using `--space protection`, protections are auto-enabled for hyperopting.
|
|
||||||
|
|||||||
@@ -1,43 +0,0 @@
|
|||||||
## CORS
|
|
||||||
|
|
||||||
This whole section is only necessary in cross-origin cases (where you have multiple bot APIs running on `localhost:8081`, `localhost:8082`, ...) and want to combine them into one FreqUI instance.
|
|
||||||
|
|
||||||
??? info "Technical explanation"
|
|
||||||
All web-based front-ends are subject to [CORS](https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS) - Cross-Origin Resource Sharing.
|
|
||||||
Since most of the requests to the Freqtrade API must be authenticated, a proper CORS policy is key to avoid security problems.
|
|
||||||
Also, the standard disallows `*` CORS policies for requests with credentials, so this setting must be set appropriately.
|
|
||||||
|
|
||||||
Users can allow access from different origin URL's to the bot API via the `CORS_origins` configuration setting.
|
|
||||||
It consists of a list of allowed URL's that are allowed to consume resources from the bot's API.
|
|
||||||
|
|
||||||
Assuming your application is deployed as `https://frequi.freqtrade.io/home/` - this would mean that the following configuration becomes necessary:
|
|
||||||
|
|
||||||
```jsonc
|
|
||||||
{
|
|
||||||
//...
|
|
||||||
"jwt_secret_key": "somethingrandom",
|
|
||||||
"CORS_origins": ["https://frequi.freqtrade.io"],
|
|
||||||
//...
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
In the following (pretty common) case, FreqUI is accessible on `http://localhost:8080/trade` (this is what you see in your navbar when navigating to freqUI).
|
|
||||||

|
|
||||||
|
|
||||||
The correct configuration for this case is `http://localhost:8080` - the main part of the URL including the port.
|
|
||||||
|
|
||||||
```jsonc
|
|
||||||
{
|
|
||||||
//...
|
|
||||||
"jwt_secret_key": "somethingrandom",
|
|
||||||
"CORS_origins": ["http://localhost:8080"],
|
|
||||||
//...
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
!!! Tip "trailing Slash"
|
|
||||||
The trailing slash is not allowed in the `CORS_origins` configuration (e.g. `"http://localhost:8080/"`).
|
|
||||||
Such a configuration will not take effect, and the cors errors will remain.
|
|
||||||
|
|
||||||
!!! Note
|
|
||||||
We strongly recommend to also set `jwt_secret_key` to something random and known only to yourself to avoid unauthorized access to your bot.
|
|
||||||