Mirror of https://github.com/freqtrade/freqtrade.git (synced 2026-01-20 14:00:38 +00:00)
Merge branch 'freqtrade:develop' into allow-pairs-with-prefix-in-marketcap-pairList
@@ -15,7 +15,7 @@ jobs:
environment:
name: develop
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
with:
persist-credentials: false
86  .github/workflows/ci.yml  vendored
@@ -28,7 +28,7 @@ jobs:
|
||||
python-version: ["3.11", "3.12", "3.13"]
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v5
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
@@ -38,7 +38,7 @@ jobs:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
|
||||
- name: Install uv
|
||||
uses: astral-sh/setup-uv@e92bafb6253dcd438e0484186d7669ea7a8ca1cc # v6.4.3
|
||||
uses: astral-sh/setup-uv@4959332f0f014c5280e7eac8b70c90cb574c9f9b # v6.6.0
|
||||
with:
|
||||
activate-environment: true
|
||||
enable-cache: true
|
||||
@@ -47,24 +47,9 @@ jobs:
|
||||
cache-suffix: "${{ matrix.python-version }}"
|
||||
prune-cache: false
|
||||
|
||||
- name: Cache_dependencies
|
||||
uses: actions/cache@v4
|
||||
id: cache
|
||||
with:
|
||||
path: ~/dependencies/
|
||||
key: ${{ runner.os }}-dependencies
|
||||
|
||||
- name: TA binary *nix
|
||||
if: steps.cache.outputs.cache-hit != 'true'
|
||||
run: |
|
||||
cd build_helpers && ./install_ta-lib.sh ${HOME}/dependencies/; cd ..
|
||||
|
||||
- name: Installation - *nix
|
||||
run: |
|
||||
uv pip install --upgrade wheel
|
||||
export LD_LIBRARY_PATH=${HOME}/dependencies/lib:$LD_LIBRARY_PATH
|
||||
export TA_LIBRARY_PATH=${HOME}/dependencies/lib
|
||||
export TA_INCLUDE_PATH=${HOME}/dependencies/include
|
||||
uv pip install -r requirements-dev.txt
|
||||
uv pip install -e ft_client/
|
||||
uv pip install -e .
|
||||
@@ -163,7 +148,7 @@ jobs:
|
||||
python-version: ["3.11", "3.12", "3.13"]
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v5
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
@@ -174,7 +159,7 @@ jobs:
|
||||
check-latest: true
|
||||
|
||||
- name: Install uv
|
||||
uses: astral-sh/setup-uv@e92bafb6253dcd438e0484186d7669ea7a8ca1cc # v6.4.3
|
||||
uses: astral-sh/setup-uv@4959332f0f014c5280e7eac8b70c90cb574c9f9b # v6.6.0
|
||||
with:
|
||||
activate-environment: true
|
||||
enable-cache: true
|
||||
@@ -183,18 +168,6 @@ jobs:
|
||||
cache-suffix: "${{ matrix.python-version }}"
|
||||
prune-cache: false
|
||||
|
||||
- name: Cache_dependencies
|
||||
uses: actions/cache@v4
|
||||
id: cache
|
||||
with:
|
||||
path: ~/dependencies/
|
||||
key: ${{ matrix.os }}-dependencies
|
||||
|
||||
- name: TA binary *nix
|
||||
if: steps.cache.outputs.cache-hit != 'true'
|
||||
run: |
|
||||
cd build_helpers && ./install_ta-lib.sh ${HOME}/dependencies/; cd ..
|
||||
|
||||
- name: Installation - macOS (Brew)
|
||||
run: |
|
||||
# brew update
|
||||
@@ -222,9 +195,6 @@ jobs:
|
||||
- name: Installation (python)
|
||||
run: |
|
||||
uv pip install wheel
|
||||
export LD_LIBRARY_PATH=${HOME}/dependencies/lib:$LD_LIBRARY_PATH
|
||||
export TA_LIBRARY_PATH=${HOME}/dependencies/lib
|
||||
export TA_INCLUDE_PATH=${HOME}/dependencies/include
|
||||
uv pip install -r requirements-dev.txt
|
||||
uv pip install -e ft_client/
|
||||
uv pip install -e .
|
||||
@@ -287,11 +257,11 @@ jobs:
|
||||
runs-on: ${{ matrix.os }}
|
||||
strategy:
|
||||
matrix:
|
||||
os: [ windows-latest ]
|
||||
python-version: ["3.11", "3.12", "3.13"]
|
||||
os: [ "windows-2022", "windows-2025" ]
|
||||
python-version: ["3.11", "3.12", "3.13.6"]
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v5
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
@@ -301,7 +271,7 @@ jobs:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
|
||||
- name: Install uv
|
||||
uses: astral-sh/setup-uv@e92bafb6253dcd438e0484186d7669ea7a8ca1cc # v6.4.3
|
||||
uses: astral-sh/setup-uv@4959332f0f014c5280e7eac8b70c90cb574c9f9b # v6.6.0
|
||||
with:
|
||||
activate-environment: true
|
||||
enable-cache: true
|
||||
@@ -315,7 +285,9 @@ jobs:
|
||||
function uvpipFunction { uv pip $args }
|
||||
Set-Alias -name pip -value uvpipFunction
|
||||
|
||||
./build_helpers/install_windows.ps1
|
||||
python -m pip install --upgrade pip
|
||||
pip install -r requirements-dev.txt
|
||||
pip install -e .
|
||||
|
||||
- name: Tests
|
||||
run: |
|
||||
@@ -379,7 +351,7 @@ jobs:
|
||||
mypy-version-check:
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v5
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
@@ -396,7 +368,7 @@ jobs:
|
||||
pre-commit:
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v5
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
@@ -408,7 +380,7 @@ jobs:
|
||||
docs-check:
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v5
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
@@ -439,7 +411,7 @@ jobs:
|
||||
# Run pytest with "live" checks
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v5
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
@@ -449,7 +421,7 @@ jobs:
|
||||
python-version: "3.12"
|
||||
|
||||
- name: Install uv
|
||||
uses: astral-sh/setup-uv@e92bafb6253dcd438e0484186d7669ea7a8ca1cc # v6.4.3
|
||||
uses: astral-sh/setup-uv@4959332f0f014c5280e7eac8b70c90cb574c9f9b # v6.6.0
|
||||
with:
|
||||
activate-environment: true
|
||||
enable-cache: true
|
||||
@@ -458,25 +430,9 @@ jobs:
|
||||
cache-suffix: "3.12"
|
||||
prune-cache: false
|
||||
|
||||
- name: Cache_dependencies
|
||||
uses: actions/cache@v4
|
||||
id: cache
|
||||
with:
|
||||
path: ~/dependencies/
|
||||
key: ${{ runner.os }}-dependencies
|
||||
|
||||
|
||||
- name: TA binary *nix
|
||||
if: steps.cache.outputs.cache-hit != 'true'
|
||||
run: |
|
||||
cd build_helpers && ./install_ta-lib.sh ${HOME}/dependencies/; cd ..
|
||||
|
||||
- name: Installation - *nix
|
||||
run: |
|
||||
uv pip install --upgrade wheel
|
||||
export LD_LIBRARY_PATH=${HOME}/dependencies/lib:$LD_LIBRARY_PATH
|
||||
export TA_LIBRARY_PATH=${HOME}/dependencies/lib
|
||||
export TA_INCLUDE_PATH=${HOME}/dependencies/include
|
||||
uv pip install -r requirements-dev.txt
|
||||
uv pip install -e ft_client/
|
||||
uv pip install -e .
|
||||
@@ -508,9 +464,9 @@ jobs:
|
||||
|
||||
- name: Check user permission
|
||||
id: check
|
||||
uses: scherermichael-oss/action-has-permission@136e061bfe093832d87f090dd768e14e27a740d3 # 1.0.6
|
||||
uses: prince-chrismc/check-actor-permissions-action@d504e74ba31658f4cdf4fcfeb509d4c09736d88e # v3.0.2
|
||||
with:
|
||||
required-permission: write
|
||||
permission: "write"
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
@@ -528,7 +484,7 @@ jobs:
|
||||
runs-on: ubuntu-22.04
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v5
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
@@ -575,7 +531,7 @@ jobs:
|
||||
id-token: write
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v5
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
@@ -604,7 +560,7 @@ jobs:
|
||||
id-token: write
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v5
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
|
||||
2  .github/workflows/deploy-docs.yml  vendored
@@ -19,7 +19,7 @@ jobs:
|
||||
name: Deploy Docs through mike
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v5
|
||||
with:
|
||||
persist-credentials: true
|
||||
|
||||
|
||||
2  .github/workflows/devcontainer-build.yml  vendored
@@ -24,7 +24,7 @@ jobs:
|
||||
packages: write
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v5
|
||||
with:
|
||||
persist-credentials: false
|
||||
- name: Login to GitHub Container Registry
|
||||
|
||||
7  .github/workflows/docker-build.yml  vendored
@@ -22,11 +22,12 @@ permissions:
|
||||
|
||||
jobs:
|
||||
deploy-docker:
|
||||
name: "Deploy Docker x64 and armv7l"
|
||||
runs-on: ubuntu-22.04
|
||||
if: github.repository == 'freqtrade/freqtrade'
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v5
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
@@ -74,7 +75,7 @@ jobs:
|
||||
build_helpers/publish_docker_multi.sh
|
||||
|
||||
deploy-arm:
|
||||
name: "Deploy Docker"
|
||||
name: "Deploy Docker ARM64"
|
||||
permissions:
|
||||
packages: write
|
||||
needs: [ deploy-docker ]
|
||||
@@ -83,7 +84,7 @@ jobs:
|
||||
if: github.repository == 'freqtrade/freqtrade'
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v5
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
|
||||
2  .github/workflows/docker-update-readme.yml  vendored
@@ -11,7 +11,7 @@ jobs:
|
||||
dockerHubDescription:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v5
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
|
||||
2  .github/workflows/pre-commit-update.yml  vendored
@@ -13,7 +13,7 @@ jobs:
|
||||
auto-update:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v5
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
|
||||
4  .github/workflows/zizmor.yml  vendored
@@ -21,9 +21,9 @@ jobs:
|
||||
# actions: read # only needed for private repos
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
uses: actions/checkout@ff7abcd0c3c05ccf6adc123a8cd1fd4fb30fb493 # v4.2.2
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
- name: Run zizmor 🌈
|
||||
uses: zizmorcore/zizmor-action@f52a838cfabf134edcbaa7c8b3677dde20045018 # v0.1.1
|
||||
uses: zizmorcore/zizmor-action@5ca5fc7a4779c5263a3ffa0e1f693009994446d1 # v0.1.2
|
||||
|
||||
@@ -30,9 +30,9 @@ repos:
|
||||
- types-filelock==3.2.7
|
||||
- types-requests==2.32.4.20250809
|
||||
- types-tabulate==0.9.0.20241207
|
||||
- types-python-dateutil==2.9.0.20250809
|
||||
- scipy-stubs==1.16.1.0
|
||||
- SQLAlchemy==2.0.42
|
||||
- types-python-dateutil==2.9.0.20250822
|
||||
- scipy-stubs==1.16.1.1
|
||||
- SQLAlchemy==2.0.43
|
||||
# stages: [push]
|
||||
|
||||
- repo: https://github.com/pycqa/isort
|
||||
@@ -44,13 +44,13 @@ repos:
|
||||
|
||||
- repo: https://github.com/charliermarsh/ruff-pre-commit
|
||||
# Ruff version.
|
||||
rev: 'v0.12.7'
|
||||
rev: 'v0.12.9'
|
||||
hooks:
|
||||
- id: ruff
|
||||
- id: ruff-format
|
||||
|
||||
- repo: https://github.com/pre-commit/pre-commit-hooks
|
||||
rev: v5.0.0
|
||||
rev: v6.0.0
|
||||
hooks:
|
||||
- id: end-of-file-fixer
|
||||
exclude: |
|
||||
@@ -83,6 +83,6 @@ repos:
|
||||
|
||||
# Ensure github actions remain safe
|
||||
- repo: https://github.com/woodruffw/zizmor-pre-commit
|
||||
rev: v1.11.0
|
||||
rev: v1.12.1
|
||||
hooks:
|
||||
- id: zizmor
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
FROM python:3.13.6-slim-bookworm AS base
|
||||
FROM python:3.13.7-slim-bookworm AS base
|
||||
|
||||
# Setup env
|
||||
ENV LANG=C.UTF-8
|
||||
@@ -27,11 +27,6 @@ RUN apt-get update \
|
||||
&& apt-get clean \
|
||||
&& pip install --upgrade pip wheel
|
||||
|
||||
# Install TA-lib
|
||||
COPY build_helpers/* /tmp/
|
||||
RUN cd /tmp && /tmp/install_ta-lib.sh && rm -r /tmp/*ta-lib*
|
||||
ENV LD_LIBRARY_PATH=/usr/local/lib
|
||||
|
||||
# Install dependencies
|
||||
COPY --chown=ftuser:ftuser requirements.txt requirements-hyperopt.txt /freqtrade/
|
||||
USER ftuser
|
||||
@@ -49,7 +44,7 @@ USER ftuser
|
||||
# Install and execute
|
||||
COPY --chown=ftuser:ftuser . /freqtrade/
|
||||
|
||||
RUN pip install -e . --user --no-cache-dir --no-build-isolation \
|
||||
RUN pip install -e . --user --no-cache-dir \
|
||||
&& mkdir /freqtrade/user_data/ \
|
||||
&& freqtrade install-ui
|
||||
|
||||
|
||||
@@ -1,35 +0,0 @@
|
||||
if [ -z "$1" ]; then
|
||||
INSTALL_LOC=/usr/local
|
||||
else
|
||||
INSTALL_LOC=${1}
|
||||
fi
|
||||
echo "Installing to ${INSTALL_LOC}"
|
||||
if [ -n "$2" ] || [ ! -f "${INSTALL_LOC}/lib/libta_lib.a" ]; then
|
||||
tar zxvf ta-lib-0.4.0-src.tar.gz
|
||||
cd ta-lib \
|
||||
&& sed -i.bak "s|0.00000001|0.000000000000000001 |g" src/ta_func/ta_utility.h \
|
||||
&& echo "Downloading gcc config.guess and config.sub" \
|
||||
&& curl -s 'https://raw.githubusercontent.com/gcc-mirror/gcc/master/config.guess' -o config.guess \
|
||||
&& curl -s 'https://raw.githubusercontent.com/gcc-mirror/gcc/master/config.sub' -o config.sub \
|
||||
&& ./configure --prefix=${INSTALL_LOC}/ \
|
||||
&& make
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Failed building ta-lib."
|
||||
cd .. && rm -rf ./ta-lib/
|
||||
exit 1
|
||||
fi
|
||||
if [ -z "$2" ]; then
|
||||
which sudo && sudo make install || make install
|
||||
if [ -x "$(command -v apt-get)" ]; then
|
||||
echo "Updating library path using ldconfig"
|
||||
sudo ldconfig
|
||||
fi
|
||||
else
|
||||
# Don't install with sudo
|
||||
make install
|
||||
fi
|
||||
|
||||
cd .. && rm -rf ./ta-lib/
|
||||
else
|
||||
echo "TA-lib already installed, skipping installation"
|
||||
fi
|
||||
@@ -1,10 +0,0 @@
|
||||
# vendored Wheels compiled via https://github.com/xmatthias/ta-lib-python/tree/ta_bundled_040
|
||||
|
||||
python -m pip install --upgrade pip
|
||||
python -c "import sys; print(f'{sys.version_info.major}.{sys.version_info.minor}')"
|
||||
|
||||
pip install -U wheel "numpy<3.0"
|
||||
pip install --only-binary ta-lib --find-links=build_helpers\ "ta-lib<0.6.0"
|
||||
|
||||
pip install -r requirements-dev.txt
|
||||
pip install -e .
|
||||
@@ -572,6 +572,7 @@
"pairlists": {
"description": "Configuration for pairlists.",
"type": "array",
"minItems": 1,
"items": {
"type": "object",
"properties": {
Binary file not shown. (several changed binary files, contents not shown)
BIN  build_helpers/ta_lib-0.6.5-cp311-cp311-manylinux_2_31_armv7l.whl  (new file, binary content not shown)
@@ -34,7 +34,7 @@ COPY build_helpers/* /tmp/
|
||||
# Install dependencies
|
||||
COPY --chown=ftuser:ftuser requirements.txt /freqtrade/
|
||||
USER ftuser
|
||||
RUN pip install --user --no-cache-dir "numpy<3.0" \
|
||||
RUN pip install --user --prefer-binary --no-cache-dir "numpy<3.0" build \
|
||||
&& pip install --user --no-index --find-links /tmp/ pyarrow TA-Lib \
|
||||
&& pip install --user --no-cache-dir -r requirements.txt
|
||||
|
||||
@@ -49,7 +49,7 @@ USER ftuser
|
||||
# Install and execute
|
||||
COPY --chown=ftuser:ftuser . /freqtrade/
|
||||
|
||||
RUN pip install -e . --user --no-cache-dir --no-build-isolation\
|
||||
RUN pip install -e . --user --no-cache-dir \
|
||||
&& mkdir /freqtrade/user_data/ \
|
||||
&& freqtrade install-ui
|
||||
|
||||
|
||||
@@ -46,29 +46,32 @@ ranging from the simplest (0) to the most detailed per pair, per buy and per sel

More options are available by running with the `-h` option.

### Using export-filename
### Using backtest-filename

Normally, `backtesting-analysis` uses the latest backtest results, but if you wanted to go
back to a previous backtest output, you need to supply the `--export-filename` option.
You can supply the same parameter to `backtest-analysis` with the name of the final backtest
output file. This allows you to keep historical versions of backtest results and re-analyse
them at a later date:
By default, `backtesting-analysis` processes the most recent backtest results in the `user_data/backtest_results` directory.
If you want to analyze results from an earlier backtest, use the `--backtest-filename` option to specify the desired file. This lets you revisit and re-analyze historical backtest outputs at any time by providing the filename of the relevant backtest result:

``` bash
freqtrade backtesting -c <config.json> --timeframe <tf> --strategy <strategy_name> --timerange=<timerange> --export=signals --export-filename=/tmp/mystrat_backtest.json
freqtrade backtesting-analysis -c <config.json> --timeframe <tf> --strategy <strategy_name> --timerange <timerange> --export signals --backtest-filename backtest-result-2025-03-05_20-38-34.zip
```

You should see some output similar to below in the logs with the name of the timestamped
filename that was exported:

```
2022-06-14 16:28:32,698 - freqtrade.misc - INFO - dumping json to "/tmp/mystrat_backtest-2022-06-14_16-28-32.json"
2022-06-14 16:28:32,698 - freqtrade.misc - INFO - dumping json to "mystrat_backtest-2022-06-14_16-28-32.json"
```

You can then use that filename in `backtesting-analysis`:

```
freqtrade backtesting-analysis -c <config.json> --export-filename=/tmp/mystrat_backtest-2022-06-14_16-28-32.json
freqtrade backtesting-analysis -c <config.json> --backtest-filename=mystrat_backtest-2022-06-14_16-28-32.json
```

To use a result from a different results directory, you can use `--backtest-directory` to specify the directory

``` bash
freqtrade backtesting-analysis -c <config.json> --backtest-directory custom_results/ --backtest-filename mystrat_backtest-2022-06-14_16-28-32.json
```

### Tuning the buy tags and sell tags to display
@@ -105,12 +105,14 @@ Only use this if you're sure you'll not want to plot or analyze your results further

---

Exporting trades to file specifying a custom filename
Exporting trades to file specifying a custom directory

```bash
freqtrade backtesting --strategy backtesting --export trades --export-filename=backtest_samplestrategy.json
freqtrade backtesting --strategy backtesting --export trades --backtest-directory=user_data/custom-backtest-results
```

---

Please also read about the [strategy startup period](strategy-customization.md#strategy-startup-period).

---
@@ -2,7 +2,8 @@
|
||||
usage: freqtrade backtesting-analysis [-h] [-v] [--no-color] [--logfile FILE]
|
||||
[-V] [-c PATH] [-d PATH]
|
||||
[--userdir PATH]
|
||||
[--export-filename PATH]
|
||||
[--backtest-filename PATH]
|
||||
[--backtest-directory PATH]
|
||||
[--analysis-groups {0,1,2,3,4,5} [{0,1,2,3,4,5} ...]]
|
||||
[--enter-reason-list ENTER_REASON_LIST [ENTER_REASON_LIST ...]]
|
||||
[--exit-reason-list EXIT_REASON_LIST [EXIT_REASON_LIST ...]]
|
||||
@@ -14,10 +15,15 @@ usage: freqtrade backtesting-analysis [-h] [-v] [--no-color] [--logfile FILE]
|
||||
|
||||
options:
|
||||
-h, --help show this help message and exit
|
||||
--export-filename PATH, --backtest-filename PATH
|
||||
Use this filename for backtest results.Requires
|
||||
`--export` to be set as well. Example: `--export-filen
|
||||
ame=user_data/backtest_results/backtest_today.json`
|
||||
--backtest-filename PATH, --export-filename PATH
|
||||
Use this filename for backtest results.Example:
|
||||
`--backtest-
|
||||
filename=backtest_results_2020-09-27_16-20-48.json`.
|
||||
Assumes either `user_data/backtest_results/` or
|
||||
`--export-directory` as base directory.
|
||||
--backtest-directory PATH, --export-directory PATH
|
||||
Directory to use for backtest results. Example:
|
||||
`--export-directory=user_data/backtest_results/`.
|
||||
--analysis-groups {0,1,2,3,4,5} [{0,1,2,3,4,5} ...]
|
||||
grouping output - 0: simple wins/losses by enter tag,
|
||||
1: by enter_tag, 2: by enter_tag and exit_tag, 3: by
|
||||
|
||||
@@ -1,15 +1,22 @@
|
||||
```
|
||||
usage: freqtrade backtesting-show [-h] [-v] [--no-color] [--logfile FILE] [-V]
|
||||
[-c PATH] [-d PATH] [--userdir PATH]
|
||||
[--export-filename PATH] [--show-pair-list]
|
||||
[--backtest-filename PATH]
|
||||
[--backtest-directory PATH]
|
||||
[--show-pair-list]
|
||||
[--breakdown {day,week,month,year} [{day,week,month,year} ...]]
|
||||
|
||||
options:
|
||||
-h, --help show this help message and exit
|
||||
--export-filename PATH, --backtest-filename PATH
|
||||
Use this filename for backtest results.Requires
|
||||
`--export` to be set as well. Example: `--export-filen
|
||||
ame=user_data/backtest_results/backtest_today.json`
|
||||
--backtest-filename PATH, --export-filename PATH
|
||||
Use this filename for backtest results.Example:
|
||||
`--backtest-
|
||||
filename=backtest_results_2020-09-27_16-20-48.json`.
|
||||
Assumes either `user_data/backtest_results/` or
|
||||
`--export-directory` as base directory.
|
||||
--backtest-directory PATH, --export-directory PATH
|
||||
Directory to use for backtest results. Example:
|
||||
`--export-directory=user_data/backtest_results/`.
|
||||
--show-pair-list Show backtesting pairlist sorted by profit.
|
||||
--breakdown {day,week,month,year} [{day,week,month,year} ...]
|
||||
Show backtesting breakdown per [day, week, month,
|
||||
|
||||
@@ -14,7 +14,8 @@ usage: freqtrade backtesting [-h] [-v] [--no-color] [--logfile FILE] [-V]
|
||||
[--timeframe-detail TIMEFRAME_DETAIL]
|
||||
[--strategy-list STRATEGY_LIST [STRATEGY_LIST ...]]
|
||||
[--export {none,trades,signals}]
|
||||
[--export-filename PATH]
|
||||
[--backtest-filename PATH]
|
||||
[--backtest-directory PATH]
|
||||
[--breakdown {day,week,month,year} [{day,week,month,year} ...]]
|
||||
[--cache {none,day,week,month}]
|
||||
[--freqai-backtest-live-models] [--notes TEXT]
|
||||
@@ -61,10 +62,15 @@ options:
|
||||
becomes `backtest-data-SampleStrategy.json`
|
||||
--export {none,trades,signals}
|
||||
Export backtest results (default: trades).
|
||||
--export-filename PATH, --backtest-filename PATH
|
||||
Use this filename for backtest results.Requires
|
||||
`--export` to be set as well. Example: `--export-filen
|
||||
ame=user_data/backtest_results/backtest_today.json`
|
||||
--backtest-filename PATH, --export-filename PATH
|
||||
Use this filename for backtest results.Example:
|
||||
`--backtest-
|
||||
filename=backtest_results_2020-09-27_16-20-48.json`.
|
||||
Assumes either `user_data/backtest_results/` or
|
||||
`--export-directory` as base directory.
|
||||
--backtest-directory PATH, --export-directory PATH
|
||||
Directory to use for backtest results. Example:
|
||||
`--export-directory=user_data/backtest_results/`.
|
||||
--breakdown {day,week,month,year} [{day,week,month,year} ...]
|
||||
Show backtesting breakdown per [day, week, month,
|
||||
year].
|
||||
|
||||
@@ -15,7 +15,8 @@ usage: freqtrade lookahead-analysis [-h] [-v] [--no-color] [--logfile FILE]
|
||||
[--timeframe-detail TIMEFRAME_DETAIL]
|
||||
[--strategy-list STRATEGY_LIST [STRATEGY_LIST ...]]
|
||||
[--export {none,trades,signals}]
|
||||
[--export-filename PATH]
|
||||
[--backtest-filename PATH]
|
||||
[--backtest-directory PATH]
|
||||
[--freqai-backtest-live-models]
|
||||
[--minimum-trade-amount INT]
|
||||
[--targeted-trade-amount INT]
|
||||
@@ -60,10 +61,15 @@ options:
|
||||
becomes `backtest-data-SampleStrategy.json`
|
||||
--export {none,trades,signals}
|
||||
Export backtest results (default: trades).
|
||||
--export-filename PATH, --backtest-filename PATH
|
||||
Use this filename for backtest results.Requires
|
||||
`--export` to be set as well. Example: `--export-filen
|
||||
ame=user_data/backtest_results/backtest_today.json`
|
||||
--backtest-filename PATH, --export-filename PATH
|
||||
Use this filename for backtest results.Example:
|
||||
`--backtest-
|
||||
filename=backtest_results_2020-09-27_16-20-48.json`.
|
||||
Assumes either `user_data/backtest_results/` or
|
||||
`--export-directory` as base directory.
|
||||
--backtest-directory PATH, --export-directory PATH
|
||||
Directory to use for backtest results. Example:
|
||||
`--export-directory=user_data/backtest_results/`.
|
||||
--freqai-backtest-live-models
|
||||
Run backtest with ready models.
|
||||
--minimum-trade-amount INT
|
||||
|
||||
@@ -10,7 +10,7 @@ usage: freqtrade plot-dataframe [-h] [-v] [--no-color] [--logfile FILE] [-V]
|
||||
[--plot-limit INT] [--db-url PATH]
|
||||
[--trade-source {DB,file}]
|
||||
[--export {none,trades,signals}]
|
||||
[--export-filename PATH]
|
||||
[--backtest-filename PATH]
|
||||
[--timerange TIMERANGE] [-i TIMEFRAME]
|
||||
[--no-trades]
|
||||
|
||||
@@ -38,10 +38,12 @@ options:
|
||||
(backtest file)) Default: file
|
||||
--export {none,trades,signals}
|
||||
Export backtest results (default: trades).
|
||||
--export-filename PATH, --backtest-filename PATH
|
||||
Use this filename for backtest results.Requires
|
||||
`--export` to be set as well. Example: `--export-filen
|
||||
ame=user_data/backtest_results/backtest_today.json`
|
||||
--backtest-filename PATH, --export-filename PATH
|
||||
Use this filename for backtest results.Example:
|
||||
`--backtest-
|
||||
filename=backtest_results_2020-09-27_16-20-48.json`.
|
||||
Assumes either `user_data/backtest_results/` or
|
||||
`--export-directory` as base directory.
|
||||
--timerange TIMERANGE
|
||||
Specify what timerange of data to use.
|
||||
-i TIMEFRAME, --timeframe TIMEFRAME
|
||||
|
||||
@@ -6,7 +6,7 @@ usage: freqtrade plot-profit [-h] [-v] [--no-color] [--logfile FILE] [-V]
|
||||
[--freqaimodel NAME] [--freqaimodel-path PATH]
|
||||
[-p PAIRS [PAIRS ...]] [--timerange TIMERANGE]
|
||||
[--export {none,trades,signals}]
|
||||
[--export-filename PATH] [--db-url PATH]
|
||||
[--backtest-filename PATH] [--db-url PATH]
|
||||
[--trade-source {DB,file}] [-i TIMEFRAME]
|
||||
[--auto-open]
|
||||
|
||||
@@ -19,10 +19,12 @@ options:
|
||||
Specify what timerange of data to use.
|
||||
--export {none,trades,signals}
|
||||
Export backtest results (default: trades).
|
||||
--export-filename PATH, --backtest-filename PATH
|
||||
Use this filename for backtest results.Requires
|
||||
`--export` to be set as well. Example: `--export-filen
|
||||
ame=user_data/backtest_results/backtest_today.json`
|
||||
--backtest-filename PATH, --export-filename PATH
|
||||
Use this filename for backtest results.Example:
|
||||
`--backtest-
|
||||
filename=backtest_results_2020-09-27_16-20-48.json`.
|
||||
Assumes either `user_data/backtest_results/` or
|
||||
`--export-directory` as base directory.
|
||||
--db-url PATH Override trades database URL, this is useful in custom
|
||||
deployments (default: `sqlite:///tradesv3.sqlite` for
|
||||
Live Run mode, `sqlite:///tradesv3.dryrun.sqlite` for
|
||||
|
||||
@@ -566,8 +566,8 @@ Configuration:

### Understand order_time_in_force

The `order_time_in_force` configuration parameter defines the policy by which the order
is executed on the exchange. Three commonly used time in force are:
The `order_time_in_force` configuration parameter defines the policy by which the order is executed on the exchange.
Commonly used time in force are:

**GTC (Good Till Canceled):**

@@ -589,11 +589,13 @@ is automatically cancelled by the exchange.
Post only order. The order is either placed as a maker order, or it is canceled.
This means the order must be placed on orderbook for at least time in an unfilled state.

Please check the [Exchange documentation](exchanges.md) for supported time in force values for your exchange.

#### time_in_force config

The `order_time_in_force` parameter contains a dict with entry and exit time in force policy values.
This can be set in the configuration file or in the strategy.
Values set in the configuration file overwrites values set in the strategy.
Values set in the configuration file overwrite values from in the strategy, following the regular [precedence rules](#configuration-option-prevalence).

The possible values are: `GTC` (default), `FOK` or `IOC`.
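For illustration only (editorial note, not part of the commit diff): a minimal sketch of the `order_time_in_force` section described above. The `entry`/`exit` keys and the `GTC`/`FOK`/`IOC` values come from the documentation text; the surrounding structure is an assumption for the example.

```python
# Minimal sketch of an order_time_in_force configuration fragment (illustrative only).
# Keys and allowed values follow the documentation above; the defaults shown are assumptions.
order_time_in_force_fragment = {
    "order_time_in_force": {
        "entry": "GTC",  # policy for entry orders: "GTC" (default), "FOK" or "IOC"
        "exit": "GTC",   # policy for exit orders
    }
}
```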
@@ -605,9 +607,9 @@ The possible values are: `GTC` (default), `FOK` or `IOC`.
```

!!! Warning
This is ongoing work. For now, it is supported only for binance, gate and kucoin.
Please don't change the default value unless you know what you are doing and have researched the impact of using different values for your particular exchange.

### Fiat conversion

Freqtrade uses the Coingecko API to convert the coin value to it's corresponding fiat value for the Telegram reports.
@@ -227,7 +227,7 @@ Kucoin requires a passphrase for each api key, you will therefore need to add th
}
```

Kucoin supports [time_in_force](configuration.md#understand-order_time_in_force).
Kucoin supports [time_in_force](configuration.md#understand-order_time_in_force) with settings "GTC" (good till cancelled), "FOK" (full-or-cancel) and "IOC" (immediate-or-cancel) settings.

!!! Tip "Stoploss on Exchange"
Kucoin supports `stoploss_on_exchange` and can use both stop-loss-market and stop-loss-limit orders. It provides great advantages, so we recommend to benefit from it.
@@ -271,7 +271,9 @@ Using the wrong exchange will result in the error "OKX Error 50119: API key does
## Gate.io

!!! Tip "Stoploss on Exchange"
Gate.io supports `stoploss_on_exchange` and uses `stop-loss-limit` orders. It provides great advantages, so we recommend to benefit from it by enabling stoploss on exchange..
Gate.io supports `stoploss_on_exchange` and uses `stop-loss-limit` orders. It provides great advantages, so we recommend to benefit from it by enabling stoploss on exchange.

Gate.io supports [time_in_force](configuration.md#understand-order_time_in_force) with settings "GTC" (good till cancelled), and "IOC" (immediate-or-cancel) settings.

Gate.io allows the use of `POINT` to pay for fees. As this is not a tradable currency (no regular market available), automatic fee calculations will fail (and default to a fee of 0).
The configuration parameter `exchange.unknown_fee_rate` can be used to specify the exchange rate between Point and the stake currency. Obviously, changing the stake-currency will also require changes to this value.
@@ -286,9 +288,15 @@ Without these permissions, the bot will not start correctly and show errors like

## Bybit

Futures trading on bybit is currently supported for USDT markets, and will use isolated futures mode.
!!! Tip "Stoploss on Exchange"
Bybit (futures only) supports `stoploss_on_exchange` and uses `stop-loss-limit` orders. It provides great advantages, so we recommend to benefit from it by enabling stoploss on exchange.
On futures, Bybit supports both `stop-limit` as well as `stop-market` orders. You can use either `"limit"` or `"market"` in the `order_types.stoploss` configuration setting to decide which type to use.

On startup, freqtrade will set the position mode to "One-way Mode" for the whole (sub)account. This avoids making this call over and over again (slowing down bot operations), but means that changes to this setting may result in exceptions and errors.
Bybit supports [time_in_force](configuration.md#understand-order_time_in_force) with settings "GTC" (good till cancelled), "FOK" (full-or-cancel), "IOC" (immediate-or-cancel) and "PO" (Post only) settings.

Futures trading on bybit is currently supported for isolated futures mode.

On startup, freqtrade will set the position mode to "One-way Mode" for the whole (sub)account. This avoids making this call over and over again (slowing down bot operations), but means that manual changes to this setting may result in exceptions and errors.

As bybit doesn't provide funding rate history, the dry-run calculation is used for live trades as well.

@@ -305,11 +313,6 @@ We do strongly recommend to limit all API keys to the IP you're going to use it
We therefore recommend the usage of one subaccount per bot. This is especially important when using unified accounts.
Other configurations (multiple bots on one account, manual non-bot trades on the bot account) are not supported and may lead to unexpected behavior.

!!! Tip "Stoploss on Exchange"
Bybit (futures only) supports `stoploss_on_exchange` and uses `stop-loss-limit` orders. It provides great advantages, so we recommend to benefit from it by enabling stoploss on exchange.
On futures, Bybit supports both `stop-limit` as well as `stop-market` orders. You can use either `"limit"` or `"market"` in the `order_types.stoploss` configuration setting to decide which type to use.

## Bitmart

Bitmart requires the API key Memo (the name you give the API key) to go along with the exchange key and secret.
@@ -342,7 +345,11 @@ Bitget requires a passphrase for each api key, you will therefore need to add th
}
```

Bitget supports [time_in_force](configuration.md#understand-order_time_in_force).
Bitget supports [time_in_force](configuration.md#understand-order_time_in_force) with settings "GTC" (good till cancelled), "FOK" (full-or-cancel), "IOC" (immediate-or-cancel) and "PO" (Post only) settings.

!!! Tip "Stoploss on Exchange"
Bitget supports `stoploss_on_exchange` and can use both stop-loss-market and stop-loss-limit orders. It provides great advantages, so we recommend to benefit from it.
You can use either `"limit"` or `"market"` in the `order_types.stoploss` configuration setting to decide which type of stoploss shall be used.
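Editorial illustration (not part of the commit diff): the `order_types.stoploss` choice mentioned in the Bybit and Bitget tips above could be expressed as the following configuration fragment. Only `stoploss` and `stoploss_on_exchange` are taken from the text; the other keys are assumptions for the example.

```python
# Sketch of an order_types fragment for exchange-side stoploss (illustrative only).
order_types_fragment = {
    "order_types": {
        "entry": "limit",              # assumed entry order type
        "exit": "limit",               # assumed exit order type
        "stoploss": "market",          # "limit" or "market", per the tips above
        "stoploss_on_exchange": True,  # place the stoploss on the exchange
    }
}
```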
|
||||
## Hyperliquid
|
||||
|
||||
|
||||
@@ -46,7 +46,6 @@ These requirements apply to both [Script Installation](#script-installation) and
|
||||
* [pip](https://pip.pypa.io/en/stable/installing/)
|
||||
* [git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git)
|
||||
* [virtualenv](https://virtualenv.pypa.io/en/stable/installation.html) (Recommended)
|
||||
* [TA-Lib](https://ta-lib.github.io/ta-lib-python/) (install instructions [below](#install-ta-lib))
|
||||
|
||||
### Install code
|
||||
|
||||
@@ -201,35 +200,6 @@ This option will hard reset your branch (only if you are on either `stable` or `
|
||||
|
||||
Make sure you fulfill the [Requirements](#requirements) and have downloaded the [Freqtrade repository](#freqtrade-repository).
|
||||
|
||||
### Install TA-Lib
|
||||
|
||||
#### TA-Lib script installation
|
||||
|
||||
```bash
|
||||
sudo ./build_helpers/install_ta-lib.sh
|
||||
```
|
||||
|
||||
!!! Note
|
||||
This will use the ta-lib tar.gz included in this repository.
|
||||
|
||||
##### TA-Lib manual installation
|
||||
|
||||
[Official installation guide](https://ta-lib.github.io/ta-lib-python/install.html)
|
||||
|
||||
```bash
|
||||
wget http://prdownloads.sourceforge.net/ta-lib/ta-lib-0.4.0-src.tar.gz
|
||||
tar xvzf ta-lib-0.4.0-src.tar.gz
|
||||
cd ta-lib
|
||||
sed -i.bak "s|0.00000001|0.000000000000000001 |g" src/ta_func/ta_utility.h
|
||||
./configure --prefix=/usr/local
|
||||
make
|
||||
sudo make install
|
||||
# On debian based systems (debian, ubuntu, ...) - updating ldconfig might be necessary.
|
||||
sudo ldconfig
|
||||
cd ..
|
||||
rm -rf ./ta-lib*
|
||||
```
|
||||
|
||||
### Setup Python virtual environment (virtualenv)
|
||||
|
||||
You will run freqtrade in separated `virtual environment`
|
||||
@@ -332,16 +302,6 @@ python3 -m pip install -r requirements.txt
|
||||
python3 -m pip install -e .
|
||||
```
|
||||
|
||||
Patch conda libta-lib (Linux only)
|
||||
|
||||
```bash
|
||||
# Ensure that the environment is active!
|
||||
conda activate freqtrade
|
||||
|
||||
cd build_helpers
|
||||
bash install_ta-lib.sh ${CONDA_PREFIX} nosudo
|
||||
```
|
||||
|
||||
[You are now ready](#you-are-ready) to run the bot.
|
||||
|
||||
### Important shortcuts
|
||||
|
||||
@@ -38,7 +38,7 @@ Many strategies, without the programmer knowing, have fallen prey to lookahead b
|
||||
This typically makes the strategy backtest look profitable, sometimes to extremes, but this is not realistic as the strategy is "cheating" by looking at data it would not have in dry or live modes.
|
||||
|
||||
The reason why strategies can "cheat" is because the freqtrade backtesting process populates the full dataframe including all candle timestamps at the outset.
|
||||
If the programmer is not careful or oblivious how things work internally
|
||||
If the programmer is not careful or oblivious how things work internally
|
||||
(which sometimes can be really hard to find out) then the strategy will look into the future.
|
||||
|
||||
This command is made to try to verify the validity in the form of the aforementioned lookahead bias.
|
||||
@@ -50,8 +50,7 @@ After this initial backtest runs, it will look if the `minimum-trade-amount` is
|
||||
If this happens, use a wider timerange to get more trades for the analysis, or use a timerange where more trades occur.
|
||||
|
||||
After setting the baseline it will then do additional backtest runs for every entry and exit separately.
|
||||
When these verification backtests complete, it will compare the indicators at the signal candles (both entry or exit)
|
||||
and report the bias.
|
||||
When these verification backtests complete, it will compare both dataframes (baseline and sliced) for any difference in columns' value and report the bias.
|
||||
After all signals have been verified or falsified a result table will be generated for the user to see.
|
||||
|
||||
### How to find and remove bias? How can I salvage a biased strategy?
|
||||
@@ -98,8 +97,8 @@ If the strategy has many different signals / signal types, it's up to you to sel
|
||||
This would lead to a false-negative, i.e. the strategy will be reported as non-biased.
|
||||
- `lookahead-analysis` has access to the same backtesting options and this can introduce problems.
|
||||
Please don't use any options like enabling position stacking as this will distort the number of checked signals.
|
||||
If you decide to do so, then make doubly sure that you won't ever run out of `max_open_trades` slots,
|
||||
If you decide to do so, then make doubly sure that you won't ever run out of `max_open_trades` slots,
|
||||
and that you have enough capital in the backtest wallet configuration.
|
||||
- In the results table, the `biased_indicators` column
|
||||
- In the results table, the `biased_indicators` column
|
||||
will falsely flag FreqAI target indicators defined in `set_freqai_targets()` as biased.
|
||||
**These are not biased and can safely be ignored.**
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
markdown==3.8.2
|
||||
mkdocs==1.6.1
|
||||
mkdocs-material==9.6.16
|
||||
mkdocs-material==9.6.18
|
||||
mdx_truly_sane_lists==1.3
|
||||
pymdown-extensions==10.16.1
|
||||
jinja2==3.1.6
|
||||
|
||||
@@ -31,6 +31,7 @@ The Order-type will be ignored if only one mode is available.
|
||||
| Binance | limit |
|
||||
| Binance Futures | market, limit |
|
||||
| Bingx | market, limit |
|
||||
| Bitget | market, limit |
|
||||
| HTX | limit |
|
||||
| kraken | market, limit |
|
||||
| Gate | limit |
|
||||
|
||||
@@ -42,7 +42,3 @@ freqtrade install-ui
|
||||
|
||||
Update-problems usually come missing dependencies (you didn't follow the above instructions) - or from updated dependencies, which fail to install (for example TA-lib).
|
||||
Please refer to the corresponding installation sections (common problems linked below)
|
||||
|
||||
Common problems and their solutions:
|
||||
|
||||
* [ta-lib update on windows](windows_installation.md#install-ta-lib)
|
||||
|
||||
@@ -38,30 +38,6 @@ cd freqtrade
|
||||
!!! Hint
|
||||
Using the [Anaconda Distribution](https://www.anaconda.com/distribution/) under Windows can greatly help with installation problems. Check out the [Anaconda installation section](installation.md#installation-with-conda) in the documentation for more information.
|
||||
|
||||
### Install ta-lib
|
||||
|
||||
Install ta-lib according to the [ta-lib documentation](https://github.com/TA-Lib/ta-lib-python#windows).
|
||||
|
||||
As compiling from source on windows has heavy dependencies (requires a partial visual studio installation), Freqtrade provides these dependencies (in the binary wheel format) for the latest 3 Python versions (3.11, 3.12 and 3.13) and for 64bit Windows.
|
||||
These Wheels are also used by CI running on windows, and are therefore tested together with freqtrade.
|
||||
|
||||
Other versions must be downloaded from the above link.
|
||||
|
||||
``` powershell
|
||||
cd \path\freqtrade
|
||||
python -m venv .venv
|
||||
.venv\Scripts\activate.ps1
|
||||
# optionally install ta-lib from wheel
|
||||
# Eventually adjust the below filename to match the downloaded wheel
|
||||
pip install --find-links build_helpers\ TA-Lib -U
|
||||
pip install -r requirements.txt
|
||||
pip install -e .
|
||||
freqtrade
|
||||
```
|
||||
|
||||
!!! Note "Use Powershell"
|
||||
The above installation script assumes you're using powershell on a 64bit windows.
|
||||
Commands for the legacy CMD windows console may differ.
|
||||
|
||||
### Error during installation on Windows
|
||||
|
||||
|
||||
@@ -17,7 +17,7 @@ def start_analysis_entries_exits(args: dict[str, Any]) -> None:
from freqtrade.data.entryexitanalysis import process_entry_exit_reasons

# Initialize configuration
config = setup_utils_configuration(args, RunMode.BACKTEST)
config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)

logger.info("Starting freqtrade in analysis mode")
@@ -54,6 +54,7 @@ ARGS_BACKTEST = [
"strategy_list",
"export",
"exportfilename",
"exportdirectory",
"backtest_breakdown",
"backtest_cache",
"freqai_backtest_live_models",
@@ -94,7 +95,12 @@ ARGS_LIST_FREQAIMODELS = ["freqaimodel_path", "print_one_column"]

ARGS_LIST_HYPEROPTS = ["hyperopt_path", "print_one_column"]

ARGS_BACKTEST_SHOW = ["exportfilename", "backtest_show_pair_list", "backtest_breakdown"]
ARGS_BACKTEST_SHOW = [
"exportfilename",
"exportdirectory",
"backtest_show_pair_list",
"backtest_breakdown",
]

ARGS_LIST_EXCHANGES = ["print_one_column", "list_exchanges_all", "trading_mode", "dex_exchanges"]

@@ -233,6 +239,7 @@ ARGS_HYPEROPT_SHOW = [

ARGS_ANALYZE_ENTRIES_EXITS = [
"exportfilename",
"exportdirectory",
"analysis_groups",
"enter_reason_list",
"exit_reason_list",
@@ -199,22 +199,29 @@ AVAILABLE_CLI_OPTIONS = {
"(so `backtest-data.json` becomes `backtest-data-SampleStrategy.json`",
nargs="+",
),
"export": Arg(
"--export",
help="Export backtest results (default: trades).",
choices=constants.EXPORT_OPTIONS,
),
"backtest_notes": Arg(
"--notes",
help="Add notes to the backtest results.",
metavar="TEXT",
),
"export": Arg(
"--export",
help="Export backtest results (default: trades).",
choices=constants.EXPORT_OPTIONS,
),
"exportdirectory": Arg(
"--backtest-directory",
"--export-directory",
help="Directory to use for backtest results. "
"Example: `--export-directory=user_data/backtest_results/`. ",
metavar="PATH",
),
"exportfilename": Arg(
"--export-filename",
"--backtest-filename",
"--export-filename",
help="Use this filename for backtest results."
"Requires `--export` to be set as well. "
"Example: `--export-filename=user_data/backtest_results/backtest_today.json`",
"Example: `--backtest-filename=backtest_results_2020-09-27_16-20-48.json`. "
"Assumes either `user_data/backtest_results/` or `--export-directory` as base directory.",
metavar="PATH",
),
"disableparamexport": Arg(
@@ -146,6 +146,9 @@ def start_list_strategies(args: dict[str, Any]) -> None:
strategy_objs = StrategyResolver.search_all_objects(
config, not args["print_one_column"], config.get("recursive_strategy_search", False)
)
if not strategy_objs:
logger.warning("No strategies found.")
return
# Sort alphabetically
strategy_objs = sorted(strategy_objs, key=lambda x: x["name"])
for obj in strategy_objs:
@@ -72,7 +72,7 @@ def start_backtesting_show(args: dict[str, Any]) -> None:
from freqtrade.data.btanalysis import load_backtest_stats
from freqtrade.optimize.optimize_reports import show_backtest_results, show_sorted_pairlist

results = load_backtest_stats(config["exportfilename"])
results = load_backtest_stats(config["exportdirectory"], config["exportfilename"])

show_backtest_results(config, results)
show_sorted_pairlist(config, results)
@@ -453,6 +453,7 @@ CONF_SCHEMA = {
"pairlists": {
"description": "Configuration for pairlists.",
"type": "array",
"minItems": 1,
"items": {
"type": "object",
"properties": {
@@ -1381,6 +1382,7 @@ SCHEMA_TRADE_REQUIRED = [
"entry_pricing",
"stoploss",
"minimal_roi",
"pairlists",
"internals",
"dataformat_ohlcv",
"dataformat_trades",
@@ -1390,6 +1392,7 @@ SCHEMA_BACKTEST_REQUIRED = [
"exchange",
"stake_currency",
"stake_amount",
"pairlists",
"dry_run_wallet",
"dataformat_ohlcv",
"dataformat_trades",
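Editorial note (not part of the commit diff): with "pairlists" added to both required-key lists above, a configuration now has to carry a pairlists section. A minimal sketch, where the `StaticPairList` method name is an assumption for the example:

```python
# Minimal sketch of the now-required "pairlists" entry (illustrative only).
pairlists_fragment = {
    "pairlists": [
        {"method": "StaticPairList"},  # assumes pair_whitelist is defined elsewhere in the config
    ]
}
```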
@@ -66,7 +66,8 @@ def validate_config_schema(conf: dict[str, Any], preliminary: bool = False) -> d
return conf
except ValidationError as e:
logger.critical(f"Invalid configuration. Reason: {e}")
raise ValidationError(best_match(Draft4Validator(conf_schema).iter_errors(conf)).message)
result = best_match(FreqtradeValidator(conf_schema).iter_errors(conf))
raise ConfigurationError(result.message)


def validate_config_consistency(conf: dict[str, Any], *, preliminary: bool = False) -> None:
@@ -84,9 +84,6 @@ class Configuration:
if "internals" not in config:
config["internals"] = {}

if "pairlists" not in config:
config["pairlists"] = []

# Keep a copy of the original configuration file
config["original_config"] = deepcopy(config)
@@ -212,13 +209,28 @@ class Configuration:
config.update({"datadir": create_datadir(config, self.args.get("datadir"))})
logger.info("Using data directory: %s ...", config.get("datadir"))

self._args_to_config(
config, argname="exportdirectory", logstring="Using {} as backtest directory ..."
)

if self.args.get("exportfilename"):
self._args_to_config(
config, argname="exportfilename", logstring="Storing backtest results to {} ..."
)
config["exportfilename"] = Path(config["exportfilename"])
else:
config["exportfilename"] = config["user_data_dir"] / "backtest_results"
if config.get("exportfilename") and Path(config["exportfilename"]).is_dir():
logger.warning(
"DEPRECATED: Using `--export-filename` with directories is deprecated, "
"use `--backtest-directory` instead."
)
if config.get("exportdirectory") is None:
# Fallback - assign export-directory directly.
config["exportdirectory"] = config["exportfilename"]
if not config.get("exportdirectory"):
config["exportdirectory"] = config["user_data_dir"] / "backtest_results"
if not config.get("exportfilename"):
config["exportfilename"] = None
config["exportdirectory"] = Path(config["exportdirectory"])

if self.args.get("show_sensitive"):
logger.warning(
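Editorial illustration (not part of the commit diff): roughly how `exportdirectory` and `exportfilename` would end up populated by the resolution logic above for typical CLI combinations, assuming the default `user_data` directory. The exact outcomes depend on the final merged code and are an assumption here.

```python
from pathlib import Path

# Assumed outcomes of the export-directory / export-filename resolution (illustrative only):
expected = {
    "no flags": {
        "exportdirectory": Path("user_data/backtest_results"),
        "exportfilename": None,
    },
    "--backtest-filename=result.zip": {
        "exportdirectory": Path("user_data/backtest_results"),
        "exportfilename": Path("result.zip"),
    },
    "--backtest-directory=custom_results/": {
        "exportdirectory": Path("custom_results"),
        "exportfilename": None,
    },
}
```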
@@ -16,10 +16,7 @@ from .bt_fileutils import (
load_backtest_data,
load_backtest_metadata,
load_backtest_stats,
load_exit_signal_candles,
load_file_from_zip,
load_rejected_signals,
load_signal_candles,
load_trades,
load_trades_from_db,
trade_list_to_dataframe,
@@ -155,33 +155,55 @@ def load_backtest_metadata(filename: Path | str) -> dict[str, Any]:
raise OperationalException("Unexpected error while loading backtest metadata.") from e


def load_backtest_stats(filename: Path | str) -> BacktestResultType:
def _normalize_filename(file_or_directory: Path | str, filename: Path | str | None) -> Path:
"""
Normalize the filename by ensuring it is a Path object.
:param file_or_directory: The directory or file to normalize.
:param filename: The filename to normalize.
:return: A Path object representing the normalized filename.
"""
if isinstance(file_or_directory, str):
file_or_directory = Path(file_or_directory)
if file_or_directory.is_dir():
if not filename:
filename = get_latest_backtest_filename(file_or_directory)
if Path(filename).is_file():
fn = Path(filename)
else:
fn = file_or_directory / filename
else:
fn = file_or_directory
return fn


def load_backtest_stats(
file_or_directory: Path | str, filename: Path | str | None = None
) -> BacktestResultType:
"""
Load backtest statistics file.
:param filename: pathlib.Path object, or string pointing to the file.
:param file_or_directory: pathlib.Path object, or string pointing to the directory,
or absolute/relative path to the backtest results file.
:param filename: Optional filename to load from (if different from the main filename).
Only valid when loading from a directory.
:return: a dictionary containing the resulting file.
"""
if isinstance(filename, str):
filename = Path(filename)
if filename.is_dir():
filename = filename / get_latest_backtest_filename(filename)
if not filename.is_file():
raise ValueError(f"File {filename} does not exist.")
logger.info(f"Loading backtest result from {filename}")
fn = _normalize_filename(file_or_directory, filename)

if filename.suffix == ".zip":
if not fn.is_file():
raise ValueError(f"File or directory {fn} does not exist.")
logger.info(f"Loading backtest result from {fn}")

if fn.suffix == ".zip":
data = json_load(
StringIO(
load_file_from_zip(filename, filename.with_suffix(".json").name).decode("utf-8")
)
StringIO(load_file_from_zip(fn, fn.with_suffix(".json").name).decode("utf-8"))
)
else:
with filename.open() as file:
with fn.open() as file:
data = json_load(file)

# Legacy list format does not contain metadata.
if isinstance(data, dict):
data["metadata"] = load_backtest_metadata(filename)
data["metadata"] = load_backtest_metadata(fn)
return data
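Editorial illustration (not part of the commit diff): how the new `load_backtest_stats` signature above might be called. The paths and filenames are assumptions for the example.

```python
from pathlib import Path

from freqtrade.data.btanalysis import load_backtest_stats

results_dir = Path("user_data/backtest_results")  # illustrative directory

# Latest result found in the directory (no explicit filename):
stats_latest = load_backtest_stats(results_dir)

# A specific result file inside that directory:
stats_specific = load_backtest_stats(results_dir, "backtest-result-2025-03-05_20-38-34.zip")

# A full path to a result file is still accepted directly:
stats_file = load_backtest_stats(results_dir / "backtest-result-2025-03-05_20-38-34.zip")
```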
@@ -362,16 +384,21 @@ def _load_backtest_data_df_compatibility(df: pd.DataFrame) -> pd.DataFrame:
return df


def load_backtest_data(filename: Path | str, strategy: str | None = None) -> pd.DataFrame:
def load_backtest_data(
file_or_directory: Path | str, strategy: str | None = None, filename: Path | str | None = None
) -> pd.DataFrame:
"""
Load backtest data file.
:param filename: pathlib.Path object, or string pointing to a file or directory
Load backtest data file, returns a dataframe with the individual trades.
:param file_or_directory: pathlib.Path object, or string pointing to the directory,
or absolute/relative path to the backtest results file.
:param strategy: Strategy to load - mainly relevant for multi-strategy backtests
Can also serve as protection to load the correct result.
:param filename: Optional filename to load from (if different from the main filename).
Only valid when loading from a directory.
:return: a dataframe with the analysis results
:raise: ValueError if loading goes wrong.
"""
data = load_backtest_stats(filename)
data = load_backtest_stats(file_or_directory, filename)
if not isinstance(data, list):
# new, nested format
if "strategy" not in data:
@@ -430,20 +457,23 @@ def load_file_from_zip(zip_path: Path, filename: str) -> bytes:
raise ValueError(f"Bad zip file: {zip_path}.") from None


def load_backtest_analysis_data(backtest_dir: Path, name: str):
def load_backtest_analysis_data(
file_or_directory: Path,
name: Literal["signals", "rejected", "exited"],
filename: Path | str | None = None,
):
"""
Load backtest analysis data either from a pickle file or from within a zip file
:param backtest_dir: Directory containing backtest results
:param file_or_directory: pathlib.Path object, or string pointing to the directory,
or absolute/relative path to the backtest results file.
:param name: Name of the analysis data to load (signals, rejected, exited)
:param filename: Optional filename to load from (if different from the main filename).
Only valid when loading from a directory.
:return: Analysis data
"""
import joblib

if backtest_dir.is_dir():
lbf = Path(get_latest_backtest_filename(backtest_dir))
zip_path = backtest_dir / lbf
else:
zip_path = backtest_dir
zip_path = _normalize_filename(file_or_directory, filename)

if zip_path.suffix == ".zip":
# Load from zip file
@@ -458,10 +488,10 @@ def load_backtest_analysis_data(backtest_dir: Path, name: str):

else:
# Load from separate pickle file
if backtest_dir.is_dir():
scpf = Path(backtest_dir, f"{zip_path.stem}_{name}.pkl")
if file_or_directory.is_dir():
scpf = Path(file_or_directory, f"{zip_path.stem}_{name}.pkl")
else:
scpf = Path(backtest_dir.parent / f"{backtest_dir.stem}_{name}.pkl")
scpf = Path(file_or_directory.parent / f"{file_or_directory.stem}_{name}.pkl")

try:
with scpf.open("rb") as scp:
@@ -473,27 +503,6 @@ def load_backtest_analysis_data(backtest_dir: Path, name: str):
return None


def load_rejected_signals(backtest_dir: Path):
"""
Load rejected signals from backtest directory
"""
return load_backtest_analysis_data(backtest_dir, "rejected")


def load_signal_candles(backtest_dir: Path):
"""
Load signal candles from backtest directory
"""
return load_backtest_analysis_data(backtest_dir, "signals")


def load_exit_signal_candles(backtest_dir: Path) -> dict[str, dict[str, pd.DataFrame]]:
"""
Load exit signal candles from backtest directory
"""
return load_backtest_analysis_data(backtest_dir, "exited")


def trade_list_to_dataframe(trades: list[Trade] | list[LocalTrade]) -> pd.DataFrame:
"""
Convert list of Trade objects to pandas Dataframe
@@ -7,11 +7,9 @@ from freqtrade.configuration import TimeRange
from freqtrade.constants import Config
from freqtrade.data.btanalysis import (
BT_DATA_COLUMNS,
load_backtest_analysis_data,
load_backtest_data,
load_backtest_stats,
load_exit_signal_candles,
load_rejected_signals,
load_signal_candles,
)
from freqtrade.exceptions import ConfigurationError, OperationalException
from freqtrade.util import print_df_rich_table
@@ -332,7 +330,7 @@ def process_entry_exit_reasons(config: Config):
|
||||
do_rejected = config.get("analysis_rejected", False)
|
||||
to_csv = config.get("analysis_to_csv", False)
|
||||
csv_path = Path(
|
||||
config.get("analysis_csv_path", config["exportfilename"]), # type: ignore[arg-type]
|
||||
config.get("analysis_csv_path", config["exportdirectory"]), # type: ignore[arg-type]
|
||||
)
|
||||
|
||||
if entry_only is True and exit_only is True:
|
||||
@@ -346,20 +344,30 @@ def process_entry_exit_reasons(config: Config):
|
||||
None if config.get("timerange") is None else str(config.get("timerange"))
|
||||
)
|
||||
try:
|
||||
backtest_stats = load_backtest_stats(config["exportfilename"])
|
||||
backtest_stats = load_backtest_stats(
|
||||
config["exportdirectory"], config["exportfilename"]
|
||||
)
|
||||
except ValueError as e:
|
||||
raise ConfigurationError(e) from e
|
||||
|
||||
for strategy_name, results in backtest_stats["strategy"].items():
|
||||
trades = load_backtest_data(config["exportfilename"], strategy_name)
|
||||
trades = load_backtest_data(
|
||||
config["exportdirectory"], strategy_name, config["exportfilename"]
|
||||
)
|
||||
|
||||
if trades is not None and not trades.empty:
|
||||
signal_candles = load_signal_candles(config["exportfilename"])
|
||||
exit_signals = load_exit_signal_candles(config["exportfilename"])
|
||||
signal_candles = load_backtest_analysis_data(
|
||||
config["exportdirectory"], "signals", config["exportfilename"]
|
||||
)
|
||||
exit_signals = load_backtest_analysis_data(
|
||||
config["exportdirectory"], "exited", config["exportfilename"]
|
||||
)
|
||||
|
||||
rej_df = None
|
||||
if do_rejected:
|
||||
rejected_signals_dict = load_rejected_signals(config["exportfilename"])
|
||||
rejected_signals_dict = load_backtest_analysis_data(
|
||||
config["exportdirectory"], "rejected", config["exportfilename"]
|
||||
)
|
||||
rej_df = prepare_results(
|
||||
rejected_signals_dict,
|
||||
strategy_name,
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,9 +1,18 @@
|
||||
import logging
|
||||
from datetime import timedelta
|
||||
|
||||
import ccxt
|
||||
|
||||
from freqtrade.enums import CandleType
|
||||
from freqtrade.exceptions import (
|
||||
DDosProtection,
|
||||
OperationalException,
|
||||
RetryableOrderError,
|
||||
TemporaryError,
|
||||
)
|
||||
from freqtrade.exchange import Exchange
|
||||
from freqtrade.exchange.exchange_types import FtHas
|
||||
from freqtrade.exchange.common import API_RETRY_COUNT, retrier
|
||||
from freqtrade.exchange.exchange_types import CcxtOrder, FtHas
|
||||
from freqtrade.util.datetime_helpers import dt_now, dt_ts
|
||||
|
||||
|
||||
@@ -21,6 +30,10 @@ class Bitget(Exchange):
|
||||
"""
|
||||
|
||||
_ft_has: FtHas = {
|
||||
"stoploss_on_exchange": True,
|
||||
"stop_price_param": "stopPrice",
|
||||
"stop_price_prop": "stopPrice",
|
||||
"stoploss_order_types": {"limit": "limit", "market": "market"},
|
||||
"ohlcv_candle_limit": 200, # 200 for historical candles, 1000 for recent ones.
|
||||
"order_time_in_force": ["GTC", "FOK", "IOC", "PO"],
|
||||
}
|
||||
@@ -44,9 +57,72 @@ class Bitget(Exchange):
|
||||
timeframe_map = self._api.options["fetchOHLCV"]["maxRecentDaysPerTimeframe"]
|
||||
days = timeframe_map.get(timeframe, 30)
|
||||
|
||||
if candle_type in (CandleType.FUTURES, CandleType.SPOT) and (
|
||||
if candle_type in (CandleType.FUTURES, CandleType.SPOT, CandleType.MARK) and (
|
||||
not since_ms or dt_ts(dt_now() - timedelta(days=days)) < since_ms
|
||||
):
|
||||
return 1000
|
||||
|
||||
return super().ohlcv_candle_limit(timeframe, candle_type, since_ms)
|
||||
|
||||
def _convert_stop_order(self, pair: str, order_id: str, order: CcxtOrder) -> CcxtOrder:
|
||||
if order.get("status", "open") == "closed":
|
||||
# Use orderID as cliendOrderId filter to fetch the regular followup order.
|
||||
# Could be done with "fetch_order" - but clientOid as filter doesn't seem to work
|
||||
# https://www.bitget.com/api-doc/spot/trade/Get-Order-Info
|
||||
|
||||
for method in (
|
||||
self._api.fetch_canceled_and_closed_orders,
|
||||
self._api.fetch_open_orders,
|
||||
):
|
||||
orders = method(pair)
|
||||
orders_f = [order for order in orders if order["clientOrderId"] == order_id]
|
||||
if orders_f:
|
||||
order_reg = orders_f[0]
|
||||
self._log_exchange_response("fetch_stoploss_order1", order_reg)
|
||||
order_reg["id_stop"] = order_reg["id"]
|
||||
order_reg["id"] = order_id
|
||||
order_reg["type"] = "stoploss"
|
||||
order_reg["status_stop"] = "triggered"
|
||||
return order_reg
|
||||
order = self._order_contracts_to_amount(order)
|
||||
order["type"] = "stoploss"
|
||||
return order
|
||||
|
||||
def _fetch_stop_order_fallback(self, order_id: str, pair: str) -> CcxtOrder:
|
||||
params2 = {
|
||||
"stop": True,
|
||||
}
|
||||
for method in (
|
||||
self._api.fetch_open_orders,
|
||||
self._api.fetch_canceled_and_closed_orders,
|
||||
):
|
||||
try:
|
||||
orders = method(pair, params=params2)
|
||||
orders_f = [order for order in orders if order["id"] == order_id]
|
||||
if orders_f:
|
||||
order = orders_f[0]
|
||||
self._log_exchange_response("get_stop_order_fallback", order)
|
||||
return self._convert_stop_order(pair, order_id, order)
|
||||
except (ccxt.OrderNotFound, ccxt.InvalidOrder):
|
||||
pass
|
||||
except ccxt.DDoSProtection as e:
|
||||
raise DDosProtection(e) from e
|
||||
except (ccxt.OperationFailed, ccxt.ExchangeError) as e:
|
||||
raise TemporaryError(
|
||||
f"Could not get order due to {e.__class__.__name__}. Message: {e}"
|
||||
) from e
|
||||
except ccxt.BaseError as e:
|
||||
raise OperationalException(e) from e
|
||||
raise RetryableOrderError(f"StoplossOrder not found (pair: {pair} id: {order_id}).")
|
||||
|
||||
@retrier(retries=API_RETRY_COUNT)
|
||||
def fetch_stoploss_order(
|
||||
self, order_id: str, pair: str, params: dict | None = None
|
||||
) -> CcxtOrder:
|
||||
if self._config["dry_run"]:
|
||||
return self.fetch_dry_run_order(order_id)
|
||||
|
||||
return self._fetch_stop_order_fallback(order_id, pair)
|
||||
|
||||
def cancel_stoploss_order(self, order_id: str, pair: str, params: dict | None = None) -> dict:
|
||||
return self.cancel_order(order_id=order_id, pair=pair, params={"stop": True})
|
||||
|
||||
@@ -70,34 +70,29 @@ class LookaheadAnalysis(BaseAnalysis):
|
||||
cut_df: DataFrame = cut_vars.indicators[current_pair]
|
||||
full_df: DataFrame = full_vars.indicators[current_pair]
|
||||
|
||||
# cut longer dataframe to length of the shorter
|
||||
full_df_cut = full_df[(full_df.date == cut_vars.compared_dt)].reset_index(drop=True)
|
||||
cut_df_cut = cut_df[(cut_df.date == cut_vars.compared_dt)].reset_index(drop=True)
|
||||
# trim full_df to the same index and length as cut_df
|
||||
cut_full_df = full_df.loc[cut_df.index]
|
||||
compare_df = cut_full_df.compare(cut_df)
|
||||
|
||||
# check if dataframes are not empty
|
||||
if full_df_cut.shape[0] != 0 and cut_df_cut.shape[0] != 0:
|
||||
# compare dataframes
|
||||
compare_df = full_df_cut.compare(cut_df_cut)
|
||||
if compare_df.shape[0] > 0:
|
||||
for col_name in compare_df:
|
||||
col_idx = compare_df.columns.get_loc(col_name)
|
||||
compare_df_row = compare_df.iloc[0]
|
||||
# compare_df now comprises tuples with [1] having either 'self' or 'other'
|
||||
if "other" in col_name[1]:
|
||||
continue
|
||||
self_value = compare_df_row.iloc[col_idx]
|
||||
other_value = compare_df_row.iloc[col_idx + 1]
|
||||
|
||||
if compare_df.shape[0] > 0:
|
||||
for col_name, values in compare_df.items():
|
||||
col_idx = compare_df.columns.get_loc(col_name)
|
||||
compare_df_row = compare_df.iloc[0]
|
||||
# compare_df now comprises tuples with [1] having either 'self' or 'other'
|
||||
if "other" in col_name[1]:
|
||||
continue
|
||||
self_value = compare_df_row.iloc[col_idx]
|
||||
other_value = compare_df_row.iloc[col_idx + 1]
|
||||
|
||||
# output differences
|
||||
if self_value != other_value:
|
||||
if not self.current_analysis.false_indicators.__contains__(col_name[0]):
|
||||
self.current_analysis.false_indicators.append(col_name[0])
|
||||
logger.info(
|
||||
f"=> found look ahead bias in indicator "
|
||||
f"{col_name[0]}. "
|
||||
f"{str(self_value)} != {str(other_value)}"
|
||||
)
|
||||
# output differences
|
||||
if self_value != other_value:
|
||||
if not self.current_analysis.false_indicators.__contains__(col_name[0]):
|
||||
self.current_analysis.false_indicators.append(col_name[0])
|
||||
logger.info(
|
||||
f"=> found look ahead bias in column "
|
||||
f"{col_name[0]}. "
|
||||
f"{str(self_value)} != {str(other_value)}"
|
||||
)
|
||||
|
||||
def prepare_data(self, varholder: VarHolder, pairs_to_load: list[DataFrame]):
|
||||
if "freqai" in self.local_config and "identifier" in self.local_config["freqai"]:
|
||||
@@ -132,7 +127,13 @@ class LookaheadAnalysis(BaseAnalysis):
|
||||
varholder.data, varholder.timerange = backtesting.load_bt_data()
|
||||
varholder.timeframe = backtesting.timeframe
|
||||
|
||||
varholder.indicators = backtesting.strategy.advise_all_indicators(varholder.data)
|
||||
temp_indicators = backtesting.strategy.advise_all_indicators(varholder.data)
|
||||
filled_indicators = dict()
|
||||
for pair, dataframe in temp_indicators.items():
|
||||
filled_indicators[pair] = backtesting.strategy.ft_advise_signals(
|
||||
dataframe, {"pair": pair}
|
||||
)
|
||||
varholder.indicators = filled_indicators
|
||||
varholder.result = self.get_result(backtesting, varholder.indicators)
|
||||
|
||||
def fill_entry_and_exit_varHolders(self, result_row):
|
||||
|
||||
@@ -15,7 +15,6 @@ from freqtrade.loggers.set_log_levels import (
|
||||
)
|
||||
from freqtrade.optimize.backtesting import Backtesting
|
||||
from freqtrade.optimize.base_analysis import BaseAnalysis, VarHolder
|
||||
from freqtrade.resolvers import StrategyResolver
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
@@ -33,13 +32,6 @@ class RecursiveAnalysis(BaseAnalysis):
|
||||
|
||||
super().__init__(config, strategy_obj)
|
||||
|
||||
strat = StrategyResolver.load_strategy(config)
|
||||
self._strat_scc = strat.startup_candle_count
|
||||
|
||||
if self._strat_scc not in self._startup_candle:
|
||||
self._startup_candle.append(self._strat_scc)
|
||||
self._startup_candle.sort()
|
||||
|
||||
self.partial_varHolder_array: list[VarHolder] = []
|
||||
self.partial_varHolder_lookahead_array: list[VarHolder] = []
|
||||
|
||||
@@ -149,6 +141,13 @@ class RecursiveAnalysis(BaseAnalysis):
|
||||
self.local_config["candle_type_def"] = prepare_data_config["candle_type_def"]
|
||||
backtesting._set_strategy(backtesting.strategylist[0])
|
||||
|
||||
strat = backtesting.strategy
|
||||
self._strat_scc = strat.startup_candle_count
|
||||
|
||||
if self._strat_scc not in self._startup_candle:
|
||||
self._startup_candle.append(self._strat_scc)
|
||||
self._startup_candle.sort()
|
||||
|
||||
varholder.data, varholder.timerange = backtesting.load_bt_data()
|
||||
varholder.timeframe = backtesting.timeframe
|
||||
|
||||
|
||||
@@ -17,7 +17,7 @@ class RecursiveAnalysisSubFunctions:
|
||||
@staticmethod
|
||||
def text_table_recursive_analysis_instances(recursive_instances: list[RecursiveAnalysis]):
|
||||
startups = recursive_instances[0]._startup_candle
|
||||
strat_scc = recursive_instances[0]._strat_scc
|
||||
strat_scc = getattr(recursive_instances[0], "_strat_scc", 0) or 0
|
||||
headers = ["Indicators"]
|
||||
for candle in startups:
|
||||
if candle == strat_scc:
|
||||
|
||||
@@ -64,7 +64,7 @@ def store_backtest_results(
|
||||
:param market_change_data: Dataframe containing market change data
|
||||
:param analysis_results: Dictionary containing analysis results
|
||||
"""
|
||||
recordfilename: Path = config["exportfilename"]
|
||||
recordfilename: Path = config["exportdirectory"]
|
||||
zip_filename = _generate_filename(recordfilename, dtappendix, ".zip")
|
||||
base_filename = _generate_filename(recordfilename, dtappendix, "")
|
||||
json_filename = _generate_filename(recordfilename, dtappendix, ".json")
|
||||
|
||||
@@ -1187,10 +1187,13 @@ class LocalTrade:
|
||||
"""
|
||||
close_trade_value = self.calc_close_trade_value(rate, amount)
|
||||
|
||||
if amount is None or open_rate is None:
|
||||
if (amount is None) and (open_rate is None):
|
||||
open_trade_value = self.open_trade_value
|
||||
else:
|
||||
open_trade_value = self._calc_open_trade_value(amount, open_rate)
|
||||
# Fall back to trade.amount and self.open_rate if necessary
|
||||
open_trade_value = self._calc_open_trade_value(
|
||||
amount or self.amount, open_rate or self.open_rate
|
||||
)
|
||||
|
||||
if open_trade_value == 0.0:
|
||||
return 0.0
|
||||
|
||||
@@ -31,8 +31,9 @@ class AgeFilter(IPairList):
|
||||
|
||||
self._min_days_listed = self._pairlistconfig.get("min_days_listed", 10)
|
||||
self._max_days_listed = self._pairlistconfig.get("max_days_listed")
|
||||
self._def_candletype = self._config["candle_type_def"]
|
||||
|
||||
candle_limit = self._exchange.ohlcv_candle_limit("1d", self._config["candle_type_def"])
|
||||
candle_limit = self._exchange.ohlcv_candle_limit("1d", self._def_candletype)
|
||||
if self._min_days_listed < 1:
|
||||
raise OperationalException("AgeFilter requires min_days_listed to be >= 1")
|
||||
if self._min_days_listed > candle_limit:
|
||||
@@ -100,7 +101,7 @@ class AgeFilter(IPairList):
|
||||
:return: new allowlist
|
||||
"""
|
||||
needed_pairs: ListPairsWithTimeframes = [
|
||||
(p, "1d", self._config["candle_type_def"])
|
||||
(p, "1d", self._def_candletype)
|
||||
for p in pairlist
|
||||
if p not in self._symbolsChecked and p not in self._symbolsCheckFailed
|
||||
]
|
||||
@@ -116,8 +117,8 @@ class AgeFilter(IPairList):
|
||||
if self._enabled:
|
||||
for p in deepcopy(pairlist):
|
||||
daily_candles = (
|
||||
candles[(p, "1d", self._config["candle_type_def"])]
|
||||
if (p, "1d", self._config["candle_type_def"]) in candles
|
||||
candles[(p, "1d", self._def_candletype)]
|
||||
if (p, "1d", self._def_candletype) in candles
|
||||
else None
|
||||
)
|
||||
if not self._validate_pair_loc(p, daily_candles):
|
||||
|
||||
@@ -37,7 +37,6 @@ class MarketCapPairList(IPairList):
|
||||
self._refresh_period = self._pairlistconfig.get("refresh_period", 86400)
|
||||
self._categories = self._pairlistconfig.get("categories", [])
|
||||
self._marketcap_cache: TTLCache = TTLCache(maxsize=1, ttl=self._refresh_period)
|
||||
self._def_candletype = self._config["candle_type_def"]
|
||||
|
||||
_coingecko_config = self._config.get("coingecko", {})
|
||||
|
||||
@@ -225,7 +224,7 @@ class MarketCapPairList(IPairList):
|
||||
if marketcap_list:
|
||||
filtered_pairlist: list[str] = []
|
||||
|
||||
market = self._config["trading_mode"]
|
||||
market = self._exchange._config["trading_mode"]
|
||||
pair_format = f"{self._stake_currency.upper()}"
|
||||
if market == "futures":
|
||||
pair_format += f":{self._stake_currency.upper()}"
|
||||
|
||||
@@ -91,7 +91,7 @@ class PercentChangePairList(IPairList):
|
||||
)
|
||||
|
||||
candle_limit = self._exchange.ohlcv_candle_limit(
|
||||
self._lookback_timeframe, self._config["candle_type_def"]
|
||||
self._lookback_timeframe, self._def_candletype
|
||||
)
|
||||
|
||||
if self._lookback_period > candle_limit:
|
||||
|
||||
@@ -40,7 +40,7 @@ class VolatilityFilter(IPairList):
|
||||
|
||||
self._pair_cache: TTLCache = TTLCache(maxsize=1000, ttl=self._refresh_period)
|
||||
|
||||
candle_limit = self._exchange.ohlcv_candle_limit("1d", self._config["candle_type_def"])
|
||||
candle_limit = self._exchange.ohlcv_candle_limit("1d", self._def_candletype)
|
||||
if self._days < 1:
|
||||
raise OperationalException("VolatilityFilter requires lookback_days to be >= 1")
|
||||
if self._days > candle_limit:
|
||||
|
||||
@@ -89,7 +89,7 @@ class VolumePairList(IPairList):
|
||||
raise OperationalException(f"key {self._sort_key} not in {SORT_VALUES}")
|
||||
|
||||
candle_limit = self._exchange.ohlcv_candle_limit(
|
||||
self._lookback_timeframe, self._config["candle_type_def"]
|
||||
self._lookback_timeframe, self._def_candletype
|
||||
)
|
||||
if self._lookback_period < 0:
|
||||
raise OperationalException("VolumeFilter requires lookback_period to be >= 0")
|
||||
|
||||
@@ -34,7 +34,7 @@ class RangeStabilityFilter(IPairList):
|
||||
|
||||
self._pair_cache: TTLCache = TTLCache(maxsize=1000, ttl=self._refresh_period)
|
||||
|
||||
candle_limit = self._exchange.ohlcv_candle_limit("1d", self._config["candle_type_def"])
|
||||
candle_limit = self._exchange.ohlcv_candle_limit("1d", self._def_candletype)
|
||||
if self._days < 1:
|
||||
raise OperationalException("RangeStabilityFilter requires lookback_days to be >= 1")
|
||||
if self._days > candle_limit:
|
||||
|
||||
@@ -54,6 +54,7 @@ def __run_pairlist(job_id: str, config_loc: Config):
|
||||
|
||||
with FtNoDBContext():
|
||||
exchange = get_exchange(config_loc)
|
||||
config_loc["candle_type_def"] = exchange._config["candle_type_def"]
|
||||
pairlists = PairListManager(exchange, config_loc)
|
||||
pairlists.refresh_pairlist()
|
||||
ApiBG.jobs[job_id]["result"] = {
|
||||
|
||||
@@ -342,8 +342,22 @@ class Telegram(RPCHandler):
|
||||
self._loop.run_until_complete(self._startup_telegram())
|
||||
|
||||
async def _startup_telegram(self) -> None:
|
||||
await self._app.initialize()
|
||||
await self._app.start()
|
||||
retries = 3
|
||||
attempt = 0
|
||||
while attempt < retries:
|
||||
try:
|
||||
await self._app.initialize()
|
||||
await self._app.start()
|
||||
break
|
||||
except Exception as ex:
|
||||
logger.error(
|
||||
"Error starting Telegram bot (attempt %d/%d): %s", attempt + 1, retries, ex
|
||||
)
|
||||
attempt += 1
|
||||
if attempt == retries:
|
||||
logger.warning("Telegram init failed.")
|
||||
return
|
||||
await asyncio.sleep(2)
|
||||
if self._app.updater:
|
||||
await self._app.updater.start_polling(
|
||||
bootstrap_retries=-1,
|
||||
|
||||
@@ -225,7 +225,6 @@ class RealParameter(NumericParameter):
|
||||
|
||||
class DecimalParameter(NumericParameter):
|
||||
default: float
|
||||
value: float
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
@@ -259,6 +258,14 @@ class DecimalParameter(NumericParameter):
|
||||
low=low, high=high, default=default, space=space, optimize=optimize, load=load, **kwargs
|
||||
)
|
||||
|
||||
@property
|
||||
def value(self) -> float:
|
||||
return self._value
|
||||
|
||||
@value.setter
|
||||
def value(self, new_value: float):
|
||||
self._value = round(new_value, self._decimals)
|
||||
|
||||
def get_space(self, name: str) -> "SKDecimal":
|
||||
"""
|
||||
Create optimization space.
|
||||
|
||||
@@ -1,3 +1,3 @@
|
||||
# Requirements for freqtrade client library
|
||||
requests==2.32.4
|
||||
requests==2.32.5
|
||||
python-rapidjson==1.21
|
||||
|
||||
@@ -40,7 +40,7 @@ dependencies = [
|
||||
"jsonschema",
|
||||
"numpy>2.0,<3.0",
|
||||
"pandas>=2.2.0,<3.0",
|
||||
"TA-Lib<0.6",
|
||||
"TA-Lib<0.7",
|
||||
"ft-pandas-ta",
|
||||
"technical",
|
||||
"tabulate",
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
-r requirements-freqai-rl.txt
|
||||
-r docs/requirements-docs.txt
|
||||
|
||||
ruff==0.12.8
|
||||
ruff==0.12.10
|
||||
mypy==1.17.1
|
||||
pre-commit==4.3.0
|
||||
pytest==8.4.1
|
||||
@@ -18,15 +18,15 @@ pytest-timeout==2.4.0
|
||||
pytest-xdist==3.8.0
|
||||
isort==6.0.1
|
||||
# For datetime mocking
|
||||
time-machine==2.17.0
|
||||
time-machine==2.19.0
|
||||
|
||||
# Convert jupyter notebooks to markdown documents
|
||||
nbconvert==7.16.6
|
||||
|
||||
# mypy types
|
||||
scipy-stubs==1.16.1.0 # keep in sync with `scipy` in `requirements-hyperopt.txt`
|
||||
scipy-stubs==1.16.1.1 # keep in sync with `scipy` in `requirements-hyperopt.txt`
|
||||
types-cachetools==6.1.0.20250717
|
||||
types-filelock==3.2.7
|
||||
types-requests==2.32.4.20250809
|
||||
types-tabulate==0.9.0.20241207
|
||||
types-python-dateutil==2.9.0.20250809
|
||||
types-python-dateutil==2.9.0.20250822
|
||||
|
||||
@@ -7,6 +7,6 @@ scikit-learn==1.7.1
|
||||
joblib==1.5.1
|
||||
catboost==1.2.8; 'arm' not in platform_machine
|
||||
lightgbm==4.6.0
|
||||
xgboost==3.0.3
|
||||
xgboost==3.0.4
|
||||
tensorboard==2.20.0
|
||||
datasieve==0.1.9
|
||||
|
||||
@@ -4,6 +4,6 @@
|
||||
# Required for hyperopt
|
||||
scipy==1.16.1
|
||||
scikit-learn==1.7.1
|
||||
filelock==3.18.0
|
||||
optuna==4.4.0
|
||||
filelock==3.19.1
|
||||
optuna==4.5.0
|
||||
cmaes==0.12.0
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
# Include all requirements to run the bot.
|
||||
-r requirements.txt
|
||||
|
||||
plotly==6.2.0
|
||||
plotly==6.3.0
|
||||
|
||||
@@ -1,25 +1,27 @@
|
||||
numpy==2.3.2
|
||||
pandas==2.3.1
|
||||
numpy==2.3.2; platform_machine != 'armv7l'
|
||||
numpy==2.2.4; platform_machine == 'armv7l'
|
||||
pandas==2.3.2; platform_machine != 'armv7l'
|
||||
pandas==2.2.3; platform_machine == 'armv7l'
|
||||
bottleneck==1.5.0
|
||||
numexpr==2.11.0
|
||||
# Indicator libraries
|
||||
ft-pandas-ta==0.3.15
|
||||
ta-lib==0.5.5
|
||||
ta-lib==0.6.5
|
||||
technical==1.5.2
|
||||
|
||||
ccxt==4.4.99
|
||||
ccxt==4.5.1
|
||||
cryptography==45.0.6
|
||||
aiohttp==3.12.15
|
||||
SQLAlchemy==2.0.42
|
||||
SQLAlchemy==2.0.43
|
||||
python-telegram-bot==22.3
|
||||
# can't be hard-pinned due to telegram-bot pinning httpx with ~
|
||||
httpx>=0.24.1
|
||||
humanize==4.12.3
|
||||
cachetools==6.1.0
|
||||
requests==2.32.4
|
||||
requests==2.32.5
|
||||
urllib3==2.5.0
|
||||
certifi==2025.8.3
|
||||
jsonschema==4.25.0
|
||||
jsonschema==4.25.1
|
||||
tabulate==0.9.0
|
||||
pycoingecko==3.2.0
|
||||
jinja2==3.1.6
|
||||
@@ -30,7 +32,7 @@ pyarrow==21.0.0; platform_machine != 'armv7l'
|
||||
# Load ticker files 30% faster
|
||||
python-rapidjson==1.21
|
||||
# Properly format api responses
|
||||
orjson==3.11.1
|
||||
orjson==3.11.2
|
||||
|
||||
# Notify systemd
|
||||
sdnotify==0.3.2
|
||||
|
||||
10
setup.ps1
10
setup.ps1
@@ -228,16 +228,6 @@ function Main {
|
||||
}
|
||||
}
|
||||
|
||||
if (-not (Test-Path "$VenvDir\Lib\site-packages\talib")) {
|
||||
# Install TA-Lib using the virtual environment's pip
|
||||
Write-Log "Installing TA-Lib using virtual environment's pip..."
|
||||
python -m pip install --find-links=build_helpers\ --prefer-binary TA-Lib 2>&1 | Out-File $LogFilePath -Append
|
||||
if ($LASTEXITCODE -ne 0) {
|
||||
Write-Log "Failed to install TA-Lib." -Level 'ERROR'
|
||||
Exit-Script -exitCode 1
|
||||
}
|
||||
}
|
||||
|
||||
# Present options for requirement files
|
||||
$SelectedIndices = Get-UserSelection -prompt "Select which requirement files to install:" -options $RequirementFiles -defaultChoice 'A'
|
||||
|
||||
|
||||
22
setup.sh
22
setup.sh
@@ -91,7 +91,6 @@ function updateenv() {
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
install_talib
|
||||
|
||||
${PYTHON} -m pip install --upgrade -r ${REQUIREMENTS} ${REQUIREMENTS_HYPEROPT} ${REQUIREMENTS_PLOT} ${REQUIREMENTS_FREQAI} ${REQUIREMENTS_FREQAI_RL}
|
||||
if [ $? -ne 0 ]; then
|
||||
@@ -118,25 +117,6 @@ function updateenv() {
|
||||
fi
|
||||
}
|
||||
|
||||
# Install tab lib
|
||||
function install_talib() {
|
||||
if [ -f /usr/local/lib/libta_lib.a ] || [ -f /usr/local/lib/libta_lib.so ] || [ -f /usr/lib/libta_lib.so ]; then
|
||||
echo "ta-lib already installed, skipping"
|
||||
return
|
||||
fi
|
||||
|
||||
cd build_helpers && ./install_ta-lib.sh
|
||||
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Quitting. Please fix the above error before continuing."
|
||||
cd ..
|
||||
exit 1
|
||||
fi;
|
||||
|
||||
cd ..
|
||||
}
|
||||
|
||||
|
||||
# Install bot MacOS
|
||||
function install_macos() {
|
||||
if [ ! -x "$(command -v brew)" ]
|
||||
@@ -257,7 +237,7 @@ function install() {
|
||||
install_redhat
|
||||
else
|
||||
echo "This script does not support your OS."
|
||||
echo "If you have Python version 3.11 - 3.13, pip, virtualenv, ta-lib you can continue."
|
||||
echo "If you have Python version 3.11 - 3.13, pip, virtualenv installed you can continue."
|
||||
echo "Wait 10 seconds to continue the next install steps or use ctrl+c to interrupt this shell."
|
||||
sleep 10
|
||||
fi
|
||||
|
||||
@@ -1862,8 +1862,10 @@ def test_backtesting_show(mocker, testdatadir, capsys):
|
||||
sbr = mocker.patch("freqtrade.optimize.optimize_reports.show_backtest_results")
|
||||
args = [
|
||||
"backtesting-show",
|
||||
"--export-directory",
|
||||
f"{testdatadir / 'backtest_results'}",
|
||||
"--export-filename",
|
||||
f"{testdatadir / 'backtest_results/backtest-result.json'}",
|
||||
"backtest-result.json",
|
||||
"--show-pair-list",
|
||||
]
|
||||
pargs = get_args(args)
|
||||
|
||||
@@ -521,7 +521,11 @@ def patch_torch_initlogs(mocker) -> None:
|
||||
mocked_module = types.ModuleType(module_name)
|
||||
sys.modules[module_name] = mocked_module
|
||||
else:
|
||||
mocker.patch("torch._logging._init_logs")
|
||||
try:
|
||||
mocker.patch("torch._logging._init_logs")
|
||||
except ModuleNotFoundError:
|
||||
# Allow running limited tests to run without freqAI dependencies
|
||||
pass
|
||||
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
|
||||
122
tests/exchange/test_bitget.py
Normal file
122
tests/exchange/test_bitget.py
Normal file
@@ -0,0 +1,122 @@
|
||||
from datetime import timedelta
|
||||
from unittest.mock import MagicMock
|
||||
|
||||
import pytest
|
||||
|
||||
from freqtrade.enums import CandleType
|
||||
from freqtrade.exceptions import RetryableOrderError
|
||||
from freqtrade.exchange.common import API_RETRY_COUNT
|
||||
from freqtrade.util import dt_now, dt_ts
|
||||
from tests.conftest import EXMS, get_patched_exchange
|
||||
from tests.exchange.test_exchange import ccxt_exceptionhandlers
|
||||
|
||||
|
||||
@pytest.mark.usefixtures("init_persistence")
|
||||
def test_fetch_stoploss_order_bitget(default_conf, mocker):
|
||||
default_conf["dry_run"] = False
|
||||
mocker.patch("freqtrade.exchange.common.time.sleep")
|
||||
api_mock = MagicMock()
|
||||
|
||||
exchange = get_patched_exchange(mocker, default_conf, api_mock, exchange="bitget")
|
||||
|
||||
api_mock.fetch_open_orders = MagicMock(return_value=[])
|
||||
api_mock.fetch_canceled_and_closed_orders = MagicMock(return_value=[])
|
||||
|
||||
with pytest.raises(RetryableOrderError):
|
||||
exchange.fetch_stoploss_order("1234", "ETH/BTC")
|
||||
assert api_mock.fetch_open_orders.call_count == API_RETRY_COUNT + 1
|
||||
assert api_mock.fetch_canceled_and_closed_orders.call_count == API_RETRY_COUNT + 1
|
||||
|
||||
api_mock.fetch_open_orders.reset_mock()
|
||||
api_mock.fetch_canceled_and_closed_orders.reset_mock()
|
||||
|
||||
api_mock.fetch_canceled_and_closed_orders = MagicMock(
|
||||
return_value=[{"id": "1234", "status": "closed", "clientOrderId": "123455"}]
|
||||
)
|
||||
api_mock.fetch_open_orders = MagicMock(return_value=[{"id": "50110", "clientOrderId": "1234"}])
|
||||
|
||||
resp = exchange.fetch_stoploss_order("1234", "ETH/BTC")
|
||||
assert api_mock.fetch_open_orders.call_count == 2
|
||||
assert api_mock.fetch_canceled_and_closed_orders.call_count == 2
|
||||
|
||||
assert resp["id"] == "1234"
|
||||
assert resp["id_stop"] == "50110"
|
||||
assert resp["type"] == "stoploss"
|
||||
|
||||
default_conf["dry_run"] = True
|
||||
exchange = get_patched_exchange(mocker, default_conf, api_mock, exchange="bitget")
|
||||
dro_mock = mocker.patch(f"{EXMS}.fetch_dry_run_order", MagicMock(return_value={"id": "123455"}))
|
||||
|
||||
api_mock.fetch_open_orders.reset_mock()
|
||||
api_mock.fetch_canceled_and_closed_orders.reset_mock()
|
||||
resp = exchange.fetch_stoploss_order("1234", "ETH/BTC")
|
||||
|
||||
assert api_mock.fetch_open_orders.call_count == 0
|
||||
assert api_mock.fetch_canceled_and_closed_orders.call_count == 0
|
||||
assert dro_mock.call_count == 1
|
||||
|
||||
|
||||
def test_fetch_stoploss_order_bitget_exceptions(default_conf_usdt, mocker):
|
||||
default_conf_usdt["dry_run"] = False
|
||||
api_mock = MagicMock()
|
||||
|
||||
# Test emulation of the stoploss getters
|
||||
api_mock.fetch_canceled_and_closed_orders = MagicMock(return_value=[])
|
||||
|
||||
ccxt_exceptionhandlers(
|
||||
mocker,
|
||||
default_conf_usdt,
|
||||
api_mock,
|
||||
"bitget",
|
||||
"fetch_stoploss_order",
|
||||
"fetch_open_orders",
|
||||
retries=API_RETRY_COUNT + 1,
|
||||
order_id="12345",
|
||||
pair="ETH/USDT",
|
||||
)
|
||||
|
||||
|
||||
def test_bitget_ohlcv_candle_limit(mocker, default_conf_usdt):
|
||||
# This test is also a live test - so we're sure our limits are correct.
|
||||
api_mock = MagicMock()
|
||||
api_mock.options = {
|
||||
"fetchOHLCV": {
|
||||
"maxRecentDaysPerTimeframe": {
|
||||
"1m": 30,
|
||||
"5m": 30,
|
||||
"15m": 30,
|
||||
"30m": 30,
|
||||
"1h": 60,
|
||||
"4h": 60,
|
||||
"1d": 60,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
exch = get_patched_exchange(mocker, default_conf_usdt, api_mock, exchange="bitget")
|
||||
timeframes = ("1m", "5m", "1h")
|
||||
|
||||
for timeframe in timeframes:
|
||||
assert exch.ohlcv_candle_limit(timeframe, CandleType.SPOT) == 1000
|
||||
assert exch.ohlcv_candle_limit(timeframe, CandleType.FUTURES) == 1000
|
||||
assert exch.ohlcv_candle_limit(timeframe, CandleType.MARK) == 1000
|
||||
assert exch.ohlcv_candle_limit(timeframe, CandleType.FUNDING_RATE) == 200
|
||||
|
||||
start_time = dt_ts(dt_now() - timedelta(days=17))
|
||||
assert exch.ohlcv_candle_limit(timeframe, CandleType.SPOT, start_time) == 1000
|
||||
assert exch.ohlcv_candle_limit(timeframe, CandleType.FUTURES, start_time) == 1000
|
||||
assert exch.ohlcv_candle_limit(timeframe, CandleType.MARK, start_time) == 1000
|
||||
assert exch.ohlcv_candle_limit(timeframe, CandleType.FUNDING_RATE, start_time) == 200
|
||||
start_time = dt_ts(dt_now() - timedelta(days=48))
|
||||
length = 200 if timeframe in ("1m", "5m") else 1000
|
||||
assert exch.ohlcv_candle_limit(timeframe, CandleType.SPOT, start_time) == length
|
||||
assert exch.ohlcv_candle_limit(timeframe, CandleType.FUTURES, start_time) == length
|
||||
assert exch.ohlcv_candle_limit(timeframe, CandleType.MARK, start_time) == length
|
||||
assert exch.ohlcv_candle_limit(timeframe, CandleType.FUNDING_RATE, start_time) == 200
|
||||
|
||||
start_time = dt_ts(dt_now() - timedelta(days=61))
|
||||
length = 200
|
||||
assert exch.ohlcv_candle_limit(timeframe, CandleType.SPOT, start_time) == length
|
||||
assert exch.ohlcv_candle_limit(timeframe, CandleType.FUTURES, start_time) == length
|
||||
assert exch.ohlcv_candle_limit(timeframe, CandleType.MARK, start_time) == length
|
||||
assert exch.ohlcv_candle_limit(timeframe, CandleType.FUNDING_RATE, start_time) == 200
|
||||
@@ -541,24 +541,24 @@ class TestCCXTExchange:
|
||||
for timeframe in timeframes:
|
||||
assert exch.ohlcv_candle_limit(timeframe, CandleType.SPOT) == 1000
|
||||
assert exch.ohlcv_candle_limit(timeframe, CandleType.FUTURES) == 1000
|
||||
assert exch.ohlcv_candle_limit(timeframe, CandleType.MARK) == 200
|
||||
assert exch.ohlcv_candle_limit(timeframe, CandleType.MARK) == 1000
|
||||
assert exch.ohlcv_candle_limit(timeframe, CandleType.FUNDING_RATE) == 200
|
||||
|
||||
start_time = dt_ts(dt_now() - timedelta(days=17))
|
||||
assert exch.ohlcv_candle_limit(timeframe, CandleType.SPOT, start_time) == 1000
|
||||
assert exch.ohlcv_candle_limit(timeframe, CandleType.FUTURES, start_time) == 1000
|
||||
assert exch.ohlcv_candle_limit(timeframe, CandleType.MARK, start_time) == 200
|
||||
assert exch.ohlcv_candle_limit(timeframe, CandleType.MARK, start_time) == 1000
|
||||
assert exch.ohlcv_candle_limit(timeframe, CandleType.FUNDING_RATE, start_time) == 200
|
||||
start_time = dt_ts(dt_now() - timedelta(days=48))
|
||||
length = 200 if timeframe in ("1m", "5m") else 1000
|
||||
assert exch.ohlcv_candle_limit(timeframe, CandleType.SPOT, start_time) == length
|
||||
assert exch.ohlcv_candle_limit(timeframe, CandleType.FUTURES, start_time) == length
|
||||
assert exch.ohlcv_candle_limit(timeframe, CandleType.MARK, start_time) == 200
|
||||
assert exch.ohlcv_candle_limit(timeframe, CandleType.MARK, start_time) == length
|
||||
assert exch.ohlcv_candle_limit(timeframe, CandleType.FUNDING_RATE, start_time) == 200
|
||||
|
||||
start_time = dt_ts(dt_now() - timedelta(days=61))
|
||||
length = 200
|
||||
assert exch.ohlcv_candle_limit(timeframe, CandleType.SPOT, start_time) == length
|
||||
assert exch.ohlcv_candle_limit(timeframe, CandleType.FUTURES, start_time) == length
|
||||
assert exch.ohlcv_candle_limit(timeframe, CandleType.MARK, start_time) == 200
|
||||
assert exch.ohlcv_candle_limit(timeframe, CandleType.MARK, start_time) == length
|
||||
assert exch.ohlcv_candle_limit(timeframe, CandleType.FUNDING_RATE, start_time) == 200
|
||||
|
||||
@@ -1528,6 +1528,25 @@ def test_handle_trade(
|
||||
assert trade.close_date is not None
|
||||
assert trade.exit_reason == "sell_signal1"
|
||||
|
||||
correct_profit_ratio = trade.calc_profit_ratio(
|
||||
rate=trade.close_rate, amount=trade.amount, open_rate=trade.open_rate
|
||||
)
|
||||
profit_ratio_1 = trade.calc_profit_ratio(rate=trade.close_rate, open_rate=trade.open_rate)
|
||||
profit_ratio_2 = trade.calc_profit_ratio(
|
||||
rate=trade.close_rate, open_rate=trade.open_rate * 1.02
|
||||
)
|
||||
profit_ratio_3 = trade.calc_profit_ratio(rate=trade.close_rate, amount=trade.amount)
|
||||
profit_ratio_4 = trade.calc_profit_ratio(rate=trade.close_rate)
|
||||
profit_ratio_5 = trade.calc_profit_ratio(
|
||||
rate=trade.close_rate, amount=trade.amount, open_rate=trade.open_rate * 1.02
|
||||
)
|
||||
assert correct_profit_ratio == close_profit
|
||||
assert correct_profit_ratio == profit_ratio_1
|
||||
assert correct_profit_ratio != profit_ratio_2
|
||||
assert correct_profit_ratio == profit_ratio_3
|
||||
assert correct_profit_ratio == profit_ratio_4
|
||||
assert correct_profit_ratio != profit_ratio_5
|
||||
|
||||
|
||||
@pytest.mark.parametrize("is_short", [False, True])
|
||||
def test_handle_overlapping_signals(
|
||||
@@ -5729,7 +5748,7 @@ def test_position_adjust2(mocker, default_conf_usdt, fee) -> None:
|
||||
@pytest.mark.parametrize(
|
||||
"data",
|
||||
[
|
||||
# tuple 1 - side amount, price
|
||||
# tuple 1 - side, amount, price
|
||||
# tuple 2 - amount, open_rate, stake_amount, cumulative_profit, realized_profit, rel_profit
|
||||
(
|
||||
(("buy", 100, 10), (100.0, 10.0, 1000.0, 0.0, None, None)),
|
||||
|
||||
@@ -236,7 +236,7 @@ def test_generate_backtest_stats(default_conf, testdatadir, tmp_path):
|
||||
filename_last = tmp_path / LAST_BT_RESULT_FN
|
||||
_backup_file(filename_last, copy_file=True)
|
||||
assert not filename.is_file()
|
||||
default_conf["exportfilename"] = filename
|
||||
default_conf["exportdirectory"] = filename
|
||||
|
||||
store_backtest_results(default_conf, stats, "2022_01_01_15_05_13")
|
||||
|
||||
@@ -263,7 +263,7 @@ def test_store_backtest_results(testdatadir, mocker):
|
||||
zip_mock = mocker.patch("freqtrade.optimize.optimize_reports.bt_storage.ZipFile")
|
||||
data = {"metadata": {}, "strategy": {}, "strategy_comparison": []}
|
||||
store_backtest_results(
|
||||
{"exportfilename": testdatadir, "original_config": {}}, data, "2022_01_01_15_05_13"
|
||||
{"exportdirectory": testdatadir, "original_config": {}}, data, "2022_01_01_15_05_13"
|
||||
)
|
||||
|
||||
assert dump_mock.call_count == 2
|
||||
@@ -275,7 +275,7 @@ def test_store_backtest_results(testdatadir, mocker):
|
||||
zip_mock.reset_mock()
|
||||
filename = testdatadir / "testresult.json"
|
||||
store_backtest_results(
|
||||
{"exportfilename": filename, "original_config": {}}, data, "2022_01_01_15_05_13"
|
||||
{"exportdirectory": filename, "original_config": {}}, data, "2022_01_01_15_05_13"
|
||||
)
|
||||
assert dump_mock.call_count == 2
|
||||
assert zip_mock.call_count == 1
|
||||
@@ -287,7 +287,7 @@ def test_store_backtest_results(testdatadir, mocker):
|
||||
def test_store_backtest_results_real(tmp_path, caplog):
|
||||
data = {"metadata": {}, "strategy": {}, "strategy_comparison": []}
|
||||
config = {
|
||||
"exportfilename": tmp_path,
|
||||
"exportdirectory": tmp_path,
|
||||
"original_config": {},
|
||||
}
|
||||
store_backtest_results(
|
||||
@@ -356,7 +356,7 @@ def test_write_read_backtest_candles(tmp_path):
|
||||
bt_results = {"metadata": {}, "strategy": {}, "strategy_comparison": []}
|
||||
|
||||
mock_conf = {
|
||||
"exportfilename": tmp_path,
|
||||
"exportdirectory": tmp_path,
|
||||
"export": "signals",
|
||||
"runmode": "backtest",
|
||||
"original_config": {},
|
||||
@@ -393,33 +393,6 @@ def test_write_read_backtest_candles(tmp_path):
|
||||
|
||||
_clean_test_file(stored_file)
|
||||
|
||||
# test file exporting
|
||||
filename = tmp_path / "testresult"
|
||||
mock_conf["exportfilename"] = filename
|
||||
store_backtest_results(mock_conf, bt_results, sample_date, analysis_results=data)
|
||||
stored_file = tmp_path / f"testresult-{sample_date}.zip"
|
||||
signals_pkl = f"testresult-{sample_date}_signals.pkl"
|
||||
rejected_pkl = f"testresult-{sample_date}_rejected.pkl"
|
||||
exited_pkl = f"testresult-{sample_date}_exited.pkl"
|
||||
assert not (tmp_path / signals_pkl).is_file()
|
||||
assert stored_file.is_file()
|
||||
|
||||
with ZipFile(stored_file, "r") as zipf:
|
||||
assert signals_pkl in zipf.namelist()
|
||||
assert rejected_pkl in zipf.namelist()
|
||||
assert exited_pkl in zipf.namelist()
|
||||
|
||||
with zipf.open(signals_pkl) as scp:
|
||||
pickled_signal_candles2 = joblib.load(scp)
|
||||
|
||||
assert pickled_signal_candles2.keys() == candle_dict.keys()
|
||||
assert pickled_signal_candles2["DefStrat"].keys() == pickled_signal_candles2["DefStrat"].keys()
|
||||
assert pickled_signal_candles2["DefStrat"]["UNITTEST/BTC"].equals(
|
||||
pickled_signal_candles2["DefStrat"]["UNITTEST/BTC"]
|
||||
)
|
||||
|
||||
_clean_test_file(stored_file)
|
||||
|
||||
|
||||
def test_generate_pair_metrics():
|
||||
results = pd.DataFrame(
|
||||
|
||||
@@ -1883,7 +1883,12 @@ def test_pairlistmanager_no_pairlist(mocker, whitelist_conf):
|
||||
|
||||
whitelist_conf["pairlists"] = []
|
||||
|
||||
with pytest.raises(OperationalException, match=r"No Pairlist Handlers defined"):
|
||||
with pytest.raises(OperationalException, match=r"\[\] should be non-empty"):
|
||||
get_patched_freqtradebot(mocker, whitelist_conf)
|
||||
|
||||
del whitelist_conf["pairlists"]
|
||||
|
||||
with pytest.raises(OperationalException, match=r"'pairlists' is a required property"):
|
||||
get_patched_freqtradebot(mocker, whitelist_conf)
|
||||
|
||||
|
||||
|
||||
@@ -2802,8 +2802,8 @@ def test_api_backtesting(botclient, mocker, fee, caplog, tmp_path):
|
||||
ftbot.config["export"] = "trades"
|
||||
ftbot.config["backtest_cache"] = "day"
|
||||
ftbot.config["user_data_dir"] = tmp_path
|
||||
ftbot.config["exportfilename"] = tmp_path / "backtest_results"
|
||||
ftbot.config["exportfilename"].mkdir()
|
||||
ftbot.config["exportdirectory"] = tmp_path / "backtest_results"
|
||||
ftbot.config["exportdirectory"].mkdir()
|
||||
|
||||
# start backtesting
|
||||
data = {
|
||||
|
||||
@@ -12,20 +12,13 @@ from freqtrade.configuration import TimeRange
|
||||
from freqtrade.constants import CUSTOM_TAG_MAX_LENGTH
|
||||
from freqtrade.data.dataprovider import DataProvider
|
||||
from freqtrade.data.history import load_data
|
||||
from freqtrade.enums import ExitCheckTuple, ExitType, HyperoptState, SignalDirection
|
||||
from freqtrade.enums import ExitCheckTuple, ExitType, SignalDirection
|
||||
from freqtrade.exceptions import OperationalException, StrategyError
|
||||
from freqtrade.optimize.hyperopt_tools import HyperoptStateContainer
|
||||
from freqtrade.optimize.space import SKDecimal
|
||||
from freqtrade.persistence import PairLocks, Trade
|
||||
from freqtrade.resolvers import StrategyResolver
|
||||
from freqtrade.strategy.hyper import detect_parameters
|
||||
from freqtrade.strategy.parameters import (
|
||||
BaseParameter,
|
||||
BooleanParameter,
|
||||
CategoricalParameter,
|
||||
DecimalParameter,
|
||||
IntParameter,
|
||||
RealParameter,
|
||||
)
|
||||
from freqtrade.strategy.strategy_validation import StrategyResultValidator
|
||||
from freqtrade.util import dt_now
|
||||
@@ -930,95 +923,6 @@ def test_is_informative_pairs_callback(default_conf):
|
||||
assert [] == strategy.gather_informative_pairs()
|
||||
|
||||
|
||||
def test_hyperopt_parameters():
|
||||
HyperoptStateContainer.set_state(HyperoptState.INDICATORS)
|
||||
from optuna.distributions import CategoricalDistribution, FloatDistribution, IntDistribution
|
||||
|
||||
with pytest.raises(OperationalException, match=r"Name is determined.*"):
|
||||
IntParameter(low=0, high=5, default=1, name="hello")
|
||||
|
||||
with pytest.raises(OperationalException, match=r"IntParameter space must be.*"):
|
||||
IntParameter(low=0, default=5, space="buy")
|
||||
|
||||
with pytest.raises(OperationalException, match=r"RealParameter space must be.*"):
|
||||
RealParameter(low=0, default=5, space="buy")
|
||||
|
||||
with pytest.raises(OperationalException, match=r"DecimalParameter space must be.*"):
|
||||
DecimalParameter(low=0, default=5, space="buy")
|
||||
|
||||
with pytest.raises(OperationalException, match=r"IntParameter space invalid\."):
|
||||
IntParameter([0, 10], high=7, default=5, space="buy")
|
||||
|
||||
with pytest.raises(OperationalException, match=r"RealParameter space invalid\."):
|
||||
RealParameter([0, 10], high=7, default=5, space="buy")
|
||||
|
||||
with pytest.raises(OperationalException, match=r"DecimalParameter space invalid\."):
|
||||
DecimalParameter([0, 10], high=7, default=5, space="buy")
|
||||
|
||||
with pytest.raises(OperationalException, match=r"CategoricalParameter space must.*"):
|
||||
CategoricalParameter(["aa"], default="aa", space="buy")
|
||||
|
||||
with pytest.raises(TypeError):
|
||||
BaseParameter(opt_range=[0, 1], default=1, space="buy")
|
||||
|
||||
intpar = IntParameter(low=0, high=5, default=1, space="buy")
|
||||
assert intpar.value == 1
|
||||
assert isinstance(intpar.get_space(""), IntDistribution)
|
||||
assert isinstance(intpar.range, range)
|
||||
assert len(list(intpar.range)) == 1
|
||||
# Range contains ONLY the default / value.
|
||||
assert list(intpar.range) == [intpar.value]
|
||||
intpar.in_space = True
|
||||
|
||||
assert len(list(intpar.range)) == 6
|
||||
assert list(intpar.range) == [0, 1, 2, 3, 4, 5]
|
||||
|
||||
fltpar = RealParameter(low=0.0, high=5.5, default=1.0, space="buy")
|
||||
assert fltpar.value == 1
|
||||
assert isinstance(fltpar.get_space(""), FloatDistribution)
|
||||
|
||||
fltpar = DecimalParameter(low=0.0, high=0.5, default=0.14, decimals=1, space="buy")
|
||||
assert fltpar.value == 0.1
|
||||
assert isinstance(fltpar.get_space(""), SKDecimal)
|
||||
assert isinstance(fltpar.range, list)
|
||||
assert len(list(fltpar.range)) == 1
|
||||
# Range contains ONLY the default / value.
|
||||
assert list(fltpar.range) == [fltpar.value]
|
||||
fltpar.in_space = True
|
||||
assert len(list(fltpar.range)) == 6
|
||||
assert list(fltpar.range) == [0.0, 0.1, 0.2, 0.3, 0.4, 0.5]
|
||||
|
||||
catpar = CategoricalParameter(
|
||||
["buy_rsi", "buy_macd", "buy_none"], default="buy_macd", space="buy"
|
||||
)
|
||||
assert catpar.value == "buy_macd"
|
||||
assert isinstance(catpar.get_space(""), CategoricalDistribution)
|
||||
assert isinstance(catpar.range, list)
|
||||
assert len(list(catpar.range)) == 1
|
||||
# Range contains ONLY the default / value.
|
||||
assert list(catpar.range) == [catpar.value]
|
||||
catpar.in_space = True
|
||||
assert len(list(catpar.range)) == 3
|
||||
assert list(catpar.range) == ["buy_rsi", "buy_macd", "buy_none"]
|
||||
|
||||
boolpar = BooleanParameter(default=True, space="buy")
|
||||
assert boolpar.value is True
|
||||
assert isinstance(boolpar.get_space(""), CategoricalDistribution)
|
||||
assert isinstance(boolpar.range, list)
|
||||
assert len(list(boolpar.range)) == 1
|
||||
|
||||
boolpar.in_space = True
|
||||
assert len(list(boolpar.range)) == 2
|
||||
|
||||
assert list(boolpar.range) == [True, False]
|
||||
|
||||
HyperoptStateContainer.set_state(HyperoptState.OPTIMIZE)
|
||||
assert len(list(intpar.range)) == 1
|
||||
assert len(list(fltpar.range)) == 1
|
||||
assert len(list(catpar.range)) == 1
|
||||
assert len(list(boolpar.range)) == 1
|
||||
|
||||
|
||||
def test_auto_hyperopt_interface(default_conf):
|
||||
default_conf.update({"strategy": "HyperoptableStrategyV2"})
|
||||
PairLocks.timeframe = default_conf["timeframe"]
|
||||
|
||||
136
tests/strategy/test_strategy_parameters.py
Normal file
136
tests/strategy/test_strategy_parameters.py
Normal file
@@ -0,0 +1,136 @@
|
||||
# pragma pylint: disable=missing-docstring, C0103
|
||||
|
||||
import pytest
|
||||
|
||||
from freqtrade.enums import HyperoptState
|
||||
from freqtrade.exceptions import OperationalException
|
||||
from freqtrade.optimize.hyperopt_tools import HyperoptStateContainer
|
||||
from freqtrade.strategy.parameters import (
|
||||
BaseParameter,
|
||||
BooleanParameter,
|
||||
CategoricalParameter,
|
||||
DecimalParameter,
|
||||
IntParameter,
|
||||
RealParameter,
|
||||
)
|
||||
|
||||
|
||||
def test_hyperopt_int_parameter():
|
||||
from optuna.distributions import IntDistribution
|
||||
|
||||
HyperoptStateContainer.set_state(HyperoptState.INDICATORS)
|
||||
|
||||
with pytest.raises(OperationalException, match=r"Name is determined.*"):
|
||||
IntParameter(low=0, high=5, default=1, name="hello")
|
||||
|
||||
with pytest.raises(OperationalException, match=r"IntParameter space must be.*"):
|
||||
IntParameter(low=0, default=5, space="buy")
|
||||
|
||||
with pytest.raises(OperationalException, match=r"IntParameter space invalid\."):
|
||||
IntParameter([0, 10], high=7, default=5, space="buy")
|
||||
|
||||
intpar = IntParameter(low=0, high=5, default=1, space="buy")
|
||||
assert intpar.value == 1
|
||||
assert isinstance(intpar.get_space(""), IntDistribution)
|
||||
assert isinstance(intpar.range, range)
|
||||
assert len(list(intpar.range)) == 1
|
||||
# Range contains ONLY the default / value.
|
||||
assert list(intpar.range) == [intpar.value]
|
||||
intpar.in_space = True
|
||||
|
||||
assert len(list(intpar.range)) == 6
|
||||
assert list(intpar.range) == [0, 1, 2, 3, 4, 5]
|
||||
|
||||
HyperoptStateContainer.set_state(HyperoptState.OPTIMIZE)
|
||||
assert len(list(intpar.range)) == 1
|
||||
|
||||
|
||||
def test_hyperopt_real_parameter():
|
||||
HyperoptStateContainer.set_state(HyperoptState.INDICATORS)
|
||||
from optuna.distributions import FloatDistribution
|
||||
|
||||
with pytest.raises(OperationalException, match=r"RealParameter space must be.*"):
|
||||
RealParameter(low=0, default=5, space="buy")
|
||||
|
||||
with pytest.raises(OperationalException, match=r"RealParameter space invalid\."):
|
||||
RealParameter([0, 10], high=7, default=5, space="buy")
|
||||
|
||||
fltpar = RealParameter(low=0.0, high=5.5, default=1.0, space="buy")
|
||||
assert fltpar.value == 1.0
|
||||
assert isinstance(fltpar.get_space(""), FloatDistribution)
|
||||
|
||||
assert not hasattr(fltpar, "range")
|
||||
|
||||
|
||||
def test_hyperopt_decimal_parameter():
|
||||
HyperoptStateContainer.set_state(HyperoptState.INDICATORS)
|
||||
# TODO: Check for get_space??
|
||||
from freqtrade.optimize.space import SKDecimal
|
||||
|
||||
with pytest.raises(OperationalException, match=r"DecimalParameter space must be.*"):
|
||||
DecimalParameter(low=0, default=5, space="buy")
|
||||
|
||||
with pytest.raises(OperationalException, match=r"DecimalParameter space invalid\."):
|
||||
DecimalParameter([0, 10], high=7, default=5, space="buy")
|
||||
|
||||
decimalpar = DecimalParameter(low=0.0, high=0.5, default=0.14, decimals=1, space="buy")
|
||||
assert decimalpar.value == 0.1
|
||||
assert isinstance(decimalpar.get_space(""), SKDecimal)
|
||||
assert isinstance(decimalpar.range, list)
|
||||
assert len(list(decimalpar.range)) == 1
|
||||
# Range contains ONLY the default / value.
|
||||
assert list(decimalpar.range) == [decimalpar.value]
|
||||
decimalpar.in_space = True
|
||||
assert len(list(decimalpar.range)) == 6
|
||||
assert list(decimalpar.range) == [0.0, 0.1, 0.2, 0.3, 0.4, 0.5]
|
||||
|
||||
decimalpar2 = DecimalParameter(low=0.01, high=0.03, decimals=3, default=0.02, space="buy")
|
||||
decimalpar2.in_space = True
|
||||
assert len(list(decimalpar2.range)) == 21
|
||||
expected_range = [round(0.01 + i * 0.001, 3) for i in range(21)]
|
||||
assert list(decimalpar2.range) == expected_range
|
||||
assert decimalpar2.value == 0.02
|
||||
decimalpar2.value = 0.022222
|
||||
assert decimalpar2.value == 0.022
|
||||
|
||||
HyperoptStateContainer.set_state(HyperoptState.OPTIMIZE)
|
||||
assert len(list(decimalpar.range)) == 1
|
||||
|
||||
|
||||
def test_hyperopt_categorical_parameter():
|
||||
HyperoptStateContainer.set_state(HyperoptState.INDICATORS)
|
||||
from optuna.distributions import CategoricalDistribution
|
||||
|
||||
with pytest.raises(OperationalException, match=r"CategoricalParameter space must.*"):
|
||||
CategoricalParameter(["aa"], default="aa", space="buy")
|
||||
|
||||
with pytest.raises(TypeError):
|
||||
BaseParameter(opt_range=[0, 1], default=1, space="buy")
|
||||
|
||||
catpar = CategoricalParameter(
|
||||
["buy_rsi", "buy_macd", "buy_none"], default="buy_macd", space="buy"
|
||||
)
|
||||
assert catpar.value == "buy_macd"
|
||||
assert isinstance(catpar.get_space(""), CategoricalDistribution)
|
||||
assert isinstance(catpar.range, list)
|
||||
assert len(list(catpar.range)) == 1
|
||||
# Range contains ONLY the default / value.
|
||||
assert list(catpar.range) == [catpar.value]
|
||||
catpar.in_space = True
|
||||
assert len(list(catpar.range)) == 3
|
||||
assert list(catpar.range) == ["buy_rsi", "buy_macd", "buy_none"]
|
||||
|
||||
boolpar = BooleanParameter(default=True, space="buy")
|
||||
assert boolpar.value is True
|
||||
assert isinstance(boolpar.get_space(""), CategoricalDistribution)
|
||||
assert isinstance(boolpar.range, list)
|
||||
assert len(list(boolpar.range)) == 1
|
||||
|
||||
boolpar.in_space = True
|
||||
assert len(list(boolpar.range)) == 2
|
||||
|
||||
assert list(boolpar.range) == [True, False]
|
||||
|
||||
HyperoptStateContainer.set_state(HyperoptState.OPTIMIZE)
|
||||
assert len(list(catpar.range)) == 1
|
||||
assert len(list(boolpar.range)) == 1
|
||||
@@ -6,7 +6,6 @@ from pathlib import Path
|
||||
from unittest.mock import MagicMock
|
||||
|
||||
import pytest
|
||||
from jsonschema import ValidationError
|
||||
|
||||
from freqtrade.commands import Arguments
|
||||
from freqtrade.configuration import (
|
||||
@@ -51,20 +50,20 @@ def test_load_config_missing_attributes(default_conf) -> None:
|
||||
conf = deepcopy(default_conf)
|
||||
conf.pop("exchange")
|
||||
|
||||
with pytest.raises(ValidationError, match=r".*'exchange' is a required property.*"):
|
||||
with pytest.raises(ConfigurationError, match=r".*'exchange' is a required property.*"):
|
||||
validate_config_schema(conf)
|
||||
|
||||
conf = deepcopy(default_conf)
|
||||
conf.pop("stake_currency")
|
||||
conf["runmode"] = RunMode.DRY_RUN
|
||||
with pytest.raises(ValidationError, match=r".*'stake_currency' is a required property.*"):
|
||||
with pytest.raises(ConfigurationError, match=r".*'stake_currency' is a required property.*"):
|
||||
validate_config_schema(conf)
|
||||
|
||||
|
||||
def test_load_config_incorrect_stake_amount(default_conf) -> None:
|
||||
default_conf["stake_amount"] = "fake"
|
||||
|
||||
with pytest.raises(ValidationError, match=r".*'fake' does not match 'unlimited'.*"):
|
||||
with pytest.raises(ConfigurationError, match=r".*'fake' does not match 'unlimited'.*"):
|
||||
validate_config_schema(default_conf)
|
||||
|
||||
|
||||
@@ -1075,7 +1074,7 @@ def test_load_config_default_exchange(all_conf) -> None:
|
||||
|
||||
assert "exchange" not in all_conf
|
||||
|
||||
with pytest.raises(ValidationError, match=r"'exchange' is a required property"):
|
||||
with pytest.raises(ConfigurationError, match=r"'exchange' is a required property"):
|
||||
validate_config_schema(all_conf)
|
||||
|
||||
|
||||
@@ -1088,14 +1087,14 @@ def test_load_config_default_exchange_name(all_conf) -> None:
|
||||
|
||||
assert "name" not in all_conf["exchange"]
|
||||
|
||||
with pytest.raises(ValidationError, match=r"'name' is a required property"):
|
||||
with pytest.raises(ConfigurationError, match=r"'name' is a required property"):
|
||||
validate_config_schema(all_conf)
|
||||
|
||||
|
||||
def test_load_config_stoploss_exchange_limit_ratio(all_conf) -> None:
|
||||
all_conf["order_types"]["stoploss_on_exchange_limit_ratio"] = 1.15
|
||||
|
||||
with pytest.raises(ValidationError, match=r"1.15 is greater than the maximum"):
|
||||
with pytest.raises(ConfigurationError, match=r"1.15 is greater than the maximum"):
|
||||
validate_config_schema(all_conf)
|
||||
|
||||
|
||||
|
||||
@@ -5,11 +5,11 @@ import talib.abstract as ta
|
||||
def test_talib_bollingerbands_near_zero_values():
|
||||
inputs = pd.DataFrame(
|
||||
[
|
||||
{"close": 0.00000010},
|
||||
{"close": 0.00000011},
|
||||
{"close": 0.00000012},
|
||||
{"close": 0.00000013},
|
||||
{"close": 0.00000014},
|
||||
{"close": 0.000010},
|
||||
{"close": 0.000011},
|
||||
{"close": 0.000012},
|
||||
{"close": 0.000013},
|
||||
{"close": 0.000014},
|
||||
]
|
||||
)
|
||||
bollinger = ta.BBANDS(inputs, matype=0, timeperiod=2)
|
||||
|
||||
Reference in New Issue
Block a user