Compare commits

..

4 Commits

Author   | SHA1       | Message                                                                     | Date
Matthias | 6eba5eedbe | Merge pull request #12562 from freqtrade/new_release (New release 2025.11)  | 2025-11-30 07:54:30 +01:00
Matthias | 3b55e577c2 | chore: bump version to 2025.11                                              | 2025-11-29 13:00:52 +01:00
Matthias | 8e031a01e6 | Merge branch 'stable' into new_release                                      | 2025-11-29 13:00:27 +01:00
Matthias | 4f1e249574 | Merge pull request #12452 from freqtrade/new_release (New release 2025.10)  | 2025-10-31 16:53:02 +01:00
65 changed files with 2403 additions and 2466 deletions

View File

@@ -2,7 +2,7 @@ version: 2
updates:
- package-ecosystem: docker
cooldown:
default-days: 7
default-days: 4
directories:
- "/"
- "/docker"
@@ -16,7 +16,7 @@ updates:
- package-ecosystem: devcontainers
directory: "/"
cooldown:
default-days: 7
default-days: 4
schedule:
interval: daily
open-pull-requests-limit: 10
@@ -24,7 +24,7 @@ updates:
- package-ecosystem: pip
directory: "/"
cooldown:
default-days: 7
default-days: 4
exclude:
- ccxt
schedule:
@@ -51,7 +51,7 @@ updates:
- package-ecosystem: "github-actions"
directory: "/"
cooldown:
default-days: 7
default-days: 4
schedule:
interval: "weekly"
open-pull-requests-limit: 10

View File

@@ -15,7 +15,7 @@ jobs:
environment:
name: develop
steps:
- uses: actions/checkout@v6.0.0
- uses: actions/checkout@v5
with:
persist-credentials: false
@@ -34,7 +34,7 @@ jobs:
run: python build_helpers/binance_update_lev_tiers.py
- uses: peter-evans/create-pull-request@84ae59a2cdc2258d6fa0732dd66352dddae2a412 # v7.0.9
- uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7.0.8
with:
token: ${{ secrets.REPO_SCOPED_TOKEN }}
add-paths: freqtrade/exchange/binance_leverage_tiers.json

View File

@@ -28,7 +28,7 @@ jobs:
python-version: ["3.11", "3.12", "3.13"]
steps:
- uses: actions/checkout@v6.0.0
- uses: actions/checkout@v5
with:
persist-credentials: false
@@ -38,7 +38,7 @@ jobs:
python-version: ${{ matrix.python-version }}
- name: Install uv
uses: astral-sh/setup-uv@1e862dfacbd1d6d858c55d9b792c756523627244 # v7.1.4
uses: astral-sh/setup-uv@85856786d1ce8acfbcc2f13a5f3fbd6b938f9f41 # v7.1.2
with:
activate-environment: true
enable-cache: true
@@ -177,7 +177,7 @@ jobs:
name: "Mypy Version Check"
runs-on: ubuntu-24.04
steps:
- uses: actions/checkout@v6.0.0
- uses: actions/checkout@v5
with:
persist-credentials: false
@@ -195,7 +195,7 @@ jobs:
name: "Pre-commit checks"
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v6.0.0
- uses: actions/checkout@v5
with:
persist-credentials: false
@@ -208,7 +208,7 @@ jobs:
name: "Documentation build"
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v6.0.0
- uses: actions/checkout@v5
with:
persist-credentials: false
@@ -240,7 +240,7 @@ jobs:
name: "Tests and Linting - Online tests"
runs-on: ubuntu-24.04
steps:
- uses: actions/checkout@v6.0.0
- uses: actions/checkout@v5
with:
persist-credentials: false
@@ -250,7 +250,7 @@ jobs:
python-version: "3.12"
- name: Install uv
uses: astral-sh/setup-uv@1e862dfacbd1d6d858c55d9b792c756523627244 # v7.1.4
uses: astral-sh/setup-uv@85856786d1ce8acfbcc2f13a5f3fbd6b938f9f41 # v7.1.2
with:
activate-environment: true
enable-cache: true
@@ -320,7 +320,7 @@ jobs:
with:
jobs: ${{ toJSON(needs) }}
- uses: actions/checkout@v6.0.0
- uses: actions/checkout@v5
with:
persist-credentials: false
@@ -367,7 +367,7 @@ jobs:
id-token: write
steps:
- uses: actions/checkout@v6.0.0
- uses: actions/checkout@v5
with:
persist-credentials: false
@@ -396,7 +396,7 @@ jobs:
id-token: write
steps:
- uses: actions/checkout@v6.0.0
- uses: actions/checkout@v5
with:
persist-credentials: false

View File

@@ -19,7 +19,7 @@ jobs:
name: Deploy Docs through mike
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v6.0.0
- uses: actions/checkout@v5
with:
persist-credentials: true

View File

@@ -24,7 +24,7 @@ jobs:
packages: write
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v6.0.0
- uses: actions/checkout@v5
with:
persist-credentials: false
- name: Login to GitHub Container Registry

View File

@@ -33,7 +33,7 @@ jobs:
if: github.repository == 'freqtrade/freqtrade'
steps:
- uses: actions/checkout@v6.0.0
- uses: actions/checkout@v5
with:
persist-credentials: false
@@ -152,7 +152,7 @@ jobs:
if: github.repository == 'freqtrade/freqtrade'
steps:
- uses: actions/checkout@v6.0.0
- uses: actions/checkout@v5
with:
persist-credentials: false

View File

@@ -11,7 +11,7 @@ jobs:
dockerHubDescription:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v6.0.0
- uses: actions/checkout@v5
with:
persist-credentials: false

View File

@@ -13,7 +13,7 @@ jobs:
auto-update:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v6.0.0
- uses: actions/checkout@v5
with:
persist-credentials: false
@@ -28,7 +28,7 @@ jobs:
- name: Run auto-update
run: pre-commit autoupdate
- uses: peter-evans/create-pull-request@84ae59a2cdc2258d6fa0732dd66352dddae2a412 # v7.0.9
- uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7.0.8
with:
token: ${{ secrets.REPO_SCOPED_TOKEN }}
add-paths: .pre-commit-config.yaml

View File

@@ -22,9 +22,9 @@ jobs:
# actions: read # only needed for private repos
steps:
- name: Checkout repository
uses: actions/checkout@v6.0.0
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
with:
persist-credentials: false
- name: Run zizmor 🌈
uses: zizmorcore/zizmor-action@e639db99335bc9038abc0e066dfcd72e23d26fb4 # v0.3.0
uses: zizmorcore/zizmor-action@e673c3917a1aef3c65c972347ed84ccd013ecda4 # v0.2.0

View File

@@ -21,7 +21,7 @@ repos:
# stages: [push]
- repo: https://github.com/pre-commit/mirrors-mypy
rev: "v1.19.0"
rev: "v1.18.2"
hooks:
- id: mypy
exclude: build_helpers
@@ -31,7 +31,7 @@ repos:
- types-requests==2.32.4.20250913
- types-tabulate==0.9.0.20241207
- types-python-dateutil==2.9.0.20251115
- scipy-stubs==1.16.3.1
- scipy-stubs==1.16.3.0
- SQLAlchemy==2.0.44
# stages: [push]
@@ -44,7 +44,7 @@ repos:
- repo: https://github.com/charliermarsh/ruff-pre-commit
# Ruff version.
rev: 'v0.14.7'
rev: 'v0.14.6'
hooks:
- id: ruff
- id: ruff-format
@@ -83,6 +83,6 @@ repos:
# Ensure github actions remain safe
- repo: https://github.com/woodruffw/zizmor-pre-commit
rev: v1.18.0
rev: v1.16.3
hooks:
- id: zizmor

View File

@@ -11,7 +11,6 @@ usage: freqtrade download-data [-h] [-v] [--no-color] [--logfile FILE] [-V]
[--data-format-ohlcv {json,jsongz,feather,parquet}]
[--data-format-trades {json,jsongz,feather,parquet}]
[--trading-mode {spot,margin,futures}]
[--candle-types {spot,futures,mark,index,premiumIndex,funding_rate} [{spot,futures,mark,index,premiumIndex,funding_rate} ...]]
[--prepend]
options:
@@ -51,11 +50,6 @@ options:
`feather`).
--trading-mode, --tradingmode {spot,margin,futures}
Select Trading mode
--candle-types {spot,futures,mark,index,premiumIndex,funding_rate} [{spot,futures,mark,index,premiumIndex,funding_rate} ...]
Select candle type to download. Defaults to the
necessary candles for the selected trading mode (e.g.
'spot' or ('futures', 'funding_rate' and 'mark') for
futures).
--prepend Allow data prepending. (Data-appending is disabled)
Common arguments:

View File

@@ -60,7 +60,6 @@ freqtrade download-data --exchange binance --pairs ".*/USDT"
* Given starting points are ignored if data is already available, downloading only missing data up to today.
* Use `--timeframes` to specify what timeframe download the historical candle (OHLCV) data for. Default is `--timeframes 1m 5m` which will download 1-minute and 5-minute data.
* To use exchange, timeframe and list of pairs as defined in your configuration file, use the `-c/--config` option. With this, the script uses the whitelist defined in the config as the list of currency pairs to download data for and does not require the pairs.json file. You can combine `-c/--config` with most other options.
* When downloading futures data (`--trading-mode futures` or a configuration specifying futures mode), freqtrade will automatically download the necessary candle types (e.g. `mark` and `funding_rate` candles) unless specified otherwise via `--candle-types`.
??? Note "Permission denied errors"
If your configuration directory `user_data` was made by docker, you may get the following error:

View File

@@ -98,33 +98,3 @@ Please use configuration based [log setup](advanced-setup.md#advanced-logging) i
The edge module has been deprecated in 2023.9 and removed in 2025.6.
All functionalities of edge have been removed, and having edge configured will result in an error.
## Adjustment to dynamic funding rate handling
With version 2025.12, the handling of dynamic funding rates has been adjusted to also support dynamic funding rates down to 1h funding intervals.
As a consequence, the mark and funding rate timeframes have been changed to 1h for every supported futures exchange.
As the timeframe for both mark and funding_fee candles has changed (usually from 8h to 1h) - already downloaded data will have to be adjusted or partially re-downloaded.
You can either re-download everything (`freqtrade download-data [...] --erase` - :warning: can take a long time) - or download the updated data selectively.
### Selective data re-download
The script below should serve as an example - you may need to adjust the timeframe and exchange to your needs!
``` bash
# Cleanup no longer needed data
rm user_data/data/<exchange>/futures/*-mark-*
rm user_data/data/<exchange>/futures/*-funding_rate-*
# download new data (only required once to fix the mark and funding fee data)
freqtrade download-data -t 1h --trading-mode futures --candle-types funding_rate mark [...] --timerange <full timerange you've got other data for>
```
The result of the above will be that your funding_rates and mark data will have the 1h timeframe.
you can verify this with `freqtrade list-data --exchange <yourexchange> --show`.
!!! Note "Additional arguments"
Additional arguments to the above commands may be necessary, like configuration files or explicit user_data if they deviate from the default.
**Hyperliquid** is a special case now - which will no longer require 1h mark data - but will use regular candles instead (this data never existed and is identical to 1h futures candles). As we don't support download-data for hyperliquid (they don't provide historic data) - there won't be actions necessary for hyperliquid users.
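
As a complement to the cleanup snippet above, a small dry-run sketch (not part of this diff) that only lists the files the `rm` patterns would touch; the glob patterns mirror those commands and "binance" is a placeholder exchange directory.

``` python
# Dry-run helper: show which local futures data files the cleanup above would
# remove, without deleting anything. Patterns mirror the `rm` commands; adjust
# the exchange directory ("binance" here is only an example).
from pathlib import Path

futures_dir = Path("user_data/data/binance/futures")

candidates = sorted(
    path
    for pattern in ("*-mark-*", "*-funding_rate-*")
    for path in futures_dir.glob(pattern)
)
if not candidates:
    print(f"No mark/funding_rate files matched in {futures_dir}")
for path in candidates:
    print(f"would remove: {path}")
```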

View File

@@ -2,6 +2,6 @@ markdown==3.10
mkdocs==1.6.1
mkdocs-material==9.7.0
mdx_truly_sane_lists==1.3
pymdown-extensions==10.17.2
pymdown-extensions==10.17.1
jinja2==3.1.6
mike==2.1.3

View File

@@ -634,7 +634,7 @@ class AwesomeStrategy(IStrategy):
## Custom order price rules
By default, freqtrade use the orderbook to automatically set an order price ([Relevant documentation](configuration.md#prices-used-for-orders)), you also have the option to create custom order prices based on your strategy.
By default, freqtrade use the orderbook to automatically set an order price([Relevant documentation](configuration.md#prices-used-for-orders)), you also have the option to create custom order prices based on your strategy.
You can use this feature by creating a `custom_entry_price()` function in your strategy file to customize entry prices and `custom_exit_price()` for exits.
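
A minimal sketch of the hook this paragraph refers to; the body is illustrative only, the remaining required strategy methods are omitted, and the exact callback signature should be taken from the strategy-callbacks documentation rather than from this example.

``` python
from datetime import datetime

from freqtrade.strategy import IStrategy


class AwesomeStrategy(IStrategy):
    # ... populate_indicators / entry / exit methods omitted ...

    def custom_entry_price(
        self,
        pair: str,
        trade,
        current_time: datetime,
        proposed_rate: float,
        entry_tag: str | None,
        side: str,
        **kwargs,
    ) -> float:
        # Example rule: undercut the orderbook-based proposal by 0.2% for longs,
        # go 0.2% above it for shorts. Returning proposed_rate keeps the default.
        return proposed_rate * (0.998 if side == "long" else 1.002)
```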

View File

@@ -1,6 +1,6 @@
"""Freqtrade bot"""
__version__ = "2025.12-dev"
__version__ = "2025.11"
if "dev" in __version__:
from pathlib import Path

View File

@@ -3,7 +3,6 @@ This module contains the argument manager class
"""
from argparse import ArgumentParser, Namespace, _ArgumentGroup
from copy import deepcopy
from functools import partial
from pathlib import Path
from typing import Any
@@ -175,7 +174,6 @@ ARGS_DOWNLOAD_DATA = [
"dataformat_ohlcv",
"dataformat_trades",
"trading_mode",
"candle_types",
"prepend_data",
]
@@ -350,11 +348,7 @@ class Arguments:
def _build_args(self, optionlist: list[str], parser: ArgumentParser | _ArgumentGroup) -> None:
for val in optionlist:
opt = AVAILABLE_CLI_OPTIONS[val]
options = deepcopy(opt.kwargs)
help_text = options.pop("help", None)
if opt.fthelp and isinstance(opt.fthelp, dict) and hasattr(parser, "prog"):
help_text = opt.fthelp.get(parser.prog, help_text)
parser.add_argument(*opt.cli, dest=val, help=help_text, **options)
parser.add_argument(*opt.cli, dest=val, **opt.kwargs)
def _build_subcommands(self) -> None:
"""

View File

@@ -38,14 +38,8 @@ def check_int_nonzero(value: str) -> int:
class Arg:
# Optional CLI arguments
def __init__(self, *args, fthelp: dict[str, str] | None = None, **kwargs):
"""
CLI Arguments - used to build subcommand parsers consistently.
:param fthelp: dict - fthelp per command - should be "freqtrade <command>": help_text
If not provided or not found, 'help' from kwargs is used instead.
"""
def __init__(self, *args, **kwargs):
self.cli = args
self.fthelp = fthelp
self.kwargs = kwargs
@@ -428,14 +422,6 @@ AVAILABLE_CLI_OPTIONS = {
),
"candle_types": Arg(
"--candle-types",
fthelp={
"freqtrade download-data": (
"Select candle type to download. "
"Defaults to the necessary candles for the selected trading mode "
"(e.g. 'spot' or ('futures', 'funding_rate' and 'mark') for futures)."
),
"_": "Select candle type to convert. Defaults to all available types.",
},
help="Select candle type to convert. Defaults to all available types.",
choices=[c.value for c in CandleType],
nargs="+",
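
To make the per-command help mechanism in these hunks easier to follow, here is a self-contained argparse sketch of the same idea (plain Python, not freqtrade code): an option carries a default `help` plus optional overrides keyed by the parser's `prog`.

``` python
import argparse
from copy import deepcopy


class Arg:
    """Option holder with an optional per-command help override (fthelp)."""

    def __init__(self, *args, fthelp: dict[str, str] | None = None, **kwargs):
        self.cli = args
        self.fthelp = fthelp
        self.kwargs = kwargs


candle_types = Arg(
    "--candle-types",
    help="Select candle type to convert. Defaults to all available types.",
    fthelp={"tool download-data": "Select candle type to download."},
    nargs="+",
)

parser = argparse.ArgumentParser(prog="tool download-data")
options = deepcopy(candle_types.kwargs)
help_text = options.pop("help", None)
if candle_types.fthelp:
    # Fall back to the generic help text for commands without an override.
    help_text = candle_types.fthelp.get(parser.prog, help_text)
parser.add_argument(*candle_types.cli, dest="candle_types", help=help_text, **options)
parser.print_help()
```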

View File

@@ -38,8 +38,7 @@ def ohlcv_to_dataframe(
cols = DEFAULT_DATAFRAME_COLUMNS
df = DataFrame(ohlcv, columns=cols)
# Floor date to seconds to account for exchange imprecisions
df["date"] = to_datetime(df["date"], unit="ms", utc=True).dt.floor("s")
df["date"] = to_datetime(df["date"], unit="ms", utc=True)
# Some exchanges return int values for Volume and even for OHLC.
# Convert them since TA-LIB indicators used in the strategy assume floats
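
For context on the `.dt.floor("s")` variant in this hunk, a standalone pandas illustration with toy timestamps (not taken from the diff): values that differ by a few milliseconds collapse onto the same second once floored, so rows from different endpoints can align on `date`.

``` python
import pandas as pd

# Two "candles" whose exchange-provided timestamps differ by 7 ms.
ts_ms = pd.Series([1_700_000_000_000, 1_700_000_000_007])
dates = pd.to_datetime(ts_ms, unit="ms", utc=True)

print(dates.nunique())                # 2 -> the rows would not line up on "date"
print(dates.dt.floor("s").nunique())  # 1 -> both rows land on the same second
```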

View File

@@ -348,22 +348,6 @@ class DataProvider:
)
return total_candles
def __fix_funding_rate_timeframe(
self, pair: str, timeframe: str | None, candle_type: str
) -> str | None:
if (
candle_type == CandleType.FUNDING_RATE
and (ff_tf := self.get_funding_rate_timeframe()) != timeframe
):
# TODO: does this message make sense? might be pointless as funding fees don't
# have a timeframe
logger.warning(
f"{pair}, {timeframe} requested - funding rate timeframe not matching {ff_tf}."
)
return ff_tf
return timeframe
def get_pair_dataframe(
self, pair: str, timeframe: str | None = None, candle_type: str = ""
) -> DataFrame:
@@ -377,7 +361,6 @@ class DataProvider:
:return: Dataframe for this pair
:param candle_type: '', mark, index, premiumIndex, or funding_rate
"""
timeframe = self.__fix_funding_rate_timeframe(pair, timeframe, candle_type)
if self.runmode in (RunMode.DRY_RUN, RunMode.LIVE):
# Get live OHLCV data.
data = self.ohlcv(pair=pair, timeframe=timeframe, candle_type=candle_type)
@@ -637,12 +620,3 @@ class DataProvider:
except ExchangeError:
logger.warning(f"Could not fetch market data for {pair}. Assuming no delisting.")
return None
def get_funding_rate_timeframe(self) -> str:
"""
Get the funding rate timeframe from exchange options
:return: Timeframe string
"""
if self._exchange is None:
raise OperationalException(NO_EXCHANGE_EXCEPTION)
return self._exchange.get_option("funding_fee_timeframe")

View File

@@ -397,9 +397,6 @@ class IDataHandler(ABC):
pairdf = self._ohlcv_load(
pair, timeframe, timerange=timerange_startup, candle_type=candle_type
)
if not pairdf.empty and candle_type == CandleType.FUNDING_RATE:
# Funding rate data is sometimes off by a couple of ms - floor to seconds
pairdf["date"] = pairdf["date"].dt.floor("s")
if self._check_empty_df(pairdf, pair, timeframe, candle_type, warn_no_data):
return pairdf
else:
@@ -511,15 +508,8 @@ class IDataHandler(ABC):
Applies to bybit and okx, where funding-fee and mark candles have different timeframes.
"""
paircombs = self.ohlcv_get_available_data(self._datadir, TradingMode.FUTURES)
ff_timeframe_s = timeframe_to_seconds(ff_timeframe)
funding_rate_combs = [
f
for f in paircombs
if f[2] == CandleType.FUNDING_RATE
and f[1] != ff_timeframe
# Only allow smaller timeframes to move from smaller to larger timeframes
and timeframe_to_seconds(f[1]) < ff_timeframe_s
f for f in paircombs if f[2] == CandleType.FUNDING_RATE and f[1] != ff_timeframe
]
if funding_rate_combs:

View File

@@ -308,15 +308,11 @@ def _download_pair_history(
candle_type=candle_type,
until_ms=until_ms if until_ms else None,
)
logger.info(
f"Downloaded data for {pair}, {timeframe}, {candle_type} with length "
f"{len(new_dataframe)}."
)
logger.info(f"Downloaded data for {pair} with length {len(new_dataframe)}.")
else:
new_dataframe = pair_candles
logger.info(
f"Downloaded data for {pair}, {timeframe}, {candle_type} with length "
f"{len(new_dataframe)}. Parallel Method."
f"Downloaded data for {pair} with length {len(new_dataframe)}. Parallel Method."
)
if data.empty:
@@ -353,7 +349,6 @@ def _download_pair_history(
def refresh_backtest_ohlcv_data(
exchange: Exchange,
*,
pairs: list[str],
timeframes: list[str],
datadir: Path,
@@ -364,7 +359,6 @@ def refresh_backtest_ohlcv_data(
data_format: str | None = None,
prepend: bool = False,
progress_tracker: CustomProgress | None = None,
candle_types: list[CandleType] | None = None,
no_parallel_download: bool = False,
) -> list[str]:
"""
@@ -377,44 +371,10 @@ def refresh_backtest_ohlcv_data(
pairs_not_available = []
fast_candles: dict[PairWithTimeframe, DataFrame] = {}
data_handler = get_datahandler(datadir, data_format)
def_candletype = CandleType.SPOT if trading_mode != "futures" else CandleType.FUTURES
if trading_mode != "futures":
# Ignore user passed candle types for non-futures trading
timeframes_with_candletype = [(tf, def_candletype) for tf in timeframes]
else:
# Filter out SPOT candle type for futures trading
candle_types = (
[ct for ct in candle_types if ct != CandleType.SPOT] if candle_types else None
)
fr_candle_type = CandleType.from_string(exchange.get_option("mark_ohlcv_price"))
tf_funding_rate = exchange.get_option("funding_fee_timeframe")
tf_mark = exchange.get_option("mark_ohlcv_timeframe")
if candle_types:
for ct in candle_types:
exchange.verify_candle_type_support(ct)
timeframes_with_candletype = [
(tf, ct)
for ct in candle_types
for tf in timeframes
if ct != CandleType.FUNDING_RATE
]
else:
# Default behavior
timeframes_with_candletype = [(tf, def_candletype) for tf in timeframes]
timeframes_with_candletype.append((tf_mark, fr_candle_type))
if not candle_types or CandleType.FUNDING_RATE in candle_types:
# All exchanges need FundingRate for futures trading.
# The timeframe is aligned to the mark-price timeframe.
timeframes_with_candletype.append((tf_funding_rate, CandleType.FUNDING_RATE))
# Deduplicate list ...
timeframes_with_candletype = list(dict.fromkeys(timeframes_with_candletype))
logger.debug(
"Downloading %s.", ", ".join(f'"{tf} {ct}"' for tf, ct in timeframes_with_candletype)
)
candle_type = CandleType.get_default(trading_mode)
with progress_tracker as progress:
timeframe_task = progress.add_task("Timeframe", total=len(timeframes_with_candletype))
tf_length = len(timeframes) if trading_mode != "futures" else len(timeframes) + 2
timeframe_task = progress.add_task("Timeframe", total=tf_length)
pair_task = progress.add_task("Downloading data...", total=len(pairs))
for pair in pairs:
@@ -425,7 +385,7 @@ def refresh_backtest_ohlcv_data(
pairs_not_available.append(f"{pair}: Pair not available on exchange.")
logger.info(f"Skipping pair {pair}...")
continue
for timeframe, candle_type in timeframes_with_candletype:
for timeframe in timeframes:
# Get fast candles via parallel method on first loop through per timeframe
# and candle type. Downloads all the pairs in the list and stores them.
# Also skips if only 1 pair/timeframe combination is scheduled for download.
@@ -452,7 +412,7 @@ def refresh_backtest_ohlcv_data(
# get the already downloaded pair candles if they exist
pair_candles = fast_candles.pop((pair, timeframe, candle_type), None)
progress.update(timeframe_task, description=f"Timeframe {timeframe} {candle_type}")
progress.update(timeframe_task, description=f"Timeframe {timeframe}")
logger.debug(f"Downloading pair {pair}, {candle_type}, interval {timeframe}.")
_download_pair_history(
pair=pair,
@@ -468,6 +428,33 @@ def refresh_backtest_ohlcv_data(
pair_candles=pair_candles, # optional pass of dataframe of parallel candles
)
progress.update(timeframe_task, advance=1)
if trading_mode == "futures":
# Predefined candletype (and timeframe) depending on exchange
# Downloads what is necessary to backtest based on futures data.
tf_mark = exchange.get_option("mark_ohlcv_timeframe")
tf_funding_rate = exchange.get_option("funding_fee_timeframe")
fr_candle_type = CandleType.from_string(exchange.get_option("mark_ohlcv_price"))
# All exchanges need FundingRate for futures trading.
# The timeframe is aligned to the mark-price timeframe.
combs = ((CandleType.FUNDING_RATE, tf_funding_rate), (fr_candle_type, tf_mark))
for candle_type_f, tf in combs:
logger.debug(f"Downloading pair {pair}, {candle_type_f}, interval {tf}.")
_download_pair_history(
pair=pair,
datadir=datadir,
exchange=exchange,
timerange=timerange,
data_handler=data_handler,
timeframe=str(tf),
new_pairs_days=new_pairs_days,
candle_type=candle_type_f,
erase=erase,
prepend=prepend,
)
progress.update(
timeframe_task, advance=1, description=f"Timeframe {candle_type_f}, {tf}"
)
progress.update(pair_task, advance=1)
progress.update(timeframe_task, description="Timeframe")
@@ -813,7 +800,6 @@ def download_data(
trading_mode=config.get("trading_mode", "spot"),
prepend=config.get("prepend_data", False),
progress_tracker=progress_tracker,
candle_types=config.get("candle_types"),
no_parallel_download=config.get("no_parallel_download", False),
)
finally:
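
A toy sketch (plain Python, not freqtrade code) of the combination-building step in the larger variant of `refresh_backtest_ohlcv_data` above: user timeframes get the default futures candle type, mark and funding_rate combinations are appended, and the list is de-duplicated while preserving order.

``` python
# Toy values; the real mark/funding timeframes come from exchange options.
timeframes = ["5m", "1h"]
tf_mark = "1h"          # assumed mark_ohlcv_timeframe
tf_funding_rate = "1h"  # assumed funding_fee_timeframe

combos = [(tf, "futures") for tf in timeframes]
combos.append((tf_mark, "mark"))
combos.append((tf_funding_rate, "funding_rate"))

# dict.fromkeys keeps the first occurrence of each tuple and preserves order.
combos = list(dict.fromkeys(combos))
print(combos)
# [('5m', 'futures'), ('1h', 'futures'), ('1h', 'mark'), ('1h', 'funding_rate')]
```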

View File

@@ -74,10 +74,9 @@ def combined_dataframes_with_rel_mean(
df_comb = combine_dataframes_by_column(data, column)
# Trim dataframes to the given timeframe
df_comb = df_comb.iloc[(df_comb.index >= fromdt) & (df_comb.index < todt)]
rel_mean = df_comb.pct_change().mean(axis=1).fillna(0).cumsum()
df_comb["count"] = df_comb.count(axis=1)
df_comb["mean"] = df_comb.mean(axis=1)
df_comb["rel_mean"] = rel_mean
df_comb["rel_mean"] = df_comb["mean"].pct_change().fillna(0).cumsum()
return df_comb[["mean", "rel_mean", "count"]]
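
The two `rel_mean` definitions in this hunk are not equivalent; a toy pandas comparison (numbers invented for illustration) shows the difference between averaging per-pair returns and taking the return of the mean price.

``` python
import pandas as pd

prices = pd.DataFrame({"A": [100.0, 110.0, 121.0], "B": [10.0, 9.0, 8.1]})

# Variant 1: mean of each pair's percentage change, then cumulative sum.
per_pair = prices.pct_change().mean(axis=1).fillna(0).cumsum()
# Variant 2: percentage change of the row-wise mean price, then cumulative sum.
of_mean = prices.mean(axis=1).pct_change().fillna(0).cumsum()

print(per_pair.tolist())  # [0.0, 0.0, 0.0] -> each pair weighted equally
print(of_mean.tolist())   # rises, dominated by the higher-priced pair A
```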

File diff suppressed because it is too large

View File

@@ -35,6 +35,7 @@ class Bitget(Exchange):
"order_time_in_force": ["GTC", "FOK", "IOC", "PO"],
}
_ft_has_futures: FtHas = {
"mark_ohlcv_timeframe": "4h",
"funding_fee_candle_limit": 100,
"has_delisting": True,
}

View File

@@ -38,6 +38,8 @@ class Bybit(Exchange):
}
_ft_has_futures: FtHas = {
"ohlcv_has_history": True,
"mark_ohlcv_timeframe": "4h",
"funding_fee_timeframe": "8h",
"funding_fee_candle_limit": 200,
"stoploss_on_exchange": True,
"stoploss_order_types": {"limit": "limit", "market": "market"},

View File

@@ -97,9 +97,6 @@ EXCHANGE_HAS_OPTIONAL = [
# 'fetchLeverageTiers', # Futures initialization
# 'fetchMarketLeverageTiers', # Futures initialization
# 'fetchOpenOrders', 'fetchClosedOrders', # 'fetchOrders', # Refinding balance...
# "fetchPremiumIndexOHLCV", # Futures additional data
# "fetchMarkOHLCV", # Futures additional data
# "fetchIndexOHLCV", # Futures additional data
# ccxt.pro
"watchOHLCV",
]

View File

@@ -104,7 +104,6 @@ from freqtrade.misc import (
deep_merge_dicts,
file_dump_json,
file_load_json,
safe_value_fallback,
safe_value_fallback2,
)
from freqtrade.util import FtTTLCache, PeriodicCache, dt_from_ts, dt_now
@@ -153,8 +152,8 @@ class Exchange:
"l2_limit_range_required": True, # Allow Empty L2 limit (kucoin)
"l2_limit_upper": None, # Upper limit for L2 limit
"mark_ohlcv_price": "mark",
"mark_ohlcv_timeframe": "1h",
"funding_fee_timeframe": "1h",
"mark_ohlcv_timeframe": "8h",
"funding_fee_timeframe": "8h",
"ccxt_futures_name": "swap",
"needs_trading_fees": False, # use fetch_trading_fees to cache fees
"order_props_in_contracts": ["amount", "filled", "remaining"],
@@ -1120,7 +1119,6 @@ class Exchange:
leverage: float,
params: dict | None = None,
stop_loss: bool = False,
stop_price: float | None = None,
) -> CcxtOrder:
now = dt_now()
order_id = f"dry_run_{side}_{pair}_{now.timestamp()}"
@@ -1147,7 +1145,7 @@ class Exchange:
}
if stop_loss:
dry_order["info"] = {"stopPrice": dry_order["price"]}
dry_order[self._ft_has["stop_price_prop"]] = stop_price or dry_order["price"]
dry_order[self._ft_has["stop_price_prop"]] = dry_order["price"]
# Workaround to avoid filling stoploss orders immediately
dry_order["ft_order_type"] = "stoploss"
orderbook: OrderBook | None = None
@@ -1165,11 +1163,7 @@ class Exchange:
if dry_order["type"] == "market" and not dry_order.get("ft_order_type"):
# Update market order pricing
slippage = 0.05
worst_rate = rate * ((1 + slippage) if side == "buy" else (1 - slippage))
average = self.get_dry_market_fill_price(
pair, side, amount, rate, worst_rate, orderbook
)
average = self.get_dry_market_fill_price(pair, side, amount, rate, orderbook)
dry_order.update(
{
"average": average,
@@ -1209,13 +1203,7 @@ class Exchange:
return dry_order
def get_dry_market_fill_price(
self,
pair: str,
side: str,
amount: float,
rate: float,
worst_rate: float,
orderbook: OrderBook | None,
self, pair: str, side: str, amount: float, rate: float, orderbook: OrderBook | None
) -> float:
"""
Get the market order fill price based on orderbook interpolation
@@ -1224,6 +1212,8 @@ class Exchange:
if not orderbook:
orderbook = self.fetch_l2_order_book(pair, 20)
ob_type: OBLiteral = "asks" if side == "buy" else "bids"
slippage = 0.05
max_slippage_val = rate * ((1 + slippage) if side == "buy" else (1 - slippage))
remaining_amount = amount
filled_value = 0.0
@@ -1247,10 +1237,11 @@ class Exchange:
forecast_avg_filled_price = max(filled_value, 0) / amount
# Limit max. slippage to specified value
if side == "buy":
forecast_avg_filled_price = min(forecast_avg_filled_price, worst_rate)
forecast_avg_filled_price = min(forecast_avg_filled_price, max_slippage_val)
else:
forecast_avg_filled_price = max(forecast_avg_filled_price, worst_rate)
forecast_avg_filled_price = max(forecast_avg_filled_price, max_slippage_val)
return self.price_to_precision(pair, forecast_avg_filled_price)
return rate
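
For orientation, toy numbers (not from the diff) showing how the 5% slippage cap bounds the forecast market-order fill price; only where the worst rate is computed differs between the two variants in this hunk.

``` python
slippage = 0.05
rate = 100.0                        # requested rate
forecast_avg_filled_price = 109.0   # pretend the orderbook walk came out this high

worst_rate = rate * (1 + slippage)  # 105.0 for a buy; (1 - slippage) for a sell
capped = min(forecast_avg_filled_price, worst_rate)
print(capped)  # 105.0 -> the forecast is clamped to the worst allowed buy rate
```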
@@ -1262,15 +1253,13 @@ class Exchange:
limit: float,
orderbook: OrderBook | None = None,
offset: float = 0.0,
is_stop: bool = False,
) -> bool:
if not self.exchange_has("fetchL2OrderBook"):
# True unless checking a stoploss order
return not is_stop
return True
if not orderbook:
orderbook = self.fetch_l2_order_book(pair, 1)
try:
if (side == "buy" and not is_stop) or (side == "sell" and is_stop):
if side == "buy":
price = orderbook["asks"][0][0]
if limit * (1 - offset) >= price:
return True
@@ -1289,38 +1278,6 @@ class Exchange:
"""
Check dry-run limit order fill and update fee (if it filled).
"""
if order["status"] != "closed" and order.get("ft_order_type") == "stoploss":
pair = order["symbol"]
if not orderbook and self.exchange_has("fetchL2OrderBook"):
orderbook = self.fetch_l2_order_book(pair, 20)
price = safe_value_fallback(order, self._ft_has["stop_price_prop"], "price")
crossed = self._dry_is_price_crossed(
pair, order["side"], price, orderbook, is_stop=True
)
if crossed:
average = self.get_dry_market_fill_price(
pair,
order["side"],
order["amount"],
price,
worst_rate=order["price"],
orderbook=orderbook,
)
order.update(
{
"status": "closed",
"filled": order["amount"],
"remaining": 0,
"average": average,
"cost": order["amount"] * average,
}
)
self.add_dry_order_fee(
pair,
order,
"taker" if immediate else "maker",
)
return order
if (
order["status"] != "closed"
and order["type"] in ["limit"]
@@ -1560,9 +1517,8 @@ class Exchange:
ordertype,
side,
amount,
limit_rate or stop_price_norm,
stop_price_norm,
stop_loss=True,
stop_price=stop_price_norm,
leverage=leverage,
)
return dry_order
@@ -2690,25 +2646,24 @@ class Exchange:
input_coroutines: list[Coroutine[Any, Any, OHLCVResponse]] = []
cached_pairs = []
for pair, timeframe, candle_type in set(pair_list):
if candle_type == CandleType.FUNDING_RATE and timeframe != (
ff_tf := self.get_option("funding_fee_timeframe")
):
# TODO: does this message make sense? would docs be better?
# if any, this should be cached to avoid log spam!
logger.warning(
f"Wrong funding rate timeframe {timeframe} for pair {pair}, "
f"downloading {ff_tf} instead."
)
timeframe = ff_tf
invalid_funding = (
candle_type == CandleType.FUNDING_RATE
and timeframe != self.get_option("funding_fee_timeframe")
)
invalid_timeframe = timeframe not in self.timeframes and candle_type in (
CandleType.SPOT,
CandleType.FUTURES,
)
if invalid_timeframe:
if invalid_timeframe or invalid_funding:
timeframes_ = (
", ".join(self.timeframes)
if candle_type != CandleType.FUNDING_RATE
else self.get_option("funding_fee_timeframe")
)
logger.warning(
f"Cannot download ({pair}, {timeframe}, {candle_type}) combination as this "
f"timeframe is not available on {self.name}. Available timeframes are "
f"{', '.join(self.timeframes)}."
f"{timeframes_}."
)
continue
@@ -2745,11 +2700,7 @@ class Exchange:
has_cache = cache and (pair, timeframe, c_type) in self._klines
# in case of existing cache, fill_missing happens after concatenation
ohlcv_df = ohlcv_to_dataframe(
ticks,
timeframe,
pair=pair,
fill_missing=not has_cache and c_type != CandleType.FUNDING_RATE,
drop_incomplete=drop_incomplete,
ticks, timeframe, pair=pair, fill_missing=not has_cache, drop_incomplete=drop_incomplete
)
# keeping parsed dataframe in cache
if cache:
@@ -2760,7 +2711,7 @@ class Exchange:
concat([old, ohlcv_df], axis=0),
timeframe,
pair,
fill_missing=c_type != CandleType.FUNDING_RATE,
fill_missing=True,
drop_incomplete=False,
)
candle_limit = self.ohlcv_candle_limit(timeframe, self._config["candle_type_def"])
@@ -2895,10 +2846,9 @@ class Exchange:
timeframe, candle_type=candle_type, since_ms=since_ms
)
if candle_type and candle_type not in (CandleType.SPOT, CandleType.FUTURES):
params.update({"price": candle_type.value})
if candle_type != CandleType.FUNDING_RATE:
if candle_type and candle_type not in (CandleType.SPOT, CandleType.FUTURES):
self.verify_candle_type_support(candle_type)
params.update({"price": str(candle_type)})
data = await self._api_async.fetch_ohlcv(
pair, timeframe=timeframe, since=since_ms, limit=candle_limit, params=params
)
@@ -2963,38 +2913,6 @@ class Exchange:
data = [[x["timestamp"], x["fundingRate"], 0, 0, 0, 0] for x in data]
return data
def check_candle_type_support(self, candle_type: CandleType) -> bool:
"""
Check that the exchange supports the given candle type.
:param candle_type: CandleType to verify
:return: True if supported, False otherwise
"""
if candle_type == CandleType.FUNDING_RATE:
if not self.exchange_has("fetchFundingRateHistory"):
return False
elif candle_type not in (CandleType.SPOT, CandleType.FUTURES):
mapping = {
CandleType.MARK: "fetchMarkOHLCV",
CandleType.INDEX: "fetchIndexOHLCV",
CandleType.PREMIUMINDEX: "fetchPremiumIndexOHLCV",
CandleType.FUNDING_RATE: "fetchFundingRateHistory",
}
_method = mapping.get(candle_type, "fetchOHLCV")
if not self.exchange_has(_method):
return False
return True
def verify_candle_type_support(self, candle_type: CandleType) -> None:
"""
Verify that the exchange supports the given candle type.
:param candle_type: CandleType to verify
:raises OperationalException: if the candle type is not supported
"""
if not self.check_candle_type_support(candle_type):
raise OperationalException(
f"Exchange {self._api.name} does not support fetching {candle_type} candles."
)
# fetch Trade data stuff
def needed_candle_for_trades_ms(self, timeframe: str, candle_type: CandleType) -> int:
@@ -3822,11 +3740,10 @@ class Exchange:
:param mark_rates: Dataframe containing Mark rates (Type mark_ohlcv_price)
:param futures_funding_rate: Fake funding rate to use if funding_rates are not available
"""
relevant_cols = ["date", "open_mark", "open_fund"]
if futures_funding_rate is None:
return mark_rates.merge(
funding_rates, on="date", how="inner", suffixes=["_mark", "_fund"]
)[relevant_cols]
)
else:
if len(funding_rates) == 0:
# No funding rate candles - full fillup with fallback variable
@@ -3839,23 +3756,15 @@ class Exchange:
"low": "low_mark",
"volume": "volume_mark",
}
)[relevant_cols]
)
else:
# Fill up missing funding_rate candles with fallback value
combined = mark_rates.merge(
funding_rates, on="date", how="left", suffixes=["_mark", "_fund"]
)
# Fill only leading missing funding rates so gaps stay untouched
first_valid_idx = combined["open_fund"].first_valid_index()
if first_valid_idx is None:
combined["open_fund"] = futures_funding_rate
else:
is_leading_na = (combined.index <= first_valid_idx) & combined[
"open_fund"
].isna()
combined.loc[is_leading_na, "open_fund"] = futures_funding_rate
return combined[relevant_cols].dropna()
combined["open_fund"] = combined["open_fund"].fillna(futures_funding_rate)
return combined
def calculate_funding_fees(
self,
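
The difference between the two fill strategies for missing funding-rate candles in this hunk, shown on a toy series (not freqtrade data): filling only the leading gap leaves mid-series gaps as NaN so they can be dropped later, while a plain `fillna` papers over every gap.

``` python
import pandas as pd

open_fund = pd.Series([None, None, 0.0001, None, 0.0002], dtype="float64")
fallback = 0.0

# Variant A: fill every missing value with the fallback rate.
filled_all = open_fund.fillna(fallback)

# Variant B: fill only the leading gap; later gaps stay NaN (and get dropped).
first_valid = open_fund.first_valid_index()  # real code also handles all-NaN
is_leading_na = (open_fund.index <= first_valid) & open_fund.isna()
filled_leading = open_fund.copy()
filled_leading[is_leading_na] = fallback

print(filled_all.tolist())      # [0.0, 0.0, 0.0001, 0.0, 0.0002]
print(filled_leading.tolist())  # [0.0, 0.0, 0.0001, nan, 0.0002]
```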

View File

@@ -37,9 +37,9 @@ class Hyperliquid(Exchange):
"stoploss_order_types": {"limit": "limit"},
"stoploss_blocks_assets": False,
"stop_price_prop": "stopPrice",
"funding_fee_timeframe": "1h",
"funding_fee_candle_limit": 500,
"uses_leverage_tiers": False,
"mark_ohlcv_price": "futures",
}
_supported_trading_mode_margin_pairs: list[tuple[TradingMode, MarginMode]] = [

View File

@@ -35,6 +35,7 @@ class Kraken(Exchange):
"trades_pagination_arg": "since",
"trades_pagination_overlap": False,
"trades_has_history": True,
"mark_ohlcv_timeframe": "4h",
}
_supported_trading_mode_margin_pairs: list[tuple[TradingMode, MarginMode]] = [

View File

@@ -29,6 +29,8 @@ class Okx(Exchange):
_ft_has: FtHas = {
"ohlcv_candle_limit": 100, # Warning, special case with data prior to X months
"mark_ohlcv_timeframe": "4h",
"funding_fee_timeframe": "8h",
"stoploss_order_types": {"limit": "limit"},
"stoploss_on_exchange": True,
"trades_has_history": False, # Endpoint doesn't have a "since" parameter
@@ -39,8 +41,8 @@ class Okx(Exchange):
"stop_price_type_field": "slTriggerPxType",
"stop_price_type_value_mapping": {
PriceType.LAST: "last",
PriceType.MARK: "mark",
PriceType.INDEX: "index",
PriceType.MARK: "index",
PriceType.INDEX: "mark",
},
"stoploss_blocks_assets": False,
"ws_enabled": True,

View File

@@ -1063,16 +1063,7 @@ class FreqtradeBot(LoggingMixin):
return True
def cancel_stoploss_on_exchange(self, trade: Trade, allow_nonblocking: bool = False) -> Trade:
"""
Cancels on exchange stoploss orders for the given trade.
:param trade: Trade for which to cancel stoploss order
:param allow_nonblocking: If True, will skip cancelling stoploss on exchange
if the exchange supports blocking stoploss orders.
"""
if allow_nonblocking and not self.exchange.get_option("stoploss_blocks_assets", True):
logger.info(f"Skipping cancelling stoploss on exchange for {trade}.")
return trade
def cancel_stoploss_on_exchange(self, trade: Trade) -> Trade:
# First cancelling stoploss on exchange ...
for oslo in trade.open_sl_orders:
try:
@@ -2097,7 +2088,7 @@ class FreqtradeBot(LoggingMixin):
limit = self.get_valid_price(custom_exit_price, proposed_limit_rate)
# First cancelling stoploss on exchange ...
trade = self.cancel_stoploss_on_exchange(trade, allow_nonblocking=True)
trade = self.cancel_stoploss_on_exchange(trade)
order_type = ordertype or self.strategy.order_types[exit_type]
if exit_check.exit_type == ExitType.EMERGENCY_EXIT:
@@ -2387,8 +2378,6 @@ class FreqtradeBot(LoggingMixin):
self.strategy.ft_stoploss_adjust(
current_rate, trade, datetime.now(UTC), profit, 0, after_fill=True
)
if not trade.is_open:
self.cancel_stoploss_on_exchange(trade)
# Updating wallets when order is closed
self.wallets.update()
return trade

View File

@@ -374,7 +374,6 @@ class Backtesting:
timerange=self.timerange,
startup_candles=0,
fail_without_data=True,
fill_up_missing=False,
data_format=self.config["dataformat_ohlcv"],
candle_type=CandleType.FUNDING_RATE,
)

View File

@@ -48,7 +48,7 @@ from freqtrade.leverage import interest
from freqtrade.misc import safe_value_fallback
from freqtrade.persistence.base import ModelBase, SessionType
from freqtrade.persistence.custom_data import CustomDataWrapper, _CustomData
from freqtrade.util import FtPrecise, dt_from_ts, dt_now, dt_ts, dt_ts_none, round_value
from freqtrade.util import FtPrecise, dt_from_ts, dt_now, dt_ts, dt_ts_none
logger = logging.getLogger(__name__)
@@ -654,10 +654,9 @@ class LocalTrade:
)
return (
f"Trade(id={self.id}, pair={self.pair}, amount={round_value(self.amount, 8)}, "
f"is_short={self.is_short or False}, "
f"leverage={round_value(self.leverage or 1.0, 1)}, "
f"open_rate={round_value(self.open_rate, 8)}, open_since={open_since})"
f"Trade(id={self.id}, pair={self.pair}, amount={self.amount:.8f}, "
f"is_short={self.is_short or False}, leverage={self.leverage or 1.0}, "
f"open_rate={self.open_rate:.8f}, open_since={open_since})"
)
def to_json(self, minified: bool = False) -> dict[str, Any]:

View File

@@ -104,11 +104,8 @@ def _create_and_merge_informative_pair(
):
asset = inf_data.asset or ""
timeframe = inf_data.timeframe
timeframe1 = inf_data.timeframe
fmt = inf_data.fmt
candle_type = inf_data.candle_type
if candle_type == CandleType.FUNDING_RATE:
timeframe1 = strategy.dp.get_funding_rate_timeframe()
config = strategy.config
@@ -135,10 +132,10 @@ def _create_and_merge_informative_pair(
fmt = "{base}_{quote}_" + fmt # Informatives of other pairs
inf_metadata = {"pair": asset, "timeframe": timeframe}
inf_dataframe = strategy.dp.get_pair_dataframe(asset, timeframe1, candle_type)
inf_dataframe = strategy.dp.get_pair_dataframe(asset, timeframe, candle_type)
if inf_dataframe.empty:
raise ValueError(
f"Informative dataframe for ({asset}, {timeframe1}, {candle_type}) is empty. "
f"Informative dataframe for ({asset}, {timeframe}, {candle_type}) is empty. "
"Can't populate informative indicators."
)
inf_dataframe = populate_indicators_fn(strategy, inf_dataframe, inf_metadata)
@@ -166,7 +163,7 @@ def _create_and_merge_informative_pair(
dataframe,
inf_dataframe,
strategy.timeframe,
timeframe1,
timeframe,
ffill=inf_data.ffill,
append_timeframe=False,
date_column=date_column,

View File

@@ -1718,7 +1718,7 @@ class IStrategy(ABC, HyperStrategyMixin):
timeout_unit = self.config.get("unfilledtimeout", {}).get("unit", "minutes")
timeout_kwargs = {timeout_unit: -timeout}
timeout_threshold = current_time + timedelta(**timeout_kwargs)
timedout = order.status == "open" and order.order_date_utc <= timeout_threshold
timedout = order.status == "open" and order.order_date_utc < timeout_threshold
if timedout:
return True
time_method = (
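
The one-character change in this hunk (`<=` vs `<`) only matters exactly at the threshold; a small datetime illustration with toy values:

``` python
from datetime import datetime, timedelta, timezone

# 10-minute unfilled timeout, order placed exactly 10 minutes ago.
current_time = datetime(2025, 11, 30, 12, 0, tzinfo=timezone.utc)
timeout_threshold = current_time + timedelta(minutes=-10)   # 11:50
order_date_utc = datetime(2025, 11, 30, 11, 50, tzinfo=timezone.utc)

print(order_date_utc <= timeout_threshold)  # True  -> order times out
print(order_date_utc < timeout_threshold)   # False -> order is still within time
```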

View File

@@ -90,16 +90,15 @@ def dt_humanize_delta(dt: datetime):
return humanize.naturaltime(dt)
def format_date(date: datetime | None, fallback: str = "") -> str:
def format_date(date: datetime | None) -> str:
"""
Return a formatted date string.
Returns an empty string if date is None.
:param date: datetime to format
:param fallback: value to return if date is None
"""
if date:
return date.strftime(DATETIME_PRINT_FORMAT)
return fallback
return ""
def format_ms_time(date: int | float) -> str:

View File

@@ -23,7 +23,7 @@ def strip_trailing_zeros(value: str) -> str:
return value.rstrip("0").rstrip(".")
def round_value(value: float | None, decimals: int, keep_trailing_zeros=False) -> str:
def round_value(value: float, decimals: int, keep_trailing_zeros=False) -> str:
"""
Round value to given decimals
:param value: Value to be rounded
@@ -31,7 +31,7 @@ def round_value(value: float | None, decimals: int, keep_trailing_zeros=False) -
:param keep_trailing_zeros: Keep trailing zeros "222.200" vs. "222.2"
:return: Rounded value as string
"""
if value is None or isnan(value):
if isnan(value):
return "N/A"
val = f"{value:.{decimals}f}"
if not keep_trailing_zeros:

View File

@@ -1,7 +1,7 @@
from freqtrade_client.ft_rest_client import FtRestClient
__version__ = "2025.12-dev"
__version__ = "2025.11"
if "dev" in __version__:
from pathlib import Path

View File

@@ -6,9 +6,9 @@
-r requirements-freqai-rl.txt
-r docs/requirements-docs.txt
ruff==0.14.6
ruff==0.14.5
mypy==1.18.2
pre-commit==4.5.0
pre-commit==4.4.0
pytest==9.0.1
pytest-asyncio==1.3.0
pytest-cov==7.0.0
@@ -18,13 +18,13 @@ pytest-timeout==2.4.0
pytest-xdist==3.8.0
isort==7.0.0
# For datetime mocking
time-machine==3.1.0
time-machine==3.0.0
# Convert jupyter notebooks to markdown documents
nbconvert==7.16.6
# mypy types
scipy-stubs==1.16.3.1 # keep in sync with `scipy` in `requirements-hyperopt.txt`
scipy-stubs==1.16.3.0 # keep in sync with `scipy` in `requirements-hyperopt.txt`
types-cachetools==6.2.0.20251022
types-filelock==3.2.7
types-requests==2.32.4.20250913

View File

@@ -7,6 +7,6 @@ scikit-learn==1.7.2
joblib==1.5.2
catboost==1.2.8; 'arm' not in platform_machine
lightgbm==4.6.0
xgboost==3.1.2
xgboost==3.1.1
tensorboard==2.20.0
datasieve==0.1.9

View File

@@ -7,7 +7,7 @@ ft-pandas-ta==0.3.16
ta-lib==0.6.8
technical==1.5.3
ccxt==4.5.24
ccxt==4.5.20
cryptography==46.0.3
aiohttp==3.13.2
SQLAlchemy==2.0.44
@@ -37,8 +37,8 @@ orjson==3.11.4
sdnotify==0.3.2
# API Server
fastapi==0.122.0
pydantic==2.12.5
fastapi==0.121.3
pydantic==2.12.4
uvicorn==0.38.0
pyjwt==2.10.1
aiofiles==25.1.0

View File

@@ -1767,7 +1767,7 @@ def test_start_list_data(testdatadir, capsys):
pargs["config"] = None
start_list_data(pargs)
captured = capsys.readouterr()
assert "Found 18 pair / timeframe combinations." in captured.out
assert "Found 16 pair / timeframe combinations." in captured.out
assert re.search(r".*Pair.*Timeframe.*Type.*\n", captured.out)
assert re.search(r"\n.* UNITTEST/BTC .* 1m, 5m, 8m, 30m .* spot |\n", captured.out)
@@ -1801,10 +1801,10 @@ def test_start_list_data(testdatadir, capsys):
start_list_data(pargs)
captured = capsys.readouterr()
assert "Found 5 pair / timeframe combinations." in captured.out
assert "Found 6 pair / timeframe combinations." in captured.out
assert re.search(r".*Pair.*Timeframe.*Type.*\n", captured.out)
assert re.search(r"\n.* XRP/USDT:USDT .* 5m, 1h .* futures |\n", captured.out)
assert re.search(r"\n.* XRP/USDT:USDT .* 1h.* mark |\n", captured.out)
assert re.search(r"\n.* XRP/USDT:USDT .* 1h, 8h .* mark |\n", captured.out)
args = [
"list-data",

View File

@@ -290,23 +290,20 @@ def test_combine_dataframes_with_mean(testdatadir):
def test_combined_dataframes_with_rel_mean(testdatadir):
pairs = ["BTC/USDT", "XRP/USDT"]
pairs = ["ETH/BTC", "ADA/BTC"]
data = load_data(datadir=testdatadir, pairs=pairs, timeframe="5m")
df = combined_dataframes_with_rel_mean(
data,
fromdt=data["BTC/USDT"].at[0, "date"],
todt=data["BTC/USDT"].at[data["BTC/USDT"].index[-1], "date"],
data, datetime(2018, 1, 12, tzinfo=UTC), datetime(2018, 1, 28, tzinfo=UTC)
)
assert isinstance(df, DataFrame)
assert "BTC/USDT" not in df.columns
assert "XRP/USDT" not in df.columns
assert "ETH/BTC" not in df.columns
assert "ADA/BTC" not in df.columns
assert "mean" in df.columns
assert "rel_mean" in df.columns
assert "count" in df.columns
assert df.iloc[0]["count"] == 2
assert df.iloc[-1]["count"] == 2
assert len(df) < len(data["BTC/USDT"])
assert df["rel_mean"].between(-0.5, 0.5).all()
assert len(df) < len(data["ETH/BTC"])
def test_combine_dataframes_with_mean_no_data(testdatadir):

View File

@@ -40,8 +40,6 @@ def test_datahandler_ohlcv_get_pairs(testdatadir):
"NXT/BTC",
"DASH/BTC",
"XRP/ETH",
"BTC/USDT",
"XRP/USDT",
}
pairs = JsonGzDataHandler.ohlcv_get_pairs(testdatadir, "8m", candle_type=CandleType.SPOT)
@@ -113,8 +111,6 @@ def test_datahandler_ohlcv_get_available_data(testdatadir):
("DASH/BTC", "5m", CandleType.SPOT),
("XRP/ETH", "1m", CandleType.SPOT),
("XRP/ETH", "5m", CandleType.SPOT),
("BTC/USDT", "5m", CandleType.SPOT),
("XRP/USDT", "5m", CandleType.SPOT),
("UNITTEST/BTC", "30m", CandleType.SPOT),
("UNITTEST/BTC", "8m", CandleType.SPOT),
}
@@ -126,7 +122,8 @@ def test_datahandler_ohlcv_get_available_data(testdatadir):
("XRP/USDT:USDT", "5m", "futures"),
("XRP/USDT:USDT", "1h", "futures"),
("XRP/USDT:USDT", "1h", "mark"),
("XRP/USDT:USDT", "1h", "funding_rate"),
("XRP/USDT:USDT", "8h", "mark"),
("XRP/USDT:USDT", "8h", "funding_rate"),
}
paircombs = JsonGzDataHandler.ohlcv_get_available_data(testdatadir, TradingMode.SPOT)

View File

@@ -9,7 +9,7 @@ from freqtrade.enums import CandleType, RunMode
from freqtrade.exceptions import ExchangeError, OperationalException
from freqtrade.plugins.pairlistmanager import PairListManager
from freqtrade.util import dt_utc
from tests.conftest import EXMS, generate_test_data, get_patched_exchange, log_has_re
from tests.conftest import EXMS, generate_test_data, get_patched_exchange
@pytest.mark.parametrize(
@@ -185,28 +185,6 @@ def test_get_pair_dataframe(mocker, default_conf, ohlcv_history, candle_type):
assert len(df) == 2 # ohlcv_history is limited to 2 rows now
def test_get_pair_dataframe_funding_rate(mocker, default_conf, ohlcv_history, caplog):
default_conf["runmode"] = RunMode.DRY_RUN
timeframe = "1h"
exchange = get_patched_exchange(mocker, default_conf)
candletype = CandleType.FUNDING_RATE
exchange._klines[("XRP/BTC", timeframe, candletype)] = ohlcv_history
exchange._klines[("UNITTEST/BTC", timeframe, candletype)] = ohlcv_history
dp = DataProvider(default_conf, exchange)
assert dp.runmode == RunMode.DRY_RUN
assert ohlcv_history.equals(
dp.get_pair_dataframe("UNITTEST/BTC", timeframe, candle_type="funding_rate")
)
msg = r".*funding rate timeframe not matching"
assert not log_has_re(msg, caplog)
assert ohlcv_history.equals(
dp.get_pair_dataframe("UNITTEST/BTC", "5h", candle_type="funding_rate")
)
assert log_has_re(msg, caplog)
def test_available_pairs(mocker, default_conf, ohlcv_history):
exchange = get_patched_exchange(mocker, default_conf)
timeframe = default_conf["timeframe"]
@@ -658,21 +636,3 @@ def test_check_delisting(mocker, default_conf_usdt):
assert res == dt_utc(2025, 10, 2)
assert delist_mock2.call_count == 1
def test_get_funding_rate_timeframe(mocker, default_conf_usdt):
default_conf_usdt["trading_mode"] = "futures"
default_conf_usdt["margin_mode"] = "isolated"
exchange = get_patched_exchange(mocker, default_conf_usdt)
mock_get_option = mocker.spy(exchange, "get_option")
dp = DataProvider(default_conf_usdt, exchange)
assert dp.get_funding_rate_timeframe() == "1h"
mock_get_option.assert_called_once_with("funding_fee_timeframe")
def test_get_funding_rate_timeframe_no_exchange(default_conf_usdt):
dp = DataProvider(default_conf_usdt, None)
with pytest.raises(OperationalException, match=r"Exchange is not available to DataProvider."):
dp.get_funding_rate_timeframe()

View File

@@ -534,19 +534,18 @@ def test_validate_backtest_data(default_conf, mocker, caplog, testdatadir) -> No
@pytest.mark.parametrize(
"trademode,callcount, callcount_parallel",
"trademode,callcount",
[
("spot", 4, 2),
("margin", 4, 2),
("futures", 8, 4), # Called 8 times - 4 normal, 2 funding and 2 mark/index calls
("spot", 4),
("margin", 4),
("futures", 8), # Called 8 times - 4 normal, 2 funding and 2 mark/index calls
],
)
def test_refresh_backtest_ohlcv_data(
mocker, default_conf, markets, caplog, testdatadir, trademode, callcount, callcount_parallel
mocker, default_conf, markets, caplog, testdatadir, trademode, callcount
):
caplog.set_level(logging.DEBUG)
dl_mock = mocker.patch("freqtrade.data.history.history_utils._download_pair_history")
mocker.patch(f"{EXMS}.verify_candle_type_support", MagicMock())
def parallel_mock(pairs, timeframe, candle_type, **kwargs):
return {(pair, timeframe, candle_type): DataFrame() for pair in pairs}
@@ -574,15 +573,14 @@ def test_refresh_backtest_ohlcv_data(
)
# Called once per timeframe (as we return an empty dataframe)
# called twice for spot/margin and 4 times for futures
assert parallel_mock.call_count == callcount_parallel
assert parallel_mock.call_count == 2
assert dl_mock.call_count == callcount
assert dl_mock.call_args[1]["timerange"].starttype == "date"
assert log_has_re(r"Downloading pair ETH/BTC, .* interval 1m\.", caplog)
if trademode == "futures":
assert log_has_re(r"Downloading pair ETH/BTC, funding_rate, interval 1h\.", caplog)
assert log_has_re(r"Downloading pair ETH/BTC, mark, interval 1h\.", caplog)
assert log_has_re(r"Downloading pair ETH/BTC, funding_rate, interval 8h\.", caplog)
assert log_has_re(r"Downloading pair ETH/BTC, mark, interval 4h\.", caplog)
# Test with only one pair - no parallel download should happen 1 pair/timeframe combination
# doesn't justify parallelization
@@ -601,24 +599,6 @@ def test_refresh_backtest_ohlcv_data(
)
assert parallel_mock.call_count == 0
if trademode == "futures":
dl_mock.reset_mock()
refresh_backtest_ohlcv_data(
exchange=ex,
pairs=[
"ETH/BTC",
],
timeframes=["5m", "1h"],
datadir=testdatadir,
timerange=timerange,
erase=False,
trading_mode=trademode,
no_parallel_download=True,
candle_types=["premiumIndex", "funding_rate"],
)
assert parallel_mock.call_count == 0
assert dl_mock.call_count == 3 # 2 timeframes premiumIndex + 1x funding_rate
def test_download_data_no_markets(mocker, default_conf, caplog, testdatadir):
dl_mock = mocker.patch(
@@ -916,7 +896,7 @@ def test_download_pair_history_with_pair_candles(mocker, default_conf, tmp_path,
assert get_historic_ohlcv_mock.call_count == 0
# Verify the log message indicating parallel method was used (line 315-316)
assert log_has("Downloaded data for TEST/BTC, 5m, spot with length 3. Parallel Method.", caplog)
assert log_has("Downloaded data for TEST/BTC with length 3. Parallel Method.", caplog)
# Verify data was stored
assert data_handler_mock.ohlcv_store.call_count == 1

View File

@@ -157,8 +157,7 @@ def test_create_stoploss_order_dry_run_binance(default_conf, mocker):
assert "type" in order
assert order["type"] == order_type
assert order["price"] == 217.8
assert order["stopPrice"] == 220
assert order["price"] == 220
assert order["amount"] == 1

View File

@@ -1111,29 +1111,21 @@ def test_create_dry_run_order_fees(
@pytest.mark.parametrize(
"side,limit,offset,is_stop,expected",
"side,limit,offset,expected",
[
("buy", 46.0, 0.0, False, True),
("buy", 46.0, 0.0, True, False),
("buy", 26.0, 0.0, False, True),
("buy", 26.0, 0.0, True, False), # Stop - didn't trigger
("buy", 25.55, 0.0, False, False),
("buy", 25.55, 0.0, True, True), # Stop - triggered
("buy", 1, 0.0, False, False), # Very far away
("buy", 1, 0.0, True, True), # Current price is above stop - triggered
("sell", 25.5, 0.0, False, True),
("sell", 50, 0.0, False, False), # Very far away
("sell", 25.58, 0.0, False, False),
("sell", 25.563, 0.01, False, False),
("sell", 25.563, 0.0, True, False), # stop order - Not triggered, best bid
("sell", 25.566, 0.0, True, True), # stop order - triggered
("sell", 26, 0.01, True, True), # stop order - triggered
("sell", 5.563, 0.01, False, True),
("sell", 5.563, 0.0, True, False), # stop order - not triggered
("buy", 46.0, 0.0, True),
("buy", 26.0, 0.0, True),
("buy", 25.55, 0.0, False),
("buy", 1, 0.0, False), # Very far away
("sell", 25.5, 0.0, True),
("sell", 50, 0.0, False), # Very far away
("sell", 25.58, 0.0, False),
("sell", 25.563, 0.01, False),
("sell", 5.563, 0.01, True),
],
)
def test__dry_is_price_crossed_with_orderbook(
default_conf, mocker, order_book_l2_usd, side, limit, offset, is_stop, expected
default_conf, mocker, order_book_l2_usd, side, limit, offset, expected
):
# Best bid 25.563
# Best ask 25.566
@@ -1142,14 +1134,14 @@ def test__dry_is_price_crossed_with_orderbook(
exchange.fetch_l2_order_book = order_book_l2_usd
orderbook = order_book_l2_usd.return_value
result = exchange._dry_is_price_crossed(
"LTC/USDT", side, limit, orderbook=orderbook, offset=offset, is_stop=is_stop
"LTC/USDT", side, limit, orderbook=orderbook, offset=offset
)
assert result is expected
assert order_book_l2_usd.call_count == 0
# Test without passing orderbook
order_book_l2_usd.reset_mock()
result = exchange._dry_is_price_crossed("LTC/USDT", side, limit, offset=offset, is_stop=is_stop)
result = exchange._dry_is_price_crossed("LTC/USDT", side, limit, offset=offset)
assert result is expected
@@ -1173,10 +1165,7 @@ def test__dry_is_price_crossed_without_orderbook_support(default_conf, mocker):
exchange.fetch_l2_order_book = MagicMock()
mocker.patch(f"{EXMS}.exchange_has", return_value=False)
assert exchange._dry_is_price_crossed("LTC/USDT", "buy", 1.0)
assert exchange._dry_is_price_crossed("LTC/USDT", "sell", 1.0)
assert exchange.fetch_l2_order_book.call_count == 0
assert not exchange._dry_is_price_crossed("LTC/USDT", "buy", 1.0, is_stop=True)
assert not exchange._dry_is_price_crossed("LTC/USDT", "sell", 1.0, is_stop=True)
@pytest.mark.parametrize(
@@ -1187,7 +1176,7 @@ def test__dry_is_price_crossed_without_orderbook_support(default_conf, mocker):
(False, False, "sell", 1.0, "open", None, 0, None),
],
)
def test_check_dry_limit_order_filled(
def test_check_dry_limit_order_filled_parametrized(
default_conf,
mocker,
crossed,
@@ -1231,70 +1220,6 @@ def test_check_dry_limit_order_filled(
assert fee_mock.call_count == expected_calls
@pytest.mark.parametrize(
"immediate,crossed,expected_status,expected_fee_type",
[
(True, True, "closed", "taker"),
(False, True, "closed", "maker"),
(True, False, "open", None),
],
)
def test_check_dry_limit_order_filled_stoploss(
default_conf, mocker, immediate, crossed, expected_status, expected_fee_type, order_book_l2_usd
):
exchange = get_patched_exchange(mocker, default_conf)
mocker.patch.multiple(
EXMS,
exchange_has=MagicMock(return_value=True),
_dry_is_price_crossed=MagicMock(return_value=crossed),
fetch_l2_order_book=order_book_l2_usd,
)
average_mock = mocker.patch(f"{EXMS}.get_dry_market_fill_price", return_value=24.25)
fee_mock = mocker.patch(
f"{EXMS}.add_dry_order_fee",
autospec=True,
side_effect=lambda self, pair, dry_order, taker_or_maker: dry_order,
)
amount = 1.75
order = {
"symbol": "LTC/USDT",
"status": "open",
"type": "limit",
"side": "sell",
"amount": amount,
"filled": 0.0,
"remaining": amount,
"price": 25.0,
"average": 0.0,
"cost": 0.0,
"fee": None,
"ft_order_type": "stoploss",
"stopLossPrice": 24.5,
}
result = exchange.check_dry_limit_order_filled(order, immediate=immediate)
assert result["status"] == expected_status
assert order_book_l2_usd.call_count == 1
if crossed:
assert result["filled"] == amount
assert result["remaining"] == 0
assert result["average"] == 24.25
assert result["cost"] == pytest.approx(amount * 24.25)
assert average_mock.call_count == 1
assert fee_mock.call_count == 1
assert fee_mock.call_args[0][1] == "LTC/USDT"
assert fee_mock.call_args[0][3] == expected_fee_type
else:
assert result["filled"] == 0.0
assert result["remaining"] == amount
assert result["average"] == 0.0
assert average_mock.call_count == 0
assert fee_mock.call_count == 0
@pytest.mark.parametrize(
"side,price,filled,converted",
[
@@ -2388,7 +2313,6 @@ async def test__async_get_historic_ohlcv(default_conf, mocker, caplog, exchange_
5, # volume (in quote currency)
]
]
mocker.patch(f"{EXMS}.verify_candle_type_support", MagicMock())
exchange = get_patched_exchange(mocker, default_conf, exchange=exchange_name)
# Monkey-patch async function
exchange._api_async.fetch_ohlcv = get_mock_coro(ohlcv)
@@ -2439,7 +2363,6 @@ def test_refresh_latest_ohlcv(mocker, default_conf_usdt, caplog, candle_type) ->
]
caplog.set_level(logging.DEBUG)
mocker.patch(f"{EXMS}.verify_candle_type_support", MagicMock())
exchange = get_patched_exchange(mocker, default_conf_usdt)
exchange._api_async.fetch_ohlcv = get_mock_coro(ohlcv)
@@ -2690,7 +2613,6 @@ def test_refresh_latest_ohlcv_cache(mocker, default_conf, candle_type, time_mach
ohlcv = generate_test_data_raw("1h", 100, start.strftime("%Y-%m-%d"))
time_machine.move_to(start + timedelta(hours=99, minutes=30))
mocker.patch(f"{EXMS}.verify_candle_type_support", MagicMock())
exchange = get_patched_exchange(mocker, default_conf)
exchange._set_startup_candle_count(default_conf)
@@ -2840,29 +2762,6 @@ def test_refresh_ohlcv_with_cache(mocker, default_conf, time_machine) -> None:
assert ohlcv_mock.call_args_list[0][0][0] == pairs
def test_refresh_latest_ohlcv_funding_rate(mocker, default_conf_usdt, caplog) -> None:
ohlcv = generate_test_data_raw("1h", 24, "2025-01-02 12:00:00+00:00")
funding_data = [{"timestamp": x[0], "fundingRate": x[1]} for x in ohlcv]
caplog.set_level(logging.DEBUG)
exchange = get_patched_exchange(mocker, default_conf_usdt)
exchange._api_async.fetch_ohlcv = get_mock_coro(ohlcv)
exchange._api_async.fetch_funding_rate_history = get_mock_coro(funding_data)
pairs = [
("IOTA/USDT:USDT", "8h", CandleType.FUNDING_RATE),
("XRP/USDT:USDT", "1h", CandleType.FUNDING_RATE),
]
# empty dicts
assert not exchange._klines
res = exchange.refresh_latest_ohlcv(pairs, cache=False)
assert len(res) == len(pairs)
assert log_has_re(r"Wrong funding rate timeframe 8h for pair IOTA/USDT:USDT", caplog)
assert not log_has_re(r"Wrong funding rate timeframe 8h for pair XRP/USDT:USDT", caplog)
assert exchange._api_async.fetch_ohlcv.call_count == 0
@pytest.mark.parametrize("exchange_name", EXCHANGES)
async def test__async_get_candle_history(default_conf, mocker, caplog, exchange_name):
ohlcv = [
@@ -5330,7 +5229,6 @@ def test_combine_funding_and_mark(
{"date": trade_date, "open": mark_price},
]
)
# Test fallback to futures funding rate for missing funding rates
df = exchange.combine_funding_and_mark(funding_rates, mark_rates, futures_funding_rate)
if futures_funding_rate is not None:
@@ -5358,34 +5256,6 @@ def test_combine_funding_and_mark(
assert len(df) == 0
# Test fallback to futures funding rate for middle missing funding rate
funding_rates = DataFrame(
[
{"date": prior2_date, "open": funding_rate},
# missing 1 hour
{"date": trade_date, "open": funding_rate},
],
)
mark_rates = DataFrame(
[
{"date": prior2_date, "open": mark_price},
{"date": prior_date, "open": mark_price},
{"date": trade_date, "open": mark_price},
]
)
df = exchange.combine_funding_and_mark(funding_rates, mark_rates, futures_funding_rate)
if futures_funding_rate is not None:
assert len(df) == 2
assert df.iloc[0]["open_fund"] == funding_rate
# assert df.iloc[1]["open_fund"] == futures_funding_rate
assert df.iloc[-1]["open_fund"] == funding_rate
# Mid-candle is dropped ...
assert df["date"].to_list() == [prior2_date, trade_date]
else:
assert len(df) == 2
assert df["date"].to_list() == [prior2_date, trade_date]
@pytest.mark.parametrize(
"exchange,rate_start,rate_end,d1,d2,amount,expected_fees",
@@ -5475,13 +5345,8 @@ def test__fetch_and_calculate_funding_fees(
api_mock = MagicMock()
api_mock.fetch_funding_rate_history = get_mock_coro(return_value=funding_rate_history)
api_mock.fetch_ohlcv = get_mock_coro(return_value=mark_ohlcv)
type(api_mock).has = PropertyMock(
return_value={
"fetchFundingRateHistory": True,
"fetchMarkOHLCV": True,
"fetchOHLCV": True,
}
)
type(api_mock).has = PropertyMock(return_value={"fetchOHLCV": True})
type(api_mock).has = PropertyMock(return_value={"fetchFundingRateHistory": True})
ex = get_patched_exchange(mocker, default_conf, api_mock, exchange=exchange)
mocker.patch(f"{EXMS}.timeframes", PropertyMock(return_value=["1h", "4h", "8h"]))
@@ -5525,13 +5390,8 @@ def test__fetch_and_calculate_funding_fees_datetime_called(
api_mock.fetch_funding_rate_history = get_mock_coro(
return_value=funding_rate_history_octohourly
)
type(api_mock).has = PropertyMock(
return_value={
"fetchFundingRateHistory": True,
"fetchMarkOHLCV": True,
"fetchOHLCV": True,
}
)
type(api_mock).has = PropertyMock(return_value={"fetchOHLCV": True})
type(api_mock).has = PropertyMock(return_value={"fetchFundingRateHistory": True})
mocker.patch(f"{EXMS}.timeframes", PropertyMock(return_value=["4h", "8h"]))
exchange = get_patched_exchange(mocker, default_conf, api_mock, exchange=exchange)
d1 = datetime.strptime("2021-08-31 23:00:01 +0000", "%Y-%m-%d %H:%M:%S %z")
@@ -6618,51 +6478,3 @@ def test_fetch_funding_rate(default_conf, mocker, exchange_name):
with pytest.raises(DependencyException, match=r"Pair XRP/ETH not available"):
exchange.fetch_funding_rate(pair="XRP/ETH")
def test_verify_candle_type_support(default_conf, mocker):
api_mock = MagicMock()
type(api_mock).has = PropertyMock(
return_value={
"fetchFundingRateHistory": True,
"fetchIndexOHLCV": True,
"fetchMarkOHLCV": True,
"fetchPremiumIndexOHLCV": False,
}
)
exchange = get_patched_exchange(mocker, default_conf, api_mock)
# Should pass
exchange.verify_candle_type_support("futures")
exchange.verify_candle_type_support(CandleType.FUTURES)
exchange.verify_candle_type_support(CandleType.FUNDING_RATE)
exchange.verify_candle_type_support(CandleType.SPOT)
exchange.verify_candle_type_support(CandleType.MARK)
# Should fail:
with pytest.raises(
OperationalException,
match=r"Exchange .* does not support fetching premiumindex candles\.",
):
exchange.verify_candle_type_support(CandleType.PREMIUMINDEX)
type(api_mock).has = PropertyMock(
return_value={
"fetchFundingRateHistory": False,
"fetchIndexOHLCV": False,
"fetchMarkOHLCV": False,
"fetchPremiumIndexOHLCV": True,
}
)
for candle_type in [
CandleType.FUNDING_RATE,
CandleType.INDEX,
CandleType.MARK,
]:
with pytest.raises(
OperationalException,
match=rf"Exchange .* does not support fetching {candle_type.value} candles\.",
):
exchange.verify_candle_type_support(candle_type)
exchange.verify_candle_type_support(CandleType.PREMIUMINDEX)
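The test above pins down the contract of verify_candle_type_support: each special candle type needs a matching ccxt capability flag, and an unsupported type raises with the candle type's value in the message. A hedged sketch of that mapping-based check; the flag mapping, string values, and exception type are inferred from the test rather than copied from freqtrade.

# Sketch only: map candle types to the ccxt `has` flag needed to fetch them.
REQUIRED_CAPABILITY = {
    "funding_rate": "fetchFundingRateHistory",
    "index": "fetchIndexOHLCV",
    "mark": "fetchMarkOHLCV",
    "premiumindex": "fetchPremiumIndexOHLCV",
}


def verify_candle_type_support_sketch(exchange_name: str, has: dict, candle_type: str) -> None:
    required = REQUIRED_CAPABILITY.get(candle_type)
    if required is not None and not has.get(required, False):
        # freqtrade raises OperationalException here; a plain exception keeps the sketch standalone.
        raise RuntimeError(
            f"Exchange {exchange_name} does not support fetching {candle_type} candles."
        )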

View File

@@ -123,8 +123,7 @@ def test_create_stoploss_order_dry_run_htx(default_conf, mocker):
assert "type" in order
assert order["type"] == order_type
assert order["price"] == 217.8
assert order["stopPrice"] == 220
assert order["price"] == 220
assert order["amount"] == 1

View File

@@ -515,8 +515,7 @@ EXCHANGES = {
],
},
"hyperliquid": {
# TODO: Should be UBTC/USDC - probably needs a fix in ccxt
"pair": "BTC/USDC",
"pair": "UBTC/USDC",
"stake_currency": "USDC",
"hasQuoteVolume": False,
"timeframe": "30m",

View File

@@ -270,14 +270,11 @@ class TestCCXTExchange:
assert exch.klines(pair_tf).iloc[-1]["date"] >= timeframe_to_prev_date(timeframe, now)
assert exch.klines(pair_tf)["date"].astype(int).iloc[0] // 1e6 == since_ms
def _ccxt__async_get_candle_history(
self, exchange, pair: str, timeframe: str, candle_type: CandleType, factor: float = 0.9
):
def _ccxt__async_get_candle_history(self, exchange, pair, timeframe, candle_type, factor=0.9):
timeframe_ms = timeframe_to_msecs(timeframe)
timeframe_ms_8h = timeframe_to_msecs("8h")
now = timeframe_to_prev_date(timeframe, datetime.now(UTC))
for offset_days in (360, 120, 30, 10, 5, 2):
since = now - timedelta(days=offset_days)
for offset in (360, 120, 30, 10, 5, 2):
since = now - timedelta(days=offset)
since_ms = int(since.timestamp() * 1000)
res = exchange.loop.run_until_complete(
@@ -292,15 +289,8 @@ class TestCCXTExchange:
candles = res[3]
candle_count = exchange.ohlcv_candle_limit(timeframe, candle_type, since_ms) * factor
candle_count1 = (now.timestamp() * 1000 - since_ms) // timeframe_ms * factor
# funding fees can be 1h or 8h - depending on pair and time.
candle_count2 = (now.timestamp() * 1000 - since_ms) // timeframe_ms_8h * factor
min_value = min(
candle_count,
candle_count1,
candle_count2 if candle_type == CandleType.FUNDING_RATE else candle_count1,
)
assert len(candles) >= min_value, (
f"{len(candles)} < {candle_count} in {timeframe} {offset_days=} {factor=}"
assert len(candles) >= min(candle_count, candle_count1), (
f"{len(candles)} < {candle_count} in {timeframe}, Offset: {offset} {factor}"
)
# Check if first-timeframe is either the start, or start + 1
assert candles[0][0] == since_ms or (since_ms + timeframe_ms)
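The assertion above works with a lower bound on the candle count: the window is capped either by the exchange's per-request candle limit or by the number of intervals in the lookback window, and funding-rate candles may arrive on an 8h grid even when requested at 1h. A small worked example of that arithmetic, analogous to the min() above (factor leaves slack for exchange quirks):

def expected_min_candles(offset_days: int, timeframe_hours: int, candle_limit: int,
                         is_funding_rate: bool, factor: float = 0.9) -> float:
    """Lower bound on candles for a lookback window, mirroring the min() logic above."""
    intervals = offset_days * 24 / timeframe_hours
    intervals_8h = offset_days * 24 / 8  # funding rates may only be settled every 8h
    counts = [candle_limit * factor, intervals * factor]
    counts.append(intervals_8h * factor if is_funding_rate else intervals * factor)
    return min(counts)


# 10 days of "1h" funding-rate data with a 1000-candle request limit:
# min(1000 * 0.9, 240 * 0.9, 30 * 0.9) == 27.0
assert expected_min_candles(10, 1, 1000, is_funding_rate=True) == 27.0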
@@ -319,8 +309,6 @@ class TestCCXTExchange:
[
CandleType.FUTURES,
CandleType.FUNDING_RATE,
CandleType.INDEX,
CandleType.PREMIUMINDEX,
CandleType.MARK,
],
)
@@ -334,10 +322,6 @@ class TestCCXTExchange:
timeframe = exchange._ft_has.get(
"funding_fee_timeframe", exchange._ft_has["mark_ohlcv_timeframe"]
)
else:
# never skip funding rate!
if not exchange.check_candle_type_support(candle_type):
pytest.skip(f"Exchange does not support candle type {candle_type}")
self._ccxt__async_get_candle_history(
exchange,
pair=pair,
@@ -353,7 +337,6 @@ class TestCCXTExchange:
timeframe_ff = exchange._ft_has.get(
"funding_fee_timeframe", exchange._ft_has["mark_ohlcv_timeframe"]
)
timeframe_ff_8h = "8h"
pair_tf = (pair, timeframe_ff, CandleType.FUNDING_RATE)
funding_ohlcv = exchange.refresh_latest_ohlcv(
@@ -367,26 +350,14 @@ class TestCCXTExchange:
hour1 = timeframe_to_prev_date(timeframe_ff, this_hour - timedelta(minutes=1))
hour2 = timeframe_to_prev_date(timeframe_ff, hour1 - timedelta(minutes=1))
hour3 = timeframe_to_prev_date(timeframe_ff, hour2 - timedelta(minutes=1))
# Alternative 8h timeframe - funding fee timeframe is not stable.
h8_this_hour = timeframe_to_prev_date(timeframe_ff_8h)
h8_hour1 = timeframe_to_prev_date(timeframe_ff_8h, h8_this_hour - timedelta(minutes=1))
h8_hour2 = timeframe_to_prev_date(timeframe_ff_8h, h8_hour1 - timedelta(minutes=1))
h8_hour3 = timeframe_to_prev_date(timeframe_ff_8h, h8_hour2 - timedelta(minutes=1))
row0 = rate.iloc[-1]
row1 = rate.iloc[-2]
row2 = rate.iloc[-3]
row3 = rate.iloc[-4]
assert row0["date"] == this_hour or row0["date"] == h8_this_hour
assert row1["date"] == hour1 or row1["date"] == h8_hour1
assert row2["date"] == hour2 or row2["date"] == h8_hour2
assert row3["date"] == hour3 or row3["date"] == h8_hour3
val0 = rate[rate["date"] == this_hour].iloc[0]["open"]
val1 = rate[rate["date"] == hour1].iloc[0]["open"]
val2 = rate[rate["date"] == hour2].iloc[0]["open"]
val3 = rate[rate["date"] == hour3].iloc[0]["open"]
# Test For last 4 hours
# Avoids random test-failure when funding-fees are 0 for a few hours.
assert (
row0["open"] != 0.0 or row1["open"] != 0.0 or row2["open"] != 0.0 or row3["open"] != 0.0
)
assert val0 != 0.0 or val1 != 0.0 or val2 != 0.0 or val3 != 0.0
# We expect funding rates to be different from 0.0 - or moving around.
assert (
rate["open"].max() != 0.0
@@ -398,10 +369,7 @@ class TestCCXTExchange:
exchange, exchangename = exchange_futures
pair = EXCHANGES[exchangename].get("futures_pair", EXCHANGES[exchangename]["pair"])
since = int((datetime.now(UTC) - timedelta(days=5)).timestamp() * 1000)
candle_type = CandleType.from_string(
exchange.get_option("mark_ohlcv_price", default=CandleType.MARK)
)
pair_tf = (pair, "1h", candle_type)
pair_tf = (pair, "1h", CandleType.MARK)
mark_ohlcv = exchange.refresh_latest_ohlcv([pair_tf], since_ms=since, drop_incomplete=False)

View File

@@ -2548,9 +2548,9 @@ def test_manage_open_orders_exception(
caplog.clear()
freqtrade.manage_open_orders()
assert log_has_re(
r"Cannot query order for Trade\(id=1, pair=ADA/USDT, amount=30, "
r"is_short=False, leverage=1, "
r"open_rate=2, open_since="
r"Cannot query order for Trade\(id=1, pair=ADA/USDT, amount=30.00000000, "
r"is_short=False, leverage=1.0, "
r"open_rate=2.00000000, open_since="
f"{open_trade_usdt.open_date.strftime('%Y-%m-%d %H:%M:%S')}"
r"\) due to Traceback \(most recent call last\):\n*",
caplog,
@@ -3751,8 +3751,8 @@ def test_get_real_amount_quote(
# Amount is reduced by "fee"
assert freqtrade.get_real_amount(trade, buy_order_fee, order_obj) == (amount * 0.001)
assert log_has(
"Applying fee on amount for Trade(id=None, pair=LTC/ETH, amount=8, is_short=False,"
" leverage=1, open_rate=0.245441, open_since=closed), fee=0.008.",
"Applying fee on amount for Trade(id=None, pair=LTC/ETH, amount=8.00000000, is_short=False,"
" leverage=1.0, open_rate=0.24544100, open_since=closed), fee=0.008.",
caplog,
)
@@ -3805,8 +3805,8 @@ def test_get_real_amount_no_trade(default_conf_usdt, buy_order_fee, caplog, mock
# Amount is reduced by "fee"
assert freqtrade.get_real_amount(trade, buy_order_fee, order_obj) is None
assert log_has(
"Applying fee on amount for Trade(id=None, pair=LTC/ETH, amount=8, "
"is_short=False, leverage=1, open_rate=0.245441, open_since=closed) failed: "
"Applying fee on amount for Trade(id=None, pair=LTC/ETH, amount=8.00000000, "
"is_short=False, leverage=1.0, open_rate=0.24544100, open_since=closed) failed: "
"myTrade-dict empty found",
caplog,
)
@@ -3825,8 +3825,8 @@ def test_get_real_amount_no_trade(default_conf_usdt, buy_order_fee, caplog, mock
0,
True,
(
"Fee for Trade Trade(id=None, pair=LTC/ETH, amount=8, is_short=False, "
"leverage=1, open_rate=0.245441, open_since=closed) [buy]: 0.00094518 BNB -"
"Fee for Trade Trade(id=None, pair=LTC/ETH, amount=8.00000000, is_short=False, "
"leverage=1.0, open_rate=0.24544100, open_since=closed) [buy]: 0.00094518 BNB -"
" rate: None"
),
),
@@ -3836,8 +3836,8 @@ def test_get_real_amount_no_trade(default_conf_usdt, buy_order_fee, caplog, mock
0.004,
False,
(
"Applying fee on amount for Trade(id=None, pair=LTC/ETH, amount=8, "
"is_short=False, leverage=1, open_rate=0.245441, open_since=closed), fee=0.004."
"Applying fee on amount for Trade(id=None, pair=LTC/ETH, amount=8.00000000, "
"is_short=False, leverage=1.0, open_rate=0.24544100, open_since=closed), fee=0.004."
),
),
# invalid, no currency in from fee dict
@@ -3941,8 +3941,8 @@ def test_get_real_amount_multi(
assert freqtrade.get_real_amount(trade, buy_order_fee, order_obj) == expected_amount
assert log_has(
(
"Applying fee on amount for Trade(id=None, pair=LTC/ETH, amount=8, "
"is_short=False, leverage=1, open_rate=0.245441, open_since=closed), "
"Applying fee on amount for Trade(id=None, pair=LTC/ETH, amount=8.00000000, "
"is_short=False, leverage=1.0, open_rate=0.24544100, open_since=closed), "
f"fee={expected_amount}."
),
caplog,
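The log-message updates in this file reflect a fixed-width numeric formatting in the trade's string representation: amount and open_rate to eight decimals, leverage printed as a float. A minimal sketch of that kind of formatting, with hypothetical field names rather than freqtrade's actual Trade model:

from dataclasses import dataclass


@dataclass
class TradeRepr:
    id: int | None
    pair: str
    amount: float
    is_short: bool
    leverage: float
    open_rate: float
    open_since: str

    def __repr__(self) -> str:
        return (
            f"Trade(id={self.id}, pair={self.pair}, amount={self.amount:.8f}, "
            f"is_short={self.is_short}, leverage={self.leverage}, "
            f"open_rate={self.open_rate:.8f}, open_since={self.open_since})"
        )


# repr(TradeRepr(None, "LTC/ETH", 8, False, 1.0, 0.245441, "closed")) gives
# "Trade(id=None, pair=LTC/ETH, amount=8.00000000, is_short=False, leverage=1.0,
#  open_rate=0.24544100, open_since=closed)"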

View File

@@ -50,14 +50,7 @@ def test_may_execute_exit_stoploss_on_exchange_multi(default_conf, ticker, fee,
stoploss_order_mock = MagicMock(side_effect=stop_orders)
# Sell 3rd trade (not called for the first trade)
should_sell_mock = MagicMock(side_effect=[[], [ExitCheckTuple(exit_type=ExitType.EXIT_SIGNAL)]])
def patch_stoploss(order_id, *args, **kwargs):
slo = stoploss_order_open.copy()
slo["id"] = order_id
slo["status"] = "canceled"
return slo
cancel_order_mock = MagicMock(side_effect=patch_stoploss)
cancel_order_mock = MagicMock()
mocker.patch.multiple(
EXMS,
create_stoploss=stoploss,
@@ -800,13 +793,9 @@ def test_dca_handle_similar_open_order(
# Should Create a new exit order
freqtrade.exchange.amount_to_contract_precision = MagicMock(return_value=2)
freqtrade.strategy.adjust_trade_position = MagicMock(return_value=-2)
msg = r"Skipping cancelling stoploss on exchange for.*"
mocker.patch(f"{EXMS}._dry_is_price_crossed", return_value=False)
assert not log_has_re(msg, caplog)
freqtrade.process()
assert log_has_re(msg, caplog)
trade = Trade.get_trades().first()
assert trade.orders[-2].status == "closed"

View File

@@ -879,10 +879,6 @@ def test_backtest_one_detail(default_conf_usdt, mocker, testdatadir, use_detail)
patch_exchange(mocker)
mocker.patch(f"{EXMS}.get_min_pair_stake_amount", return_value=0.00001)
mocker.patch(f"{EXMS}.get_max_pair_stake_amount", return_value=float("inf"))
default_conf_usdt["unfilledtimeout"] = {
"entry": 11,
"exit": 30,
}
if use_detail:
default_conf_usdt["timeframe_detail"] = "1m"
@@ -920,7 +916,7 @@ def test_backtest_one_detail(default_conf_usdt, mocker, testdatadir, use_detail)
)
results = result["results"]
assert not results.empty
# Timeout settings from = entry: 11, exit: 30
# Timeout settings from default_conf = entry: 10, exit: 30
assert len(results) == (2 if use_detail else 3)
assert "orders" in results.columns
@@ -970,8 +966,8 @@ def test_backtest_one_detail(default_conf_usdt, mocker, testdatadir, use_detail)
@pytest.mark.parametrize(
"use_detail,exp_funding_fee, exp_ff_updates",
[
(True, -0.0180457882, 15),
(False, -0.0178000543, 12),
(True, -0.018054162, 10),
(False, -0.01780296, 6),
],
)
def test_backtest_one_detail_futures(
@@ -1081,8 +1077,8 @@ def test_backtest_one_detail_futures(
@pytest.mark.parametrize(
"use_detail,entries,max_stake,ff_updates,expected_ff",
[
(True, 50, 3000, 78, -1.17988972),
(False, 6, 360, 34, -0.14673681),
(True, 50, 3000, 55, -1.18038144),
(False, 6, 360, 11, -0.14679994),
],
)
def test_backtest_one_detail_futures_funding_fees(
@@ -1804,7 +1800,7 @@ def test_backtest_multi_pair_detail_simplified(
if use_detail:
# Backtest loop is called once per candle per pair
# Exact numbers depend on trade state - but should be around 2_600
assert bl_spy.call_count > 2_159
assert bl_spy.call_count > 2_170
assert bl_spy.call_count < 2_800
assert len(evaluate_result_multi(results["results"], "1h", 3)) > 0
else:
@@ -2382,12 +2378,13 @@ def test_backtest_start_nomock_futures(default_conf_usdt, mocker, caplog, testda
f"Using data directory: {testdatadir} ...",
"Loading data from 2021-11-17 01:00:00 up to 2021-11-21 04:00:00 (4 days).",
"Backtesting with data from 2021-11-17 21:00:00 up to 2021-11-21 04:00:00 (3 days).",
"XRP/USDT:USDT, funding_rate, 1h, data starts at 2021-11-18 00:00:00",
"XRP/USDT:USDT, funding_rate, 8h, data starts at 2021-11-18 00:00:00",
"XRP/USDT:USDT, mark, 8h, data starts at 2021-11-18 00:00:00",
f"Running backtesting for Strategy {CURRENT_TEST_STRATEGY}",
]
for line in exists:
assert log_has(line, caplog), line
assert log_has(line, caplog)
captured = capsys.readouterr()
assert "BACKTESTING REPORT" in captured.out

View File

@@ -372,8 +372,8 @@ def test_borrowed(fee, is_short, lev, borrowed, trading_mode):
@pytest.mark.parametrize(
"is_short,open_rate,close_rate,lev,profit,trading_mode",
[
(False, 2, 2.2, 1, 0.09451372, spot),
(True, 2.2, 2.0, 3, 0.25894253, margin),
(False, 2.0, 2.2, 1.0, 0.09451372, spot),
(True, 2.2, 2.0, 3.0, 0.25894253, margin),
],
)
@pytest.mark.usefixtures("init_persistence")
@@ -493,8 +493,8 @@ def test_update_limit_order(
assert trade.close_date is None
assert log_has_re(
f"LIMIT_{entry_side.upper()} has been fulfilled for "
r"Trade\(id=2, pair=ADA/USDT, amount=30, "
f"is_short={is_short}, leverage={lev}, open_rate={open_rate}, "
r"Trade\(id=2, pair=ADA/USDT, amount=30.00000000, "
f"is_short={is_short}, leverage={lev}, open_rate={open_rate}0000000, "
r"open_since=.*\).",
caplog,
)
@@ -511,8 +511,8 @@ def test_update_limit_order(
assert trade.close_date is not None
assert log_has_re(
f"LIMIT_{exit_side.upper()} has been fulfilled for "
r"Trade\(id=2, pair=ADA/USDT, amount=30, "
f"is_short={is_short}, leverage={lev}, open_rate={open_rate}, "
r"Trade\(id=2, pair=ADA/USDT, amount=30.00000000, "
f"is_short={is_short}, leverage={lev}, open_rate={open_rate}0000000, "
r"open_since=.*\).",
caplog,
)
@@ -545,8 +545,8 @@ def test_update_market_order(market_buy_order_usdt, market_sell_order_usdt, fee,
assert trade.close_date is None
assert log_has_re(
r"MARKET_BUY has been fulfilled for Trade\(id=1, "
r"pair=ADA/USDT, amount=30, is_short=False, leverage=1, "
r"open_rate=2, open_since=.*\).",
r"pair=ADA/USDT, amount=30.00000000, is_short=False, leverage=1.0, "
r"open_rate=2.00000000, open_since=.*\).",
caplog,
)
@@ -561,8 +561,8 @@ def test_update_market_order(market_buy_order_usdt, market_sell_order_usdt, fee,
assert trade.close_date is not None
assert log_has_re(
r"MARKET_SELL has been fulfilled for Trade\(id=1, "
r"pair=ADA/USDT, amount=30, is_short=False, leverage=1, "
r"open_rate=2, open_since=.*\).",
r"pair=ADA/USDT, amount=30.00000000, is_short=False, leverage=1.0, "
r"open_rate=2.00000000, open_since=.*\).",
caplog,
)

View File

@@ -2758,12 +2758,12 @@ def test_list_available_pairs(botclient):
rc = client_get(client, f"{BASE_URI}/available_pairs")
assert_response(rc)
assert rc.json()["length"] == 14
assert rc.json()["length"] == 12
assert isinstance(rc.json()["pairs"], list)
rc = client_get(client, f"{BASE_URI}/available_pairs?timeframe=5m")
assert_response(rc)
assert rc.json()["length"] == 14
assert rc.json()["length"] == 12
rc = client_get(client, f"{BASE_URI}/available_pairs?stake_currency=ETH")
assert_response(rc)

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@@ -20,8 +20,8 @@ def test_binance_mig_data_conversion(default_conf_usdt, tmp_path, testdatadir):
files = [
"-1h-mark.feather",
"-1h-futures.feather",
"-1h-funding_rate.feather",
"-1h-mark.feather",
"-8h-funding_rate.feather",
"-8h-mark.feather",
]
# Copy files to tmpdir and rename to old naming

View File

@@ -108,7 +108,6 @@ def test_format_date() -> None:
date = datetime(2021, 9, 30, 22, 59, 3, 455555, tzinfo=UTC)
assert format_date(date) == "2021-09-30 22:59:03"
assert format_date(None) == ""
assert format_date(None, "closed") == "closed"
def test_format_ms_time_detailed() -> None:

View File

@@ -57,8 +57,6 @@ def test_round_value():
assert round_value(222.2, 0, True) == "222"
assert round_value(float("nan"), 0, True) == "N/A"
assert round_value(float("nan"), 10, True) == "N/A"
assert round_value(None, 10, True) == "N/A"
assert round_value(None, 1, True) == "N/A"
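The added assertions cover None alongside NaN. A hedged sketch of the behaviour being exercised, not freqtrade's actual round_value implementation: format a number to the given precision and return "N/A" for missing values.

import math


def round_value_sketch(value: float | None, digits: int, keep_trailing_zeros: bool = False) -> str:
    """Format a number, returning "N/A" for None or NaN (sketch only)."""
    if value is None or (isinstance(value, float) and math.isnan(value)):
        return "N/A"
    if keep_trailing_zeros:
        return f"{value:.{digits}f}"
    return f"{round(value, digits):g}"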
def test_format_duration():

View File

@@ -5,13 +5,13 @@ from freqtrade.util.migrations import migrate_funding_fee_timeframe
def test_migrate_funding_rate_timeframe(default_conf_usdt, tmp_path, testdatadir):
copytree(testdatadir / "futures", tmp_path / "futures")
file_30m = tmp_path / "futures" / "XRP_USDT_USDT-30m-funding_rate.feather"
file_1h_fr = tmp_path / "futures" / "XRP_USDT_USDT-1h-funding_rate.feather"
file_4h = tmp_path / "futures" / "XRP_USDT_USDT-4h-funding_rate.feather"
file_8h = tmp_path / "futures" / "XRP_USDT_USDT-8h-funding_rate.feather"
file_1h = tmp_path / "futures" / "XRP_USDT_USDT-1h-futures.feather"
file_1h_fr.rename(file_30m)
file_8h.rename(file_4h)
assert file_1h.exists()
assert file_30m.exists()
assert not file_1h_fr.exists()
assert file_4h.exists()
assert not file_8h.exists()
default_conf_usdt["datadir"] = tmp_path
@@ -22,7 +22,7 @@ def test_migrate_funding_rate_timeframe(default_conf_usdt, tmp_path, testdatadir
migrate_funding_fee_timeframe(default_conf_usdt, None)
assert not file_30m.exists()
assert file_1h_fr.exists()
assert not file_4h.exists()
assert file_8h.exists()
# futures file is untouched.
assert file_1h.exists()
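The migration tested here renames funding-rate data files from an old timeframe suffix to the one the exchange actually uses, leaving other candle types alone. A hedged pathlib sketch of that kind of rename migration; the paths and timeframes are illustrative, not freqtrade's exact migrate_funding_fee_timeframe.

from pathlib import Path


def migrate_funding_rate_files(datadir: Path, old_tf: str, new_tf: str) -> None:
    """Rename "<PAIR>-<old_tf>-funding_rate.feather" files to the new timeframe."""
    for old_file in datadir.glob(f"*-{old_tf}-funding_rate.feather"):
        new_file = old_file.with_name(old_file.name.replace(f"-{old_tf}-", f"-{new_tf}-"))
        if not new_file.exists():
            old_file.rename(new_file)


# e.g. migrate_funding_rate_files(Path("user_data/data/binance/futures"), "4h", "8h")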