Mirror of https://github.com/freqtrade/freqtrade.git - synced 2025-11-29 08:33:07 +00:00

Merge branch 'develop' into pr/mrpabloyeah/11561
.github/workflows/ci.yml
@@ -73,17 +73,17 @@ jobs:
           python build_helpers/freqtrade_client_version_align.py

       - name: Tests
-        if: (!(runner.os == 'Linux' && matrix.python-version == '3.12' && matrix.os == 'ubuntu-22.04'))
+        if: (!(runner.os == 'Linux' && matrix.python-version == '3.12' && matrix.os == 'ubuntu-24.04'))
         run: |
           pytest --random-order

       - name: Tests with Coveralls
-        if: (runner.os == 'Linux' && matrix.python-version == '3.12' && matrix.os == 'ubuntu-22.04')
+        if: (runner.os == 'Linux' && matrix.python-version == '3.12' && matrix.os == 'ubuntu-24.04')
         run: |
           pytest --random-order --cov=freqtrade --cov=freqtrade_client --cov-config=.coveragerc

       - name: Coveralls
-        if: (runner.os == 'Linux' && matrix.python-version == '3.12' && matrix.os == 'ubuntu-22.04')
+        if: (runner.os == 'Linux' && matrix.python-version == '3.12' && matrix.os == 'ubuntu-24.04')
         env:
           # Coveralls token. Not used as secret due to github not providing secrets to forked repositories
           COVERALLS_REPO_TOKEN: 6D1m0xupS3FgutfuGao8keFf9Hc0FpIXu

@@ -139,6 +139,7 @@ jobs:
           ruff format --check

       - name: Mypy
+        if: matrix.os == 'ubuntu-24.04'
         run: |
           mypy freqtrade scripts tests

@@ -264,6 +265,7 @@ jobs:
           ruff format --check

       - name: Mypy
+        if: matrix.os == 'macos-15'
         run: |
           mypy freqtrade scripts

@@ -2,7 +2,7 @@
 # See https://pre-commit.com/hooks.html for more hooks
 repos:
   - repo: https://github.com/pycqa/flake8
-    rev: "7.1.2"
+    rev: "7.2.0"
     hooks:
       - id: flake8
         additional_dependencies: [Flake8-pyproject]

@@ -16,10 +16,10 @@ repos:
         additional_dependencies:
           - types-cachetools==5.5.0.20240820
           - types-filelock==3.2.7
-          - types-requests==2.32.0.20250306
+          - types-requests==2.32.0.20250328
           - types-tabulate==0.9.0.20241207
           - types-python-dateutil==2.9.0.20241206
-          - SQLAlchemy==2.0.39
+          - SQLAlchemy==2.0.40
         # stages: [push]

   - repo: https://github.com/pycqa/isort
@@ -435,6 +435,20 @@ To save time, by default backtest will reuse a cached result from within the last
 To further analyze your backtest results, freqtrade will export the trades to file by default.
 You can then load the trades to perform further analysis as shown in the [data analysis](strategy_analysis_example.md#load-backtest-results-to-pandas-dataframe) backtesting section.

+### Backtest output file
+
+The output file freqtrade produces is a zip file containing the following files:
+
+- The backtest report in JSON format
+- The market change data in feather format
+- A copy of the strategy file
+- A copy of the strategy parameters (if a parameter file was used)
+- A sanitized copy of the config file
+
+This will ensure results are reproducible - under the assumption that the same data is available.
+
+Only the strategy file and the config file are included in the zip file; additional dependencies of the strategy are not included.
+
 ## Assumptions made by backtesting

 Since backtesting lacks some detailed information about what happens within a candle, it needs to take a few assumptions:
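Since the doc section above is new, here is a quick way to check what actually lands in such an archive. This is a minimal sketch using only the standard library; the archive path is a placeholder, and the member-name pattern (`*_config.json` and friends) is inferred from the list above and the storage code further down in this diff.

```python
# Minimal sketch: list a backtest result archive and read the sanitized config.
# The path is a placeholder - point it at a zip produced by your own run.
import json
import zipfile
from pathlib import Path

result_zip = Path("user_data/backtest_results/backtest-result-2025-04-01_12-00-00.zip")

with zipfile.ZipFile(result_zip) as zf:
    print(zf.namelist())  # report JSON, *_market_change.feather, *_config.json, strategy copy
    config_member = next(n for n in zf.namelist() if n.endswith("_config.json"))
    sanitized_config = json.loads(zf.read(config_member))
    print(sanitized_config.get("stake_currency"))
```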
@@ -363,6 +363,10 @@ Hyperliquid handles deposits and withdrawals on the Arbitrum One chain, a Layer
 * Create a different software wallet, only transfer the funds you want to trade with to that wallet, and use that wallet to trade on Hyperliquid.
 * If you have funds you don't want to use for trading (after making a profit for example), transfer them back to your hardware wallet.

+### Historic Hyperliquid data
+
+The Hyperliquid API does not provide historic data beyond what a single call for current data returns, so downloading data is not possible - the result would not constitute proper historic data.
+
 ## All exchanges

 Should you experience constant errors with Nonce (like `InvalidNonce`), it is best to regenerate the API keys. Resetting Nonce is difficult and it's usually easier to regenerate the API keys.
@@ -258,6 +258,8 @@ freqtrade trade --config config_examples/config_freqai.example.json --strategy F
 We do provide an explicit docker-compose file for this in `docker/docker-compose-freqai.yml` - which can be used via `docker compose -f docker/docker-compose-freqai.yml run ...` - or can be copied to replace the original docker file.
 This docker-compose file also contains a (disabled) section to enable GPU resources within docker containers. This obviously assumes the system has GPU resources available.

+PyTorch dropped support for macOS x64 (Intel-based Apple devices) in version 2.3. Subsequently, freqtrade also dropped support for PyTorch on this platform.
+
 ### Structure

 #### Model
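For readers wondering whether their machine is affected, the note above comes down to a single platform check. The snippet below is purely illustrative and uses only the standard library.

```python
# Illustrative check for the platform the note above refers to:
# PyTorch >= 2.3 no longer ships wheels for Intel (x86_64) macOS.
import platform
import sys

is_intel_mac = sys.platform == "darwin" and platform.machine() == "x86_64"
print("PyTorch-based FreqAI models are unavailable here" if is_intel_mac else "Platform supports PyTorch")
```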
@@ -1,6 +1,6 @@
 markdown==3.7
 mkdocs==1.6.1
-mkdocs-material==9.6.9
+mkdocs-material==9.6.10
 mdx_truly_sane_lists==1.3
 pymdown-extensions==10.14.3
 jinja2==3.1.6
@@ -1,6 +1,6 @@
 """Freqtrade bot"""

-__version__ = "2025.3-dev"
+__version__ = "2025.4-dev"

 if "dev" in __version__:
     from pathlib import Path
@@ -118,7 +118,7 @@ def _calc_drawdown_series(
 ) -> pd.DataFrame:
     max_drawdown_df = pd.DataFrame()
     max_drawdown_df["cumulative"] = profit_results[value_col].cumsum()
-    max_drawdown_df["high_value"] = max_drawdown_df["cumulative"].cummax()
+    max_drawdown_df["high_value"] = np.maximum(0, max_drawdown_df["cumulative"].cummax())
     max_drawdown_df["drawdown"] = max_drawdown_df["cumulative"] - max_drawdown_df["high_value"]
     max_drawdown_df["date"] = profit_results.loc[:, date_col]
     if starting_balance:
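The effect of clamping the running peak at 0 shows up when the cumulative profit never turns positive: the old code measured drawdown from the (negative) running maximum, the new code measures it from break-even. A standalone sketch with made-up numbers:

```python
# Made-up numbers: for a losing sequence, np.maximum(0, ...) pins the peak at
# the starting level, so the reported drawdown becomes deeper.
import numpy as np
import pandas as pd

cumulative = pd.Series([-0.02, -0.03, -0.015, -0.055])

old_high = cumulative.cummax()                 # -0.02, -0.02, -0.015, -0.015
new_high = np.maximum(0, cumulative.cummax())  #  0.00,  0.00,  0.00,   0.00

print((cumulative - old_high).min())  # -0.04  (old behaviour)
print((cumulative - new_high).min())  # -0.055 (new behaviour, measured from break-even)
```

This matches the direction of the updated expectation in `test_calculate_max_drawdown2` further down in this diff (0.043965 to 0.055545).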
File diff suppressed because it is too large
@@ -44,6 +44,7 @@ class FtHas(TypedDict, total=False):
     funding_fee_timeframe: str
     funding_fee_candle_limit: int
     floor_leverage: bool
+    uses_leverage_tiers: bool
     needs_trading_fees: bool
     order_props_in_contracts: list[Literal["amount", "cost", "filled", "remaining"]]

@@ -35,6 +35,7 @@ class Hyperliquid(Exchange):
         "stop_price_prop": "stopPrice",
         "funding_fee_timeframe": "1h",
         "funding_fee_candle_limit": 500,
+        "uses_leverage_tiers": False,
     }

     _supported_trading_mode_margin_pairs: list[tuple[TradingMode, MarginMode]] = [
@@ -69,7 +69,7 @@ class Kraken(Exchange):
         consolidated: CcxtBalances = {}
         for currency, balance in balances.items():
             base_currency = currency[:-2] if currency.endswith(".F") else currency
-            base_currency = self._api.commonCurrencies.get(base_currency, base_currency)
             if base_currency in consolidated:
                 consolidated[base_currency]["free"] += balance["free"]
                 consolidated[base_currency]["used"] += balance["used"]
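For context, the surrounding loop folds Kraken's `.F`-suffixed balance entries into the plain currency. The removed line delegated currency-alias mapping to ccxt's `commonCurrencies`; judging by the ccxt bump to 4.4.71 and the `XBT.F` to `BTC.F` renames in the tests later in this diff, that mapping is presumably applied upstream now. A toy version of the consolidation, with invented balances:

```python
# Toy consolidation of ".F"-suffixed balances into the base currency.
# Balances are invented; this is not freqtrade's actual Kraken code.
balances = {
    "BTC": {"free": 0.25, "used": 0.0, "total": 0.25},
    "BTC.F": {"free": 0.5, "used": 0.0, "total": 0.5},
    "EUR": {"free": 100.0, "used": 0.0, "total": 100.0},
}

consolidated: dict[str, dict[str, float]] = {}
for currency, balance in balances.items():
    base = currency[:-2] if currency.endswith(".F") else currency
    if base in consolidated:
        for key in ("free", "used", "total"):
            consolidated[base][key] += balance[key]
    else:
        consolidated[base] = dict(balance)

assert consolidated["BTC"] == {"free": 0.75, "used": 0.0, "total": 0.75}
```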
@@ -1,4 +1,5 @@
-from typing import Any
+from copy import deepcopy
+from typing import Any, cast

 from typing_extensions import TypedDict

@@ -15,11 +16,16 @@ class BacktestResultType(TypedDict):


 def get_BacktestResultType_default() -> BacktestResultType:
-    return {
-        "metadata": {},
-        "strategy": {},
-        "strategy_comparison": [],
-    }
+    return cast(
+        BacktestResultType,
+        deepcopy(
+            {
+                "metadata": {},
+                "strategy": {},
+                "strategy_comparison": [],
+            }
+        ),
+    )


 class BacktestHistoryEntryType(BacktestMetadataType):
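The rewritten default factory combines two standard idioms: `deepcopy` guarantees every caller gets independent nested containers, and `cast` tells the type checker that the plain dict satisfies the TypedDict. A self-contained sketch of the same pattern; the names below are invented, not freqtrade's:

```python
# Same pattern with invented names: a template dict, deep-copied per call and
# cast to the TypedDict so static type checking accepts it.
from copy import deepcopy
from typing import TypedDict, cast


class ResultDict(TypedDict):
    metadata: dict
    strategy: dict
    strategy_comparison: list


_TEMPLATE = {"metadata": {}, "strategy": {}, "strategy_comparison": []}


def fresh_result() -> ResultDict:
    return cast(ResultDict, deepcopy(_TEMPLATE))


first, second = fresh_result(), fresh_result()
first["metadata"]["run"] = 1
assert second["metadata"] == {}  # no state shared between calls
```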
@@ -360,8 +360,9 @@ class Backtesting:
         )
         # Combine data to avoid combining the data per trade.
         unavailable_pairs = []
+        uses_leverage_tiers = self.exchange.get_option("uses_leverage_tiers", True)
         for pair in self.pairlists.whitelist:
-            if pair not in self.exchange._leverage_tiers:
+            if uses_leverage_tiers and pair not in self.exchange._leverage_tiers:
                 unavailable_pairs.append(pair)
                 continue

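Taken together with the `uses_leverage_tiers` entries added to `FtHas` and to the Hyperliquid class above, this check now only flags a pair as unavailable when the exchange actually relies on leverage tiers. A rough standalone sketch of the lookup-with-fallback idea; names and values are illustrative, not freqtrade's real Exchange API:

```python
# Rough sketch: exchange-specific capability flags override defaults, and the
# caller supplies a fallback for keys an exchange never sets.
_FT_HAS_DEFAULT = {"needs_trading_fees": False}
_FT_HAS_HYPERLIQUID = {"uses_leverage_tiers": False}


def get_option(key: str, default=None):
    merged = {**_FT_HAS_DEFAULT, **_FT_HAS_HYPERLIQUID}
    return merged.get(key, default)


leverage_tiers: dict[str, list] = {}  # an exchange like Hyperliquid exposes none
whitelist = ["BTC/USDC:USDC", "ETH/USDC:USDC"]

uses_leverage_tiers = get_option("uses_leverage_tiers", True)
unavailable = [p for p in whitelist if uses_leverage_tiers and p not in leverage_tiers]
assert unavailable == []  # nothing is filtered out when the flag is False
```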
@@ -1792,6 +1793,7 @@ class Backtesting:
             dt_appendix,
             market_change_data=combined_res,
             analysis_results=self.analysis_results,
+            strategy_files={s.get_strategy_name(): s.__file__ for s in self.strategylist},
         )

         # Results may be mixed up now. Sort them so they follow --strategy-list order.
@@ -6,6 +6,7 @@ from zipfile import ZIP_DEFLATED, ZipFile

 from pandas import DataFrame

+from freqtrade.configuration import sanitize_config
 from freqtrade.constants import LAST_BT_RESULT_FN
 from freqtrade.enums.runmode import RunMode
 from freqtrade.ft_types import BacktestResultType

@@ -52,6 +53,7 @@ def store_backtest_results(
     *,
     market_change_data: DataFrame | None = None,
     analysis_results: dict[str, dict[str, DataFrame]] | None = None,
+    strategy_files: dict[str, str] | None = None,
 ) -> Path:
     """
     Stores backtest results and analysis data in a zip file, with metadata stored separately

@@ -85,6 +87,32 @@ def store_backtest_results(
         dump_json_to_file(stats_buf, stats_copy)
         zipf.writestr(json_filename.name, stats_buf.getvalue())

+        config_buf = StringIO()
+        dump_json_to_file(config_buf, sanitize_config(config["original_config"]))
+        zipf.writestr(f"{base_filename.stem}_config.json", config_buf.getvalue())
+
+        for strategy_name, strategy_file in (strategy_files or {}).items():
+            # Store the strategy file and its parameters
+            strategy_buf = BytesIO()
+            strategy_path = Path(strategy_file)
+            if not strategy_path.is_file():
+                logger.warning(f"Strategy file '{strategy_path}' does not exist. Skipping.")
+                continue
+            with strategy_path.open("rb") as strategy_file_obj:
+                strategy_buf.write(strategy_file_obj.read())
+            strategy_buf.seek(0)
+            zipf.writestr(f"{base_filename.stem}_{strategy_name}.py", strategy_buf.getvalue())
+            strategy_params = strategy_path.with_suffix(".json")
+            if strategy_params.is_file():
+                strategy_params_buf = BytesIO()
+                with strategy_params.open("rb") as strategy_params_obj:
+                    strategy_params_buf.write(strategy_params_obj.read())
+                strategy_params_buf.seek(0)
+                zipf.writestr(
+                    f"{base_filename.stem}_{strategy_name}.json",
+                    strategy_params_buf.getvalue(),
+                )
+
         # Add market change data if present
         if market_change_data is not None:
             market_change_name = f"{base_filename.stem}_market_change.feather"
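A small aside on the added block: routing the file content through a `BytesIO` works, but `Path.read_bytes()` would produce the same archive member with less ceremony. This is purely an observation, not something the commit needs; the helper below is an illustrative alternative using the same member naming.

```python
# Illustrative alternative to the BytesIO round-trip: write the bytes straight
# from disk into the archive, using the same member name pattern as above.
from pathlib import Path
from zipfile import ZIP_DEFLATED, ZipFile


def add_strategy_copy(zipf: ZipFile, stem: str, strategy_name: str, strategy_file: str) -> None:
    strategy_path = Path(strategy_file)
    if strategy_path.is_file():
        zipf.writestr(f"{stem}_{strategy_name}.py", strategy_path.read_bytes())


# Usage sketch (paths are placeholders):
# with ZipFile("backtest-result.zip", "w", ZIP_DEFLATED) as zipf:
#     add_strategy_copy(zipf, "backtest-result-2025-04-01", "MyStrategy",
#                       "user_data/strategies/my_strategy.py")
```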
@@ -18,7 +18,7 @@ from freqtrade.data.metrics import (
     calculate_sortino,
     calculate_sqn,
 )
-from freqtrade.ft_types import BacktestResultType
+from freqtrade.ft_types import BacktestResultType, get_BacktestResultType_default
 from freqtrade.util import decimals_per_coin, fmt_coin, get_dry_run_wallet


@@ -644,11 +644,7 @@ def generate_backtest_stats(
     :param max_date: Backtest end date
     :return: Dictionary containing results per strategy and a strategy summary.
     """
-    result: BacktestResultType = {
-        "metadata": {},
-        "strategy": {},
-        "strategy_comparison": [],
-    }
+    result: BacktestResultType = get_BacktestResultType_default()
     market_change = calculate_market_change(btdata, "close")
     metadata = {}
     pairlist = list(btdata.keys())
@@ -108,6 +108,9 @@ def __run_backtest_bg(btconfig: Config):
             ApiBG.bt["bt"].results,
             datetime.now().strftime("%Y-%m-%d_%H-%M-%S"),
             market_change_data=combined_res,
+            strategy_files={
+                s.get_strategy_name(): s.__file__ for s in ApiBG.bt["bt"].strategylist
+            },
         )
         ApiBG.bt["bt"].results["metadata"][strategy_name]["filename"] = str(fn.stem)
         ApiBG.bt["bt"].results["metadata"][strategy_name]["strategy"] = strategy_name
@@ -132,6 +132,7 @@ class IStrategy(ABC, HyperStrategyMixin):
     stake_currency: str
     # container variable for strategy source code
     __source__: str = ""
+    __file__: str = ""

     # Definition of plot_config. See plotting documentation for more details.
     plot_config: dict = {}
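`__file__` mirrors the existing `__source__` container: the strategy loader can record where a class came from, which is what the backtesting and API changes above read back via `s.__file__`. A minimal, hypothetical illustration follows - this is not freqtrade's actual resolver code:

```python
# Hypothetical illustration: populate the container attributes when a strategy
# class is loaded, so later exports can copy the originating file.
from pathlib import Path


class DemoStrategy:
    __source__: str = ""
    __file__: str = ""


def load_strategy(cls, path: Path):
    cls.__source__ = path.read_text()  # keep the source text
    cls.__file__ = str(path)           # remember where it came from
    return cls()
```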
@@ -1,7 +1,7 @@
 from freqtrade_client.ft_rest_client import FtRestClient


-__version__ = "2025.3-dev"
+__version__ = "2025.4-dev"

 if "dev" in __version__:
     from pathlib import Path
@@ -11,7 +11,7 @@ ruff==0.11.2
 mypy==1.15.0
 pre-commit==4.2.0
 pytest==8.3.5
-pytest-asyncio==0.25.3
+pytest-asyncio==0.26.0
 pytest-cov==6.0.0
 pytest-mock==3.14.0
 pytest-random-order==1.1.1

@@ -27,6 +27,6 @@ nbconvert==7.16.6
 # mypy types
 types-cachetools==5.5.0.20240820
 types-filelock==3.2.7
-types-requests==2.32.0.20250306
+types-requests==2.32.0.20250328
 types-tabulate==0.9.0.20241207
 types-python-dateutil==2.9.0.20241206
@@ -2,7 +2,6 @@
 -r requirements-freqai.txt

 # Required for freqai-rl
-torch==2.2.2; sys_platform == 'darwin' and platform_machine == 'x86_64'
 torch==2.6.0; sys_platform != 'darwin' or platform_machine != 'x86_64'
 gymnasium==0.29.1
 # SB3 >=2.5.0 depends on torch 2.3.0 - which implies it dropped support x86 macos
@@ -4,14 +4,14 @@ bottleneck==1.4.2
 numexpr==2.10.2
 pandas-ta==0.3.14b

-ccxt==4.4.69
+ccxt==4.4.71
 cryptography==44.0.2
 aiohttp==3.9.5
-SQLAlchemy==2.0.39
+SQLAlchemy==2.0.40
 python-telegram-bot==22.0
 # can't be hard-pinned due to telegram-bot pinning httpx with ~
 httpx>=0.24.1
-humanize==4.12.1
+humanize==4.12.2
 cachetools==5.5.2
 requests==2.32.3
 urllib3==2.3.0

@@ -22,7 +22,7 @@ tabulate==0.9.0
 pycoingecko==3.2.0
 jinja2==3.1.6
 joblib==1.4.2
-rich==13.9.4
+rich==14.0.0
 pyarrow==19.0.1; platform_machine != 'armv7l'

 # find first, C search in arrays

@@ -31,14 +31,14 @@ py_find_1st==1.1.7
 # Load ticker files 30% faster
 python-rapidjson==1.20
 # Properly format api responses
-orjson==3.10.15
+orjson==3.10.16

 # Notify systemd
 sdnotify==0.3.2

 # API Server
 fastapi==0.115.12
-pydantic==2.10.6
+pydantic==2.11.1
 uvicorn==0.34.0
 pyjwt==2.10.1
 aiofiles==24.1.0

@@ -49,7 +49,7 @@ questionary==2.1.0
 prompt-toolkit==3.0.50
 # Extensions to datetime library
 python-dateutil==2.9.0.post0
-pytz==2025.1
+pytz==2025.2

 #Futures
 schedule==1.2.2
@@ -652,6 +652,7 @@ def get_default_conf(testdatadir):
         "trading_mode": "spot",
         "margin_mode": "",
         "candle_type_def": CandleType.SPOT,
+        "original_config": {},
     }
     return configuration

@@ -569,7 +569,7 @@ def test_calculate_max_drawdown2():
     df1.loc[:, "profit"] = df1["profit"] * -1
     # No winning trade ...
     drawdown = calculate_max_drawdown(df1, date_col="open_date", value_col="profit")
-    assert drawdown.drawdown_abs == 0.043965
+    assert drawdown.drawdown_abs == 0.055545


 @pytest.mark.parametrize(
@@ -71,7 +71,7 @@ def test_get_balances_prod_kraken(default_conf, mocker):
             "4TH": balance_item.copy(),
             "EUR": balance_item.copy(),
             "BTC": {"free": 0.0, "total": 0.0, "used": 0.0},
-            "XBT.F": balance_item.copy(),
+            "BTC.F": balance_item.copy(),
             "timestamp": 123123,
         }
     )
@@ -156,7 +156,7 @@ EXCHANGES = {
             "ADA.F": {"free": 2.0, "total": 2.0, "used": 0.0},
             "BTC": {"free": 0.0006, "total": 0.0006, "used": 0.0},
             # XBT.F should be mapped to BTC.F
-            "XBT.F": {"free": 0.001, "total": 0.001, "used": 0.0},
+            "BTC.F": {"free": 0.001, "total": 0.001, "used": 0.0},
         },
     },
 },
@@ -1,5 +1,6 @@
 import json
 import re
+import shutil
 from datetime import timedelta
 from pathlib import Path
 from shutil import copyfile

@@ -41,7 +42,7 @@ from freqtrade.optimize.optimize_reports.optimize_reports import (
 from freqtrade.resolvers.strategy_resolver import StrategyResolver
 from freqtrade.util import dt_ts
 from freqtrade.util.datetime_helpers import dt_from_ts, dt_utc
-from tests.conftest import CURRENT_TEST_STRATEGY
+from tests.conftest import CURRENT_TEST_STRATEGY, log_has_re
 from tests.data.test_history import _clean_test_file


@@ -263,8 +264,9 @@ def test_store_backtest_results(testdatadir, mocker):
     dump_mock = mocker.patch("freqtrade.optimize.optimize_reports.bt_storage.file_dump_json")
     zip_mock = mocker.patch("freqtrade.optimize.optimize_reports.bt_storage.ZipFile")
     data = {"metadata": {}, "strategy": {}, "strategy_comparison": []}
-    store_backtest_results({"exportfilename": testdatadir}, data, "2022_01_01_15_05_13")
+    store_backtest_results(
+        {"exportfilename": testdatadir, "original_config": {}}, data, "2022_01_01_15_05_13"
+    )

     assert dump_mock.call_count == 2
     assert zip_mock.call_count == 1

@@ -274,7 +276,9 @@ def test_store_backtest_results(testdatadir, mocker):
     dump_mock.reset_mock()
     zip_mock.reset_mock()
     filename = testdatadir / "testresult.json"
-    store_backtest_results({"exportfilename": filename}, data, "2022_01_01_15_05_13")
+    store_backtest_results(
+        {"exportfilename": filename, "original_config": {}}, data, "2022_01_01_15_05_13"
+    )
     assert dump_mock.call_count == 2
     assert zip_mock.call_count == 1
     assert isinstance(dump_mock.call_args_list[0][0][0], Path)
@@ -282,9 +286,16 @@ def test_store_backtest_results(testdatadir, mocker):
     assert str(dump_mock.call_args_list[0][0][0]).startswith(str(testdatadir / "testresult"))


-def test_store_backtest_results_real(tmp_path):
+def test_store_backtest_results_real(tmp_path, caplog):
     data = {"metadata": {}, "strategy": {}, "strategy_comparison": []}
-    store_backtest_results({"exportfilename": tmp_path}, data, "2022_01_01_15_05_13")
+    config = {
+        "exportfilename": tmp_path,
+        "original_config": {},
+    }
+    store_backtest_results(
+        config, data, "2022_01_01_15_05_13", strategy_files={"DefStrat": "NoFile"}
+    )
+    assert log_has_re(r"Strategy file .* does not exist\. Skipping\.", caplog)

     zip_file = tmp_path / "backtest-result-2022_01_01_15_05_13.zip"
     assert zip_file.is_file()
@@ -297,8 +308,19 @@ def test_store_backtest_results_real(tmp_path, caplog):
     fn = get_latest_backtest_filename(tmp_path)
     assert fn == "backtest-result-2022_01_01_15_05_13.zip"

+    strategy_test_dir = Path(__file__).parent.parent / "strategy" / "strats"
+
+    shutil.copy(strategy_test_dir / "strategy_test_v3.py", tmp_path)
+    params_file = tmp_path / "strategy_test_v3.json"
+    with params_file.open("w") as f:
+        f.write("""{"strategy_name": "TurtleStrategyX5","params":{}}""")
+
     store_backtest_results(
-        {"exportfilename": tmp_path}, data, "2024_01_01_15_05_25", market_change_data=pd.DataFrame()
+        config,
+        data,
+        "2024_01_01_15_05_25",
+        market_change_data=pd.DataFrame(),
+        strategy_files={"DefStrat": str(tmp_path / "strategy_test_v3.py")},
     )
     zip_file = tmp_path / "backtest-result-2024_01_01_15_05_25.zip"
     assert zip_file.is_file()
@@ -308,6 +330,22 @@ def test_store_backtest_results_real(tmp_path, caplog):
     with ZipFile(zip_file, "r") as zipf:
         assert "backtest-result-2024_01_01_15_05_25.json" in zipf.namelist()
         assert "backtest-result-2024_01_01_15_05_25_market_change.feather" in zipf.namelist()
+        assert "backtest-result-2024_01_01_15_05_25_config.json" in zipf.namelist()
+        # strategy file is copied to the zip file
+        assert "backtest-result-2024_01_01_15_05_25_DefStrat.py" in zipf.namelist()
+        # compare the content of the strategy file
+        with zipf.open("backtest-result-2024_01_01_15_05_25_DefStrat.py") as strategy_file:
+            strategy_content = strategy_file.read()
+        with (strategy_test_dir / "strategy_test_v3.py").open("rb") as original_file:
+            original_content = original_file.read()
+        assert strategy_content == original_content
+        assert "backtest-result-2024_01_01_15_05_25_DefStrat.py" in zipf.namelist()
+        with zipf.open("backtest-result-2024_01_01_15_05_25_DefStrat.json") as pf:
+            params_content = pf.read()
+        with params_file.open("rb") as original_file:
+            original_content = original_file.read()
+        assert params_content == original_content

     assert (tmp_path / LAST_BT_RESULT_FN).is_file()

     # Last file reference should be updated
@@ -323,6 +361,7 @@ def test_write_read_backtest_candles(tmp_path):
         "exportfilename": tmp_path,
         "export": "signals",
         "runmode": "backtest",
+        "original_config": {},
     }
     # test directory exporting
     sample_date = "2022_01_01_15_05_13"