Mirror of https://github.com/freqtrade/freqtrade.git (synced 2026-01-19 21:40:24 +00:00)

Merge branch 'develop' into maint/python_3.14

.github/dependabot.yml (vendored, 5 changes)
@@ -56,3 +56,8 @@ updates:
interval: "weekly"
open-pull-requests-limit: 10
target-branch: develop
groups:
actions:
patterns:
# Combine updates for github provided actions
- "actions/*"
@@ -34,7 +34,7 @@ jobs:
run: python build_helpers/binance_update_lev_tiers.py

- uses: peter-evans/create-pull-request@84ae59a2cdc2258d6fa0732dd66352dddae2a412 # v7.0.9
- uses: peter-evans/create-pull-request@98357b18bf14b5342f975ff684046ec3b2a07725 # v8.0.0
with:
token: ${{ secrets.REPO_SCOPED_TOKEN }}
add-paths: freqtrade/exchange/binance_leverage_tiers.json
.github/workflows/ci.yml (vendored, 14 changes)

@@ -38,7 +38,7 @@ jobs:
python-version: ${{ matrix.python-version }}

- name: Install uv
uses: astral-sh/setup-uv@1e862dfacbd1d6d858c55d9b792c756523627244 # v7.1.4
uses: astral-sh/setup-uv@681c641aba71e4a1c380be3ab5e12ad51f415867 # v7.1.6
with:
activate-environment: true
enable-cache: true
@@ -74,7 +74,7 @@ jobs:
run: |
pytest --random-order --cov=freqtrade --cov=freqtrade_client --cov-config=.coveragerc

- uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7 # v5.5.1
- uses: codecov/codecov-action@671740ac38dd9b0130fbe1cec585b89eea48d3de # v5.5.2
if: (runner.os == 'Linux' && matrix.python-version == '3.12' && matrix.os == 'ubuntu-24.04')
with:
fail_ci_if_error: true
@@ -250,7 +250,7 @@ jobs:
python-version: "3.12"

- name: Install uv
uses: astral-sh/setup-uv@1e862dfacbd1d6d858c55d9b792c756523627244 # v7.1.4
uses: astral-sh/setup-uv@681c641aba71e4a1c380be3ab5e12ad51f415867 # v7.1.6
with:
activate-environment: true
enable-cache: true
@@ -335,7 +335,7 @@ jobs:
python -m build --sdist --wheel

- name: Upload artifacts 📦
uses: actions/upload-artifact@v5
uses: actions/upload-artifact@v6
with:
name: freqtrade-build
path: |
@@ -348,7 +348,7 @@ jobs:
python -m build --sdist --wheel ft_client

- name: Upload artifacts 📦
uses: actions/upload-artifact@v5
uses: actions/upload-artifact@v6
with:
name: freqtrade-client-build
path: |
@@ -372,7 +372,7 @@ jobs:
persist-credentials: false

- name: Download artifact 📦
uses: actions/download-artifact@v6
uses: actions/download-artifact@v7
with:
pattern: freqtrade*-build
path: dist
@@ -401,7 +401,7 @@ jobs:
persist-credentials: false

- name: Download artifact 📦
uses: actions/download-artifact@v6
uses: actions/download-artifact@v7
with:
pattern: freqtrade*-build
path: dist
.github/workflows/docker-build.yml (vendored, 14 changes)

@@ -37,6 +37,17 @@ jobs:
with:
persist-credentials: false

- name: Visualize disk usage before build
run: df -h

- name: Cleanup some disk space
run: |
docker system prune -a --force || true
docker builder prune -af || true

- name: Visualize disk usage after cleanup
run: df -h

- name: Set docker tag names
id: tags
uses: ./.github/actions/docker-tags
@@ -142,6 +153,9 @@ jobs:
run: |
docker images

- name: Visualize disk usage after build
run: df -h

deploy-arm:
name: "Deploy Docker ARM64"
permissions:
.github/workflows/pre-commit-update.yml (vendored, 2 changes)

@@ -28,7 +28,7 @@ jobs:
- name: Run auto-update
run: pre-commit autoupdate

- uses: peter-evans/create-pull-request@84ae59a2cdc2258d6fa0732dd66352dddae2a412 # v7.0.9
- uses: peter-evans/create-pull-request@98357b18bf14b5342f975ff684046ec3b2a07725 # v8.0.0
with:
token: ${{ secrets.REPO_SCOPED_TOKEN }}
add-paths: .pre-commit-config.yaml
@@ -31,8 +31,8 @@ repos:
- types-requests==2.32.4.20250913
- types-tabulate==0.9.0.20241207
- types-python-dateutil==2.9.0.20251115
- scipy-stubs==1.16.3.2
- SQLAlchemy==2.0.44
- scipy-stubs==1.16.3.3
- SQLAlchemy==2.0.45
# stages: [push]

- repo: https://github.com/pycqa/isort
@@ -44,7 +44,7 @@ repos:

- repo: https://github.com/charliermarsh/ruff-pre-commit
# Ruff version.
rev: 'v0.14.9'
rev: 'v0.14.10'
hooks:
- id: ruff
- id: ruff-format
@@ -83,6 +83,6 @@ repos:

# Ensure github actions remain safe
- repo: https://github.com/woodruffw/zizmor-pre-commit
rev: v1.18.0
rev: v1.19.0
hooks:
- id: zizmor
@@ -135,3 +135,13 @@ you can verify this with `freqtrade list-data --exchange <yourexchange> --show`.
Additional arguments to the above commands may be necessary, such as configuration files or an explicit user_data directory, if they deviate from the defaults.

**Hyperliquid** is a special case: it no longer requires 1h mark data and uses regular candles instead (mark data never existed for Hyperliquid and is identical to the 1h futures candles). As `download-data` is not supported for Hyperliquid (the exchange does not provide historic data), no action is necessary for Hyperliquid users.

## CatBoost models in FreqAI

CatBoost models have been removed in version 2025.12 and are no longer actively supported.
If you have existing bots using CatBoost models, you can still use them as custom models by copying them from the git history (linked below) and installing the CatBoost library manually - a minimal sketch follows the list below.
We do, however, recommend switching to supported model libraries such as LightGBM or XGBoost for better support and future compatibility.

* [CatboostRegressor](https://github.com/freqtrade/freqtrade/blob/c6f3b0081927e161a16b116cc47fb663f7831d30/freqtrade/freqai/prediction_models/CatboostRegressor.py)
* [CatboostClassifier](https://github.com/freqtrade/freqtrade/blob/c6f3b0081927e161a16b116cc47fb663f7831d30/freqtrade/freqai/prediction_models/CatboostClassifier.py)
* [CatboostClassifierMultiTarget](https://github.com/freqtrade/freqtrade/blob/c6f3b0081927e161a16b116cc47fb663f7831d30/freqtrade/freqai/prediction_models/CatboostClassifierMultiTarget.py)
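
As a rough illustration of that custom-model route, here is a trimmed sketch based on the removed `CatboostRegressor` (the `user_data/freqaimodels/` location is the usual place for custom FreqAI models, and the evaluation-set handling of the original file is omitted for brevity):

```python
# user_data/freqaimodels/CatboostRegressor.py -- trimmed copy of the removed built-in model.
# Requires a manual `pip install catboost`; it is no longer pulled in by the freqai extra.
import logging
from pathlib import Path
from typing import Any

from catboost import CatBoostRegressor, Pool

from freqtrade.freqai.base_models.BaseRegressionModel import BaseRegressionModel
from freqtrade.freqai.data_kitchen import FreqaiDataKitchen

logger = logging.getLogger(__name__)


class CatboostRegressor(BaseRegressionModel):
    """Custom prediction model, selected via `--freqaimodel CatboostRegressor`."""

    def fit(self, data_dictionary: dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
        # Wrap the training data in a CatBoost Pool so sample weights are honored.
        train_data = Pool(
            data=data_dictionary["train_features"],
            label=data_dictionary["train_labels"],
            weight=data_dictionary["train_weights"],
        )
        model = CatBoostRegressor(
            allow_writing_files=True,
            train_dir=Path(dk.data_path),
            **self.model_training_parameters,
        )
        # Continue from a previously trained model for this pair, if one exists.
        model.fit(X=train_data, init_model=self.get_init_model(dk.pair))
        return model
```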
@@ -200,15 +200,15 @@ If this value is set, FreqAI will initially use the predictions from the trainin

## Using different prediction models

FreqAI has multiple example prediction model libraries that are ready to be used as is via the flag `--freqaimodel`. These libraries include `CatBoost`, `LightGBM`, and `XGBoost` regression, classification, and multi-target models, and can be found in `freqai/prediction_models/`.
FreqAI has multiple example prediction model libraries that are ready to be used as is via the flag `--freqaimodel`. These libraries include `LightGBM` and `XGBoost` regression, classification, and multi-target models, and can be found in `freqai/prediction_models/`.

Regression and classification models differ in what targets they predict - a regression model will predict a target of continuous values, for example what price BTC will be at tomorrow, whilst a classifier will predict a target of discrete values, for example if the price of BTC will go up tomorrow or not. This means that you have to specify your targets differently depending on which model type you are using (see details [below](#setting-model-targets)).

All of the aforementioned model libraries implement gradient boosted decision tree algorithms. They all work on the principle of ensemble learning, where predictions from multiple simple learners are combined to get a final prediction that is more stable and generalized. The simple learners in this case are decision trees. Gradient boosting refers to the method of learning, where each simple learner is built in sequence - the subsequent learner is used to improve on the error from the previous learner. If you want to learn more about the different model libraries you can find the information in their respective docs:

* CatBoost: https://catboost.ai/en/docs/
* LightGBM: https://lightgbm.readthedocs.io/en/v3.3.2/#
* XGBoost: https://xgboost.readthedocs.io/en/stable/#
* LightGBM: <https://lightgbm.readthedocs.io/en/v3.3.2/#>
* XGBoost: <https://xgboost.readthedocs.io/en/stable/#>
* CatBoost: <https://catboost.ai/en/docs/> (No longer actively supported since 2025.12)

There are also numerous online articles describing and comparing the algorithms. Some relatively lightweight examples would be [CatBoost vs. LightGBM vs. XGBoost — Which is the best algorithm?](https://towardsdatascience.com/catboost-vs-lightgbm-vs-xgboost-c80f40662924#:~:text=In%20CatBoost%2C%20symmetric%20trees%2C%20or,the%20same%20depth%20can%20differ.) and [XGBoost, LightGBM or CatBoost — which boosting algorithm should I use?](https://medium.com/riskified-technology/xgboost-lightgbm-or-catboost-which-boosting-algorithm-should-i-use-e7fda7bb36bc). Keep in mind that the performance of each model is highly dependent on the application, so any reported metrics might not hold for your particular use of the model.
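
To make the drop-in nature of these libraries concrete, here is a minimal, standalone sketch (plain scikit-learn style on synthetic data, outside of FreqAI; the data and hyperparameters are arbitrary examples):

```python
import numpy as np
from lightgbm import LGBMRegressor
from xgboost import XGBRegressor

# Synthetic "features" and a continuous "target", standing in for FreqAI features/labels.
rng = np.random.default_rng(42)
X = rng.normal(size=(500, 10))
y = X[:, 0] * 2.0 + rng.normal(scale=0.1, size=500)

# Both libraries expose the same scikit-learn style fit/predict API, which is why
# one gradient-boosted regressor can be swapped for the other with no other changes.
for model in (LGBMRegressor(n_estimators=100), XGBRegressor(n_estimators=100)):
    model.fit(X, y)
    print(type(model).__name__, model.predict(X[:3]))
```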
@@ -219,7 +219,7 @@ Make sure to use unique names to avoid overriding built-in models.

#### Regressors

If you are using a regressor, you need to specify a target that has continuous values. FreqAI includes a variety of regressors, such as the `CatboostRegressor`via the flag `--freqaimodel CatboostRegressor`. An example of how you could set a regression target for predicting the price 100 candles into the future would be
If you are using a regressor, you need to specify a target that has continuous values. FreqAI includes a variety of regressors, such as the `LightGBMRegressor` via the flag `--freqaimodel LightGBMRegressor`. An example of how you could set a regression target for predicting the price 100 candles into the future would be

```python
df['&s-close_price'] = df['close'].shift(-100)
@@ -229,7 +229,7 @@ If you want to predict multiple targets, you need to define multiple labels usin

#### Classifiers

If you are using a classifier, you need to specify a target that has discrete values. FreqAI includes a variety of classifiers, such as the `CatboostClassifier` via the flag `--freqaimodel CatboostClassifier`. If you elects to use a classifier, the classes need to be set using strings. For example, if you want to predict if the price 100 candles into the future goes up or down you would set
If you are using a classifier, you need to specify a target that has discrete values. FreqAI includes a variety of classifiers, such as the `LightGBMClassifier` via the flag `--freqaimodel LightGBMClassifier`. If you elect to use a classifier, the classes need to be set using strings. For example, if you want to predict if the price 100 candles into the future goes up or down you would set

```python
df['&s-up_or_down'] = np.where( df["close"].shift(-100) > df["close"], 'up', 'down')
@@ -107,7 +107,6 @@ Mandatory parameters are marked as **Required** and have to be set in one of the
| `n_steps` | An alternative way of setting `n_epochs` - the number of training iterations to run. Iterations here refer to the number of times we call `optimizer.step()`. Ignored if `n_epochs` is set. A simplified version of the relation: <br><br> n_epochs = n_steps / (n_obs / batch_size) <br><br> The motivation here is that `n_steps` is easier to optimize and keep stable across different `n_obs` - the number of data points. <br><br> **Datatype:** int, optional. <br> Default: `None`. |
| `batch_size` | The size of the batches to use during training. <br><br> **Datatype:** int. <br> Default: `64`. |

### Additional parameters

| Parameter | Description |
@@ -116,3 +115,4 @@ Mandatory parameters are marked as **Required** and have to be set in one of the

| `freqai.keras` | If the selected model makes use of Keras (typical for TensorFlow-based prediction models), this flag needs to be activated so that the model save/loading follows Keras standards. <br> **Datatype:** Boolean. <br> Default: `False`. |
| `freqai.conv_width` | The width of a neural network input tensor. This replaces the need for shifting candles (`include_shifted_candles`) by feeding in historical data points as the second dimension of the tensor. Technically, this parameter can also be used for regressors, but it only adds computational overhead and does not change the model training/prediction. <br> **Datatype:** Integer. <br> Default: `2`. |
| `freqai.reduce_df_footprint` | Recast all numeric columns to float32/int32, with the objective of reducing RAM/disk usage and decreasing train/inference time. This parameter is set in the main level of the Freqtrade configuration file (not inside FreqAI). <br> **Datatype:** Boolean. <br> Default: `False`. |
| `freqai.override_exchange_check` | Override the exchange check to force FreqAI to use exchanges that may not have enough historic data. Set this to `True` if you know your FreqAI model and strategy do not require historical data. <br> **Datatype:** Boolean. <br> Default: `False`. |
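
As a quick illustration of the `n_steps` relation quoted in the table above, a small sketch (the numbers are arbitrary examples, not defaults):

```python
# n_epochs = n_steps / (n_obs / batch_size), i.e. optimizer steps divided by batches per epoch.
def epochs_from_steps(n_steps: int, n_obs: int, batch_size: int) -> float:
    batches_per_epoch = n_obs / batch_size
    return n_steps / batches_per_epoch

# Example: 5000 optimizer steps on 64,000 data points with batch_size=64
# -> 1000 batches per epoch -> 5 epochs.
print(epochs_from_steps(5000, 64_000, 64))  # 5.0
```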
@@ -2,6 +2,6 @@ markdown==3.10
mkdocs==1.6.1
mkdocs-material==9.7.0
mdx_truly_sane_lists==1.3
pymdown-extensions==10.18
pymdown-extensions==10.19.1
jinja2==3.1.6
mike==2.1.3
@@ -17,7 +17,7 @@ from freqtrade.exchange.binance_public_data import (
download_archive_trades,
)
from freqtrade.exchange.common import retrier
from freqtrade.exchange.exchange_types import CcxtOrder, FtHas, Tickers
from freqtrade.exchange.exchange_types import FtHas, Tickers
from freqtrade.exchange.exchange_utils_timeframe import timeframe_to_msecs
from freqtrade.misc import deep_merge_dicts, json_load
from freqtrade.util import FtTTLCache
@@ -51,6 +51,8 @@ class Binance(Exchange):
"funding_fee_candle_limit": 1000,
"stoploss_order_types": {"limit": "stop", "market": "stop_market"},
"stoploss_blocks_assets": False, # Stoploss orders do not block assets
"stoploss_query_requires_stop_flag": True,
"stoploss_algo_order_info_id": "actualOrderId",
"tickers_have_price": False,
"floor_leverage": True,
"fetch_orders_limit_minutes": 7 * 1440, # "fetch_orders" is limited to 7 days
@@ -145,20 +147,6 @@ class Binance(Exchange):
except ccxt.BaseError as e:
raise OperationalException(e) from e

def fetch_stoploss_order(
self, order_id: str, pair: str, params: dict | None = None
) -> CcxtOrder:
if self.trading_mode == TradingMode.FUTURES:
params = params or {}
params.update({"stop": True})
return self.fetch_order(order_id, pair, params)

def cancel_stoploss_order(self, order_id: str, pair: str, params: dict | None = None) -> dict:
if self.trading_mode == TradingMode.FUTURES:
params = params or {}
params.update({"stop": True})
return self.cancel_order(order_id=order_id, pair=pair, params=params)

def get_historic_ohlcv(
self,
pair: str,
File diff suppressed because it is too large
@@ -31,6 +31,7 @@ class Bitget(Exchange):
"stop_price_prop": "stopPrice",
"stoploss_blocks_assets": False, # Stoploss orders do not block assets
"stoploss_order_types": {"limit": "limit", "market": "market"},
"stoploss_query_requires_stop_flag": True,
"ohlcv_candle_limit": 200, # 200 for historical candles, 1000 for recent ones.
"order_time_in_force": ["GTC", "FOK", "IOC", "PO"],
}
@@ -128,9 +129,6 @@ class Bitget(Exchange):

return self._fetch_stop_order_fallback(order_id, pair)

def cancel_stoploss_order(self, order_id: str, pair: str, params: dict | None = None) -> dict:
return self.cancel_order(order_id=order_id, pair=pair, params={"stop": True})

@retrier
def additional_exchange_init(self) -> None:
"""
@@ -132,6 +132,7 @@ class Exchange:
"stop_price_prop": "stopLossPrice", # Used for stoploss_on_exchange response parsing
"stoploss_order_types": {},
"stoploss_blocks_assets": True, # By default stoploss orders block assets
"stoploss_query_requires_stop_flag": False, # Require "stop": True" to fetch stop orders
"order_time_in_force": ["GTC"],
"ohlcv_params": {},
"ohlcv_has_history": True, # Some exchanges (Kraken) don't provide history via ohlcv
@@ -1687,7 +1688,24 @@ class Exchange:
def fetch_stoploss_order(
self, order_id: str, pair: str, params: dict | None = None
) -> CcxtOrder:
return self.fetch_order(order_id, pair, params)
if self.get_option("stoploss_query_requires_stop_flag"):
params = params or {}
params["stop"] = True
order = self.fetch_order(order_id, pair, params)
val = self.get_option("stoploss_algo_order_info_id")
if val and order.get("status", "open") == "closed":
if new_orderid := order.get("info", {}).get(val):
# Fetch real order, which was placed by the algo order.
actual_order = self.fetch_order(order_id=new_orderid, pair=pair, params=None)
actual_order["id_stop"] = actual_order["id"]
actual_order["id"] = order_id
actual_order["type"] = "stoploss"
actual_order["stopPrice"] = order.get("stopPrice")
actual_order["status_stop"] = "triggered"

return actual_order

return order

def fetch_order_or_stoploss_order(
self, order_id: str, pair: str, stoploss_order: bool = False
@@ -1741,6 +1759,9 @@ class Exchange:
raise OperationalException(e) from e

def cancel_stoploss_order(self, order_id: str, pair: str, params: dict | None = None) -> dict:
if self.get_option("stoploss_query_requires_stop_flag"):
params = params or {}
params["stop"] = True
return self.cancel_order(order_id, pair, params)

def is_cancel_order_result_suitable(self, corder) -> TypeGuard[CcxtOrder]:
@@ -19,6 +19,8 @@ class FtHas(TypedDict, total=False):
stop_price_type_value_mapping: dict
stoploss_order_types: dict[str, str]
stoploss_blocks_assets: bool
stoploss_query_requires_stop_flag: bool
stoploss_algo_order_info_id: str
# ohlcv
ohlcv_params: dict
ohlcv_candle_limit: int
@@ -30,6 +30,8 @@ class Gate(Exchange):
"stoploss_order_types": {"limit": "limit"},
"stop_price_param": "stopPrice",
"stop_price_prop": "stopPrice",
"stoploss_query_requires_stop_flag": True,
"stoploss_algo_order_info_id": "fired_order_id",
"l2_limit_upper": 1000,
"marketOrderRequiresPrice": True,
"trades_has_history": False, # Endpoint would support this - but ccxt doesn't.
@@ -42,6 +44,7 @@ class Gate(Exchange):
"stop_price_type_field": "price_type",
"l2_limit_upper": 300,
"stoploss_blocks_assets": False,
"stoploss_algo_order_info_id": "trade_id",
"stop_price_type_value_mapping": {
PriceType.LAST: 0,
PriceType.MARK: 1,
@@ -132,25 +135,3 @@ class Gate(Exchange):

def get_order_id_conditional(self, order: CcxtOrder) -> str:
return safe_value_fallback2(order, order, "id_stop", "id")

def fetch_stoploss_order(
self, order_id: str, pair: str, params: dict | None = None
) -> CcxtOrder:
order = self.fetch_order(order_id=order_id, pair=pair, params={"stop": True})
if order.get("status", "open") == "closed":
# Places a real order - which we need to fetch explicitly.
val = "trade_id" if self.trading_mode == TradingMode.FUTURES else "fired_order_id"

if new_orderid := order.get("info", {}).get(val):
order1 = self.fetch_order(order_id=new_orderid, pair=pair, params=params)
order1["id_stop"] = order1["id"]
order1["id"] = order_id
order1["type"] = "stoploss"
order1["stopPrice"] = order.get("stopPrice")
order1["status_stop"] = "triggered"

return order1
return order

def cancel_stoploss_order(self, order_id: str, pair: str, params: dict | None = None) -> dict:
return self.cancel_order(order_id=order_id, pair=pair, params={"stop": True})
@@ -31,6 +31,7 @@ class Okx(Exchange):
"ohlcv_candle_limit": 100, # Warning, special case with data prior to X months
"stoploss_order_types": {"limit": "limit"},
"stoploss_on_exchange": True,
"stoploss_query_requires_stop_flag": True,
"trades_has_history": False, # Endpoint doesn't have a "since" parameter
"ws_enabled": True,
}
@@ -263,9 +264,6 @@ class Okx(Exchange):
return safe_value_fallback2(order, order, "id_stop", "id")
return order["id"]

def cancel_stoploss_order(self, order_id: str, pair: str, params: dict | None = None) -> dict:
return self.cancel_order(order_id=order_id, pair=pair, params={"stop": True})

def _fetch_orders_emulate(self, pair: str, since_ms: int) -> list[CcxtOrder]:
orders = []
@@ -18,7 +18,7 @@ class BaseClassifierModel(IFreqaiModel):
"""
Base class for regression type models (e.g. Catboost, LightGBM, XGboost etc.).
User *must* inherit from this class and set fit(). See example scripts
such as prediction_models/CatboostClassifier.py for guidance.
such as prediction_models/XGBoostClassifier.py for guidance.
"""

def train(self, unfiltered_df: DataFrame, pair: str, dk: FreqaiDataKitchen, **kwargs) -> Any:

@@ -18,7 +18,7 @@ class BaseRegressionModel(IFreqaiModel):
"""
Base class for regression type models (e.g. Catboost, LightGBM, XGboost etc.).
User *must* inherit from this class and set fit(). See example scripts
such as prediction_models/CatboostRegressor.py for guidance.
such as prediction_models/XGBoostRegressor.py for guidance.
"""

def train(self, unfiltered_df: DataFrame, pair: str, dk: FreqaiDataKitchen, **kwargs) -> Any:

@@ -948,7 +948,7 @@ class IFreqaiModel(ABC):
return dk

# Following methods which are overridden by user made prediction models.
# See freqai/prediction_models/CatboostPredictionModel.py for an example.
# See freqai/prediction_models/XGBoostRegressor.py for an example.

@abstractmethod
def train(self, unfiltered_df: DataFrame, pair: str, dk: FreqaiDataKitchen, **kwargs) -> Any:
@@ -964,7 +964,7 @@ class IFreqaiModel(ABC):
def fit(self, data_dictionary: dict[str, Any], dk: FreqaiDataKitchen, **kwargs) -> Any:
"""
Most regressors use the same function names and arguments e.g. user
can drop in LGBMRegressor in place of CatBoostRegressor and all data
can drop in LGBMRegressor in place of XGBoostRegressor and all data
management will be properly handled by Freqai.
:param data_dictionary: Dict = the dictionary constructed by DataHandler to hold
all the training and test data/labels.
@@ -1,61 +0,0 @@
|
||||
import logging
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
from catboost import CatBoostClassifier, Pool
|
||||
|
||||
from freqtrade.freqai.base_models.BaseClassifierModel import BaseClassifierModel
|
||||
from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class CatboostClassifier(BaseClassifierModel):
|
||||
"""
|
||||
User created prediction model. The class inherits IFreqaiModel, which
|
||||
means it has full access to all Frequency AI functionality. Typically,
|
||||
users would use this to override the common `fit()`, `train()`, or
|
||||
`predict()` methods to add their custom data handling tools or change
|
||||
various aspects of the training that cannot be configured via the
|
||||
top level config.json file.
|
||||
"""
|
||||
|
||||
def fit(self, data_dictionary: dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
|
||||
"""
|
||||
User sets up the training and test data to fit their desired model here
|
||||
:param data_dictionary: the dictionary holding all data for train, test,
|
||||
labels, weights
|
||||
:param dk: The datakitchen object for the current coin/model
|
||||
"""
|
||||
|
||||
train_data = Pool(
|
||||
data=data_dictionary["train_features"],
|
||||
label=data_dictionary["train_labels"],
|
||||
weight=data_dictionary["train_weights"],
|
||||
)
|
||||
if self.freqai_info.get("data_split_parameters", {}).get("test_size", 0.1) == 0:
|
||||
test_data = None
|
||||
else:
|
||||
test_data = Pool(
|
||||
data=data_dictionary["test_features"],
|
||||
label=data_dictionary["test_labels"],
|
||||
weight=data_dictionary["test_weights"],
|
||||
)
|
||||
|
||||
cbr = CatBoostClassifier(
|
||||
allow_writing_files=True,
|
||||
loss_function="MultiClass",
|
||||
train_dir=Path(dk.data_path),
|
||||
**self.model_training_parameters,
|
||||
)
|
||||
|
||||
init_model = self.get_init_model(dk.pair)
|
||||
|
||||
cbr.fit(
|
||||
X=train_data,
|
||||
eval_set=test_data,
|
||||
init_model=init_model,
|
||||
)
|
||||
|
||||
return cbr
|
||||
@@ -1,79 +0,0 @@
|
||||
import logging
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
from catboost import CatBoostClassifier, Pool
|
||||
|
||||
from freqtrade.freqai.base_models.BaseClassifierModel import BaseClassifierModel
|
||||
from freqtrade.freqai.base_models.FreqaiMultiOutputClassifier import FreqaiMultiOutputClassifier
|
||||
from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class CatboostClassifierMultiTarget(BaseClassifierModel):
|
||||
"""
|
||||
User created prediction model. The class inherits IFreqaiModel, which
|
||||
means it has full access to all Frequency AI functionality. Typically,
|
||||
users would use this to override the common `fit()`, `train()`, or
|
||||
`predict()` methods to add their custom data handling tools or change
|
||||
various aspects of the training that cannot be configured via the
|
||||
top level config.json file.
|
||||
"""
|
||||
|
||||
def fit(self, data_dictionary: dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
|
||||
"""
|
||||
User sets up the training and test data to fit their desired model here
|
||||
:param data_dictionary: the dictionary holding all data for train, test,
|
||||
labels, weights
|
||||
:param dk: The datakitchen object for the current coin/model
|
||||
"""
|
||||
|
||||
cbc = CatBoostClassifier(
|
||||
allow_writing_files=True,
|
||||
loss_function="MultiClass",
|
||||
train_dir=Path(dk.data_path),
|
||||
**self.model_training_parameters,
|
||||
)
|
||||
|
||||
X = data_dictionary["train_features"]
|
||||
y = data_dictionary["train_labels"]
|
||||
|
||||
sample_weight = data_dictionary["train_weights"]
|
||||
|
||||
eval_sets = [None] * y.shape[1]
|
||||
|
||||
if self.freqai_info.get("data_split_parameters", {}).get("test_size", 0.1) != 0:
|
||||
eval_sets = [None] * data_dictionary["test_labels"].shape[1]
|
||||
|
||||
for i in range(data_dictionary["test_labels"].shape[1]):
|
||||
eval_sets[i] = Pool(
|
||||
data=data_dictionary["test_features"],
|
||||
label=data_dictionary["test_labels"].iloc[:, i],
|
||||
weight=data_dictionary["test_weights"],
|
||||
)
|
||||
|
||||
init_model = self.get_init_model(dk.pair)
|
||||
|
||||
if init_model:
|
||||
init_models = init_model.estimators_
|
||||
else:
|
||||
init_models = [None] * y.shape[1]
|
||||
|
||||
fit_params = []
|
||||
for i in range(len(eval_sets)):
|
||||
fit_params.append(
|
||||
{
|
||||
"eval_set": eval_sets[i],
|
||||
"init_model": init_models[i],
|
||||
}
|
||||
)
|
||||
|
||||
model = FreqaiMultiOutputClassifier(estimator=cbc)
|
||||
thread_training = self.freqai_info.get("multitarget_parallel_training", False)
|
||||
if thread_training:
|
||||
model.n_jobs = y.shape[1]
|
||||
model.fit(X=X, y=y, sample_weight=sample_weight, fit_params=fit_params)
|
||||
|
||||
return model
|
||||
@@ -1,60 +0,0 @@
|
||||
import logging
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
from catboost import CatBoostRegressor, Pool
|
||||
|
||||
from freqtrade.freqai.base_models.BaseRegressionModel import BaseRegressionModel
|
||||
from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class CatboostRegressor(BaseRegressionModel):
|
||||
"""
|
||||
User created prediction model. The class inherits IFreqaiModel, which
|
||||
means it has full access to all Frequency AI functionality. Typically,
|
||||
users would use this to override the common `fit()`, `train()`, or
|
||||
`predict()` methods to add their custom data handling tools or change
|
||||
various aspects of the training that cannot be configured via the
|
||||
top level config.json file.
|
||||
"""
|
||||
|
||||
def fit(self, data_dictionary: dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
|
||||
"""
|
||||
User sets up the training and test data to fit their desired model here
|
||||
:param data_dictionary: the dictionary holding all data for train, test,
|
||||
labels, weights
|
||||
:param dk: The datakitchen object for the current coin/model
|
||||
"""
|
||||
|
||||
train_data = Pool(
|
||||
data=data_dictionary["train_features"],
|
||||
label=data_dictionary["train_labels"],
|
||||
weight=data_dictionary["train_weights"],
|
||||
)
|
||||
if self.freqai_info.get("data_split_parameters", {}).get("test_size", 0.1) == 0:
|
||||
test_data = None
|
||||
else:
|
||||
test_data = Pool(
|
||||
data=data_dictionary["test_features"],
|
||||
label=data_dictionary["test_labels"],
|
||||
weight=data_dictionary["test_weights"],
|
||||
)
|
||||
|
||||
init_model = self.get_init_model(dk.pair)
|
||||
|
||||
model = CatBoostRegressor(
|
||||
allow_writing_files=True,
|
||||
train_dir=Path(dk.data_path),
|
||||
**self.model_training_parameters,
|
||||
)
|
||||
|
||||
model.fit(
|
||||
X=train_data,
|
||||
eval_set=test_data,
|
||||
init_model=init_model,
|
||||
)
|
||||
|
||||
return model
|
||||
@@ -1,78 +0,0 @@
|
||||
import logging
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
from catboost import CatBoostRegressor, Pool
|
||||
|
||||
from freqtrade.freqai.base_models.BaseRegressionModel import BaseRegressionModel
|
||||
from freqtrade.freqai.base_models.FreqaiMultiOutputRegressor import FreqaiMultiOutputRegressor
|
||||
from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class CatboostRegressorMultiTarget(BaseRegressionModel):
|
||||
"""
|
||||
User created prediction model. The class inherits IFreqaiModel, which
|
||||
means it has full access to all Frequency AI functionality. Typically,
|
||||
users would use this to override the common `fit()`, `train()`, or
|
||||
`predict()` methods to add their custom data handling tools or change
|
||||
various aspects of the training that cannot be configured via the
|
||||
top level config.json file.
|
||||
"""
|
||||
|
||||
def fit(self, data_dictionary: dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
|
||||
"""
|
||||
User sets up the training and test data to fit their desired model here
|
||||
:param data_dictionary: the dictionary holding all data for train, test,
|
||||
labels, weights
|
||||
:param dk: The datakitchen object for the current coin/model
|
||||
"""
|
||||
|
||||
cbr = CatBoostRegressor(
|
||||
allow_writing_files=True,
|
||||
train_dir=Path(dk.data_path),
|
||||
**self.model_training_parameters,
|
||||
)
|
||||
|
||||
X = data_dictionary["train_features"]
|
||||
y = data_dictionary["train_labels"]
|
||||
|
||||
sample_weight = data_dictionary["train_weights"]
|
||||
|
||||
eval_sets = [None] * y.shape[1]
|
||||
|
||||
if self.freqai_info.get("data_split_parameters", {}).get("test_size", 0.1) != 0:
|
||||
eval_sets = [None] * data_dictionary["test_labels"].shape[1]
|
||||
|
||||
for i in range(data_dictionary["test_labels"].shape[1]):
|
||||
eval_sets[i] = Pool(
|
||||
data=data_dictionary["test_features"],
|
||||
label=data_dictionary["test_labels"].iloc[:, i],
|
||||
weight=data_dictionary["test_weights"],
|
||||
)
|
||||
|
||||
init_model = self.get_init_model(dk.pair)
|
||||
|
||||
if init_model:
|
||||
init_models = init_model.estimators_
|
||||
else:
|
||||
init_models = [None] * y.shape[1]
|
||||
|
||||
fit_params = []
|
||||
for i in range(len(eval_sets)):
|
||||
fit_params.append(
|
||||
{
|
||||
"eval_set": eval_sets[i],
|
||||
"init_model": init_models[i],
|
||||
}
|
||||
)
|
||||
|
||||
model = FreqaiMultiOutputRegressor(estimator=cbr)
|
||||
thread_training = self.freqai_info.get("multitarget_parallel_training", False)
|
||||
if thread_training:
|
||||
model.n_jobs = y.shape[1]
|
||||
model.fit(X=X, y=y, sample_weight=sample_weight, fit_params=fit_params)
|
||||
|
||||
return model
|
||||
@@ -97,7 +97,7 @@ def plot_feature_importance(
"""
Plot Best and worst features by importance for a single sub-train.
:param model: Any = A model which was `fit` using a common library
such as catboost or lightgbm
such as XGBoost or lightgbm
:param pair: str = pair e.g. BTC/USD
:param dk: FreqaiDataKitchen = non-persistent data container for current coin/loop
:param count_max: int = the amount of features to be loaded per column
@@ -115,6 +115,8 @@ def plot_feature_importance(
for label in models:
mdl = models[label]
if "catboost.core" in str(mdl.__class__):
# CatBoost is no longer actively supported since 2025.12
# However users can still use it in their custom models
feature_importance = mdl.get_feature_importance()
elif "lightgbm.sklearn" in str(mdl.__class__):
feature_importance = mdl.feature_importances_
@@ -52,29 +52,25 @@ def __run_backtest_bg(btconfig: Config):
lastconfig = ApiBG.bt["last_config"]
strat = StrategyResolver.load_strategy(btconfig)
validate_config_consistency(btconfig)

if (
not ApiBG.bt["bt"]
or lastconfig.get("timeframe") != strat.timeframe
time_settings_changed = (
lastconfig.get("timeframe") != strat.timeframe
or lastconfig.get("timeframe_detail") != btconfig.get("timeframe_detail")
or lastconfig.get("timerange") != btconfig["timerange"]
):
)

if not ApiBG.bt["bt"] or time_settings_changed:
from freqtrade.optimize.backtesting import Backtesting

ApiBG.bt["bt"] = Backtesting(btconfig)
else:
ApiBG.bt["bt"].config = deep_merge_dicts(btconfig, ApiBG.bt["bt"].config)
ApiBG.bt["bt"].init_backtest()
# Only reload data if timeframe changed.
if (
not ApiBG.bt["data"]
or not ApiBG.bt["timerange"]
or lastconfig.get("timeframe") != strat.timeframe
or lastconfig.get("timerange") != btconfig["timerange"]
):
# Only reload data if timerange is open or settings changed
if not ApiBG.bt["data"] or not ApiBG.bt["timerange"] or time_settings_changed:
ApiBG.bt["data"], ApiBG.bt["timerange"] = ApiBG.bt["bt"].load_bt_data()

lastconfig["timerange"] = btconfig["timerange"]
lastconfig["timeframe_detail"] = btconfig.get("timeframe_detail")
lastconfig["timeframe"] = strat.timeframe
lastconfig["enable_protections"] = btconfig.get("enable_protections")
lastconfig["dry_run_wallet"] = btconfig.get("dry_run_wallet")
@@ -20,7 +20,7 @@ class FreqaiExampleHybridStrategy(IStrategy):
Launching this strategy would be:

freqtrade trade --strategy FreqaiExampleHybridStrategy --strategy-path freqtrade/templates
--freqaimodel CatboostClassifier --config config_examples/config_freqai.example.json
--freqaimodel XGBoostClassifier --config config_examples/config_freqai.example.json

or the user simply adds this to their config:

@@ -205,8 +205,8 @@ class FreqaiExampleStrategy(IStrategy):
# If user wishes to use multiple targets, they can add more by
# appending more columns with '&'. User should keep in mind that multi targets
# requires a multioutput prediction model such as
# freqai/prediction_models/CatboostRegressorMultiTarget.py,
# freqtrade trade --freqaimodel CatboostRegressorMultiTarget
# freqai/prediction_models/LightGBMClassifierMultiTarget.py,
# freqtrade trade --freqaimodel LightGBMClassifierMultiTarget

# df["&-s_range"] = (
# df["close"]
@@ -85,7 +85,6 @@ hyperopt = [
freqai = [
"scikit-learn",
"joblib",
"catboost; platform_machine != 'arm'",
"lightgbm",
"xgboost",
"tensorboard",
@@ -6,8 +6,8 @@
-r requirements-freqai-rl.txt
-r docs/requirements-docs.txt

ruff==0.14.8
mypy==1.19.0
ruff==0.14.9
mypy==1.19.1
pre-commit==4.5.0
pytest==9.0.2
pytest-asyncio==1.3.0
@@ -24,7 +24,7 @@ time-machine==3.1.0
nbconvert==7.16.6

# mypy types
scipy-stubs==1.16.3.2 # keep in sync with `scipy` in `requirements-hyperopt.txt`
scipy-stubs==1.16.3.3 # keep in sync with `scipy` in `requirements-hyperopt.txt`
types-cachetools==6.2.0.20251022
types-filelock==3.2.7
types-requests==2.32.4.20250913
@@ -5,7 +5,6 @@
# Required for freqai
scikit-learn==1.7.2
joblib==1.5.3
catboost==1.2.8; 'arm' not in platform_machine and python_version < '3.14'
lightgbm==4.6.0
xgboost==3.1.2
tensorboard==2.20.0

@@ -4,6 +4,6 @@
# Required for hyperopt
scipy==1.16.3
scikit-learn==1.7.2
filelock==3.20.0
filelock==3.20.1
optuna==4.6.0
cmaes==0.12.0
@@ -7,17 +7,17 @@ ft-pandas-ta==0.3.16
ta-lib==0.6.8
technical==1.5.3

ccxt==4.5.27
ccxt==4.5.28
cryptography==46.0.3
aiohttp==3.13.2
SQLAlchemy==2.0.44
SQLAlchemy==2.0.45
python-telegram-bot==22.5
# can't be hard-pinned due to telegram-bot pinning httpx with ~
httpx>=0.24.1
humanize==4.14.0
cachetools==6.2.2
cachetools==6.2.3
requests==2.32.5
urllib3==2.6.0
urllib3==2.6.2
certifi==2025.11.12
jsonschema==4.25.1
tabulate==0.9.0
@@ -37,7 +37,7 @@ orjson==3.11.5
sdnotify==0.3.2

# API Server
fastapi==0.124.0
fastapi==0.124.4
pydantic==2.12.5
uvicorn==0.38.0
pyjwt==2.10.1
@@ -16,9 +16,9 @@ def test_fetch_stoploss_order_gate(default_conf, mocker):

exchange.fetch_stoploss_order("1234", "ETH/BTC")
assert fetch_order_mock.call_count == 1
assert fetch_order_mock.call_args_list[0][1]["order_id"] == "1234"
assert fetch_order_mock.call_args_list[0][1]["pair"] == "ETH/BTC"
assert fetch_order_mock.call_args_list[0][1]["params"] == {"stop": True}
assert fetch_order_mock.call_args_list[0][0][0] == "1234"
assert fetch_order_mock.call_args_list[0][0][1] == "ETH/BTC"
assert fetch_order_mock.call_args_list[0][0][2] == {"stop": True}

default_conf["trading_mode"] = "futures"
default_conf["margin_mode"] = "isolated"
@@ -36,21 +36,19 @@ def test_fetch_stoploss_order_gate(default_conf, mocker):

exchange.fetch_stoploss_order("1234", "ETH/BTC")
assert exchange.fetch_order.call_count == 2
assert exchange.fetch_order.call_args_list[0][1]["order_id"] == "1234"
assert exchange.fetch_order.call_args_list[0][0][0] == "1234"
assert exchange.fetch_order.call_args_list[1][1]["order_id"] == "222555"


def test_cancel_stoploss_order_gate(default_conf, mocker):
exchange = get_patched_exchange(mocker, default_conf, exchange="gate")

cancel_order_mock = MagicMock()
exchange.cancel_order = cancel_order_mock
cancel_order_mock = mocker.patch.object(exchange, "cancel_order", autospec=True)

exchange.cancel_stoploss_order("1234", "ETH/BTC")
assert cancel_order_mock.call_count == 1
assert cancel_order_mock.call_args_list[0][1]["order_id"] == "1234"
assert cancel_order_mock.call_args_list[0][1]["pair"] == "ETH/BTC"
assert cancel_order_mock.call_args_list[0][1]["params"] == {"stop": True}
assert cancel_order_mock.call_args_list[0][0][0] == "1234"
assert cancel_order_mock.call_args_list[0][0][1] == "ETH/BTC"
assert cancel_order_mock.call_args_list[0][0][2] == {"stop": True}


@pytest.mark.parametrize(
@@ -661,14 +661,14 @@ def test_stoploss_adjust_okx(mocker, default_conf, sl1, sl2, sl3, side):

def test_stoploss_cancel_okx(mocker, default_conf):
exchange = get_patched_exchange(mocker, default_conf, exchange="okx")

exchange.cancel_order = MagicMock()
co_mock = mocker.patch.object(exchange, "cancel_order", autospec=True)

exchange.cancel_stoploss_order("1234", "ETH/USDT")
assert exchange.cancel_order.call_count == 1
assert exchange.cancel_order.call_args_list[0][1]["order_id"] == "1234"
assert exchange.cancel_order.call_args_list[0][1]["pair"] == "ETH/USDT"
assert exchange.cancel_order.call_args_list[0][1]["params"] == {"stop": True}
assert co_mock.call_count == 1
args, _ = co_mock.call_args
assert args[0] == "1234"
assert args[1] == "ETH/USDT"
assert args[2] == {"stop": True}


def test__get_stop_params_okx(mocker, default_conf):
@@ -1,6 +1,5 @@
import logging
import shutil
import sys
from pathlib import Path
from unittest.mock import MagicMock

@@ -32,12 +31,6 @@ from tests.freqai.conftest import (
def can_run_model(model: str) -> None:
is_pytorch_model = "Reinforcement" in model or "PyTorch" in model

if is_arm() and "Catboost" in model:
pytest.skip("CatBoost is not supported on ARM.")

if "Catboost" in model and sys.version_info >= (3, 14):
pytest.skip("CatBoost is not supported on Python 3.14+.")

if is_pytorch_model and is_mac():
pytest.skip("Reinforcement learning / PyTorch module not available on intel based Mac OS.")

@@ -48,7 +41,6 @@ def can_run_model(model: str) -> None:
("LightGBMRegressor", True, False, True, True, False, 0, 0),
("XGBoostRegressor", False, True, False, True, False, 10, 0.05),
("XGBoostRFRegressor", False, False, False, True, False, 0, 0),
("CatboostRegressor", False, False, False, True, True, 0, 0),
("PyTorchMLPRegressor", False, False, False, False, False, 0, 0),
("PyTorchTransformerRegressor", False, False, False, False, False, 0, 0),
("ReinforcementLearner", False, True, False, True, False, 0, 0),
@@ -142,9 +134,7 @@ def test_extract_data_and_train_model_Standard(
[
("LightGBMRegressorMultiTarget", "freqai_test_multimodel_strat"),
("XGBoostRegressorMultiTarget", "freqai_test_multimodel_strat"),
("CatboostRegressorMultiTarget", "freqai_test_multimodel_strat"),
("LightGBMClassifierMultiTarget", "freqai_test_multimodel_classifier_strat"),
("CatboostClassifierMultiTarget", "freqai_test_multimodel_classifier_strat"),
],
)
@pytest.mark.filterwarnings(r"ignore:.*__sklearn_tags__.*:DeprecationWarning")
@@ -188,7 +178,6 @@ def test_extract_data_and_train_model_MultiTargets(mocker, freqai_conf, model, s
"model",
[
"LightGBMClassifier",
"CatboostClassifier",
"XGBoostClassifier",
"XGBoostRFClassifier",
"SKLearnRandomForestClassifier",
@@ -250,13 +239,11 @@ def test_extract_data_and_train_model_Classifiers(mocker, freqai_conf, model):
[
("LightGBMRegressor", 2, "freqai_test_strat"),
("XGBoostRegressor", 2, "freqai_test_strat"),
("CatboostRegressor", 2, "freqai_test_strat"),
("PyTorchMLPRegressor", 2, "freqai_test_strat"),
("PyTorchTransformerRegressor", 2, "freqai_test_strat"),
("ReinforcementLearner", 3, "freqai_rl_test_strat"),
("XGBoostClassifier", 2, "freqai_test_classifier"),
("LightGBMClassifier", 2, "freqai_test_classifier"),
("CatboostClassifier", 2, "freqai_test_classifier"),
("PyTorchMLPClassifier", 2, "freqai_test_classifier"),
],
)