Merge branch 'develop' into feature/stoploss-start-at

This commit is contained in:
Matthias
2024-08-02 19:51:09 +02:00
102 changed files with 8637 additions and 2296 deletions

View File

@@ -116,7 +116,7 @@ def test_list_exchanges(capsys):
start_list_exchanges(get_args(args))
captured = capsys.readouterr()
assert re.match(r"Exchanges available for Freqtrade.*", captured.out)
assert re.search(r".*Exchanges available for Freqtrade.*", captured.out)
assert re.search(r".*binance.*", captured.out)
assert re.search(r".*bybit.*", captured.out)
@@ -139,7 +139,7 @@ def test_list_exchanges(capsys):
start_list_exchanges(get_args(args))
captured = capsys.readouterr()
assert re.match(r"All exchanges supported by the ccxt library.*", captured.out)
assert re.search(r"All exchanges supported by the ccxt library.*", captured.out)
assert re.search(r".*binance.*", captured.out)
assert re.search(r".*bingx.*", captured.out)
assert re.search(r".*bitmex.*", captured.out)
@@ -293,7 +293,7 @@ def test_list_markets(mocker, markets_static, capsys):
pargs["config"] = None
start_list_markets(pargs, False)
captured = capsys.readouterr()
assert re.match("\nExchange Binance has 12 active markets:\n", captured.out)
assert re.search(r".*Exchange Binance has 12 active markets.*", captured.out)
patch_exchange(mocker, api_mock=api_mock, exchange="binance", mock_markets=markets_static)
# Test with --all: all markets
@@ -491,7 +491,7 @@ def test_list_markets(mocker, markets_static, capsys):
]
start_list_markets(get_args(args), False)
captured = capsys.readouterr()
assert "Exchange Binance has 12 active markets:\n" in captured.out
assert "Exchange Binance has 12 active markets" in captured.out
# Test tabular output, no markets found
args = [
@@ -1633,8 +1633,8 @@ def test_start_list_data(testdatadir, capsys):
start_list_data(pargs)
captured = capsys.readouterr()
assert "Found 16 pair / timeframe combinations." in captured.out
assert "\n| Pair | Timeframe | Type |\n" in captured.out
assert "\n| UNITTEST/BTC | 1m, 5m, 8m, 30m | spot |\n" in captured.out
assert re.search(r".*Pair.*Timeframe.*Type.*\n", captured.out)
assert re.search(r"\n.* UNITTEST/BTC .* 1m, 5m, 8m, 30m .* spot |\n", captured.out)
args = [
"list-data",
@@ -1650,9 +1650,9 @@ def test_start_list_data(testdatadir, capsys):
start_list_data(pargs)
captured = capsys.readouterr()
assert "Found 2 pair / timeframe combinations." in captured.out
assert "\n| Pair | Timeframe | Type |\n" in captured.out
assert re.search(r".*Pair.*Timeframe.*Type.*\n", captured.out)
assert "UNITTEST/BTC" not in captured.out
assert "\n| XRP/ETH | 1m, 5m | spot |\n" in captured.out
assert re.search(r"\n.* XRP/ETH .* 1m, 5m .* spot |\n", captured.out)
args = [
"list-data",
@@ -1667,9 +1667,9 @@ def test_start_list_data(testdatadir, capsys):
captured = capsys.readouterr()
assert "Found 6 pair / timeframe combinations." in captured.out
assert "\n| Pair | Timeframe | Type |\n" in captured.out
assert "\n| XRP/USDT:USDT | 5m, 1h | futures |\n" in captured.out
assert "\n| XRP/USDT:USDT | 1h, 8h | mark |\n" in captured.out
assert re.search(r".*Pair.*Timeframe.*Type.*\n", captured.out)
assert re.search(r"\n.* XRP/USDT:USDT .* 5m, 1h .* futures |\n", captured.out)
assert re.search(r"\n.* XRP/USDT:USDT .* 1h, 8h .* mark |\n", captured.out)
args = [
"list-data",
@@ -1684,15 +1684,12 @@ def test_start_list_data(testdatadir, capsys):
start_list_data(pargs)
captured = capsys.readouterr()
assert "Found 2 pair / timeframe combinations." in captured.out
assert (
"\n| Pair | Timeframe | Type "
"| From | To | Candles |\n"
) in captured.out
assert re.search(r".*Pair.*Timeframe.*Type.*From .* To .* Candles .*\n", captured.out)
assert "UNITTEST/BTC" not in captured.out
assert (
"\n| XRP/ETH | 1m | spot | "
"2019-10-11 00:00:00 | 2019-10-13 11:19:00 | 2469 |\n"
) in captured.out
assert re.search(
r"\n.* XRP/USDT .* 1m .* spot .* 2019-10-11 00:00:00 .* 2019-10-13 11:19:00 .* 2469 |\n",
captured.out,
)
@pytest.mark.usefixtures("init_persistence")

View File

@@ -614,6 +614,7 @@ def get_default_conf(testdatadir):
"internals": {},
"export": "none",
"dataformat_ohlcv": "feather",
"dataformat_trades": "feather",
"runmode": "dry_run",
"candle_type_def": CandleType.SPOT,
}
@@ -2187,7 +2188,7 @@ def tickers():
"first": None,
"last": 530.21,
"change": 0.558,
"percentage": None,
"percentage": 2.349,
"average": None,
"baseVolume": 72300.0659,
"quoteVolume": 37670097.3022171,

View File

@@ -324,7 +324,8 @@ def hyperopt_test_result():
"profit_mean": None,
"profit_median": None,
"profit_total": 0,
"profit": 0.0,
"max_drawdown_account": 0.0,
"max_drawdown_abs": 0.0,
"holding_avg": timedelta(),
}, # noqa: E501
"results_explanation": " 0 trades. Avg profit nan%. Total profit 0.00000000 BTC ( 0.00Σ%). Avg duration nan min.", # noqa: E501

View File

@@ -0,0 +1,483 @@
from collections import OrderedDict
import numpy as np
import pandas as pd
import pytest
from freqtrade.constants import DEFAULT_TRADES_COLUMNS
from freqtrade.data.converter import populate_dataframe_with_trades
from freqtrade.data.converter.orderflow import trades_to_volumeprofile_with_total_delta_bid_ask
from freqtrade.data.converter.trade_converter import trades_list_to_df
BIN_SIZE_SCALE = 0.5
def read_csv(filename, converter_columns: list = ["side", "type"]):
return pd.read_csv(
filename,
skipinitialspace=True,
index_col=0,
parse_dates=True,
date_format="ISO8601",
converters={col: str.strip for col in converter_columns},
)
@pytest.fixture
def populate_dataframe_with_trades_dataframe(testdatadir):
    """OHLCV candle DataFrame fixture loaded from the orderflow test data."""
    return pd.read_feather(testdatadir / "orderflow/populate_dataframe_with_trades_DF.feather")
@pytest.fixture
def populate_dataframe_with_trades_trades(testdatadir):
    """Raw public-trades DataFrame fixture matching the candle fixture above."""
    return pd.read_feather(testdatadir / "orderflow/populate_dataframe_with_trades_TRADES.feather")
@pytest.fixture
def candles(testdatadir):
    """Candle DataFrame fixture loaded from JSON orderflow test data."""
    return pd.read_json(testdatadir / "orderflow/candles.json").copy()
@pytest.fixture
def public_trades_list(testdatadir):
    """Large (1000-row) public trades sample loaded via the local read_csv helper."""
    return read_csv(testdatadir / "orderflow/public_trades_list.csv").copy()
@pytest.fixture
def public_trades_list_simple(testdatadir):
    """Small (7-row) public trades sample for simple volume-profile checks."""
    return read_csv(testdatadir / "orderflow/public_trades_list_simple_example.csv").copy()
def test_public_trades_columns_before_change(
    populate_dataframe_with_trades_dataframe, populate_dataframe_with_trades_trades
):
    """Guard the raw fixture column layouts before any conversion is applied."""
    # Candle fixture: plain OHLCV layout.
    assert populate_dataframe_with_trades_dataframe.columns.tolist() == [
        "date",
        "open",
        "high",
        "low",
        "close",
        "volume",
    ]
    # Trades fixture: raw ccxt-style trade columns.
    assert populate_dataframe_with_trades_trades.columns.tolist() == [
        "timestamp",
        "id",
        "type",
        "side",
        "price",
        "amount",
        "cost",
        "date",
    ]
def test_public_trades_mock_populate_dataframe_with_trades__check_orderflow(
    populate_dataframe_with_trades_dataframe, populate_dataframe_with_trades_trades
):
    """
    Tests the `populate_dataframe_with_trades` function's order flow calculation.
    This test checks the generated data frame and order flow for specific properties
    based on the provided configuration and sample data.
    """
    # Create copies of the input data to avoid modifying the originals
    dataframe = populate_dataframe_with_trades_dataframe.copy()
    trades = populate_dataframe_with_trades_trades.copy()
    # Convert the 'date' column to datetime format with milliseconds
    dataframe["date"] = pd.to_datetime(dataframe["date"], unit="ms")
    # Keep only the last 5 candles (DataFrame.tail default) and reset the index
    dataframe = dataframe.copy().tail().reset_index(drop=True)
    # Define the configuration for order flow calculation
    config = {
        "timeframe": "5m",
        "orderflow": {
            "cache_size": 1000,
            "max_candles": 1500,
            "scale": 0.005,
            "imbalance_volume": 0,
            "imbalance_ratio": 3,
            "stacked_imbalance_range": 3,
        },
    }
    # Apply the function to populate the data frame with order flow data
    df, _ = populate_dataframe_with_trades(OrderedDict(), config, dataframe, trades)
    # Extract results from the first row of the DataFrame
    results = df.iloc[0]
    t = results["trades"]
    of = results["orderflow"]
    # Assert basic properties of the results
    assert 0 != len(results)
    assert 151 == len(t)
    # --- Order Flow Analysis ---
    # Assert number of order flow data points
    assert 23 == len(of)  # Assert expected number of data points
    assert isinstance(of, dict)
    of_values = list(of.values())
    # Assert specific order flow values at the beginning of the DataFrame
    assert of_values[0] == {
        "bid": 0.0,
        "ask": 1.0,
        "delta": 4.999,
        "bid_amount": 0.0,
        "ask_amount": 4.999,
        "total_volume": 4.999,
        "total_trades": 1,
    }
    # Assert the last order flow entry of the FIRST candle's profile
    assert of_values[-1] == {
        "bid": 0.0,
        "ask": 1.0,
        "delta": 0.103,
        "bid_amount": 0.0,
        "ask_amount": 0.103,
        "total_volume": 0.103,
        "total_trades": 1,
    }
    # Extract order flow from the last row of the DataFrame
    of = df.iloc[-1]["orderflow"]
    # Assert number of order flow data points in the last row
    assert 19 == len(of)  # Assert expected number of data points
    of_values1 = list(of.values())
    # Assert specific order flow values at the beginning of the last row
    assert of_values1[0] == {
        "bid": 1.0,
        "ask": 0.0,
        "delta": -12.536,
        "bid_amount": 12.536,
        "ask_amount": 0.0,
        "total_volume": 12.536,
        "total_trades": 1,
    }
    # Assert specific order flow values at the end of the last row
    assert pytest.approx(of_values1[-1]) == {
        "bid": 4.0,
        "ask": 3.0,
        "delta": -40.948,
        "bid_amount": 59.182,
        "ask_amount": 18.23399,
        "total_volume": 77.416,
        "total_trades": 7,
    }
    # --- Delta and Other Results ---
    # Assert delta value from the first row
    assert pytest.approx(results["delta"]) == -50.519
    # Assert min and max delta values from the first row
    assert results["min_delta"] == -79.469
    assert results["max_delta"] == 17.298
    # Assert that stacked imbalances are NaN (not applicable in this test)
    assert np.isnan(results["stacked_imbalances_bid"])
    assert np.isnan(results["stacked_imbalances_ask"])
    # Repeat assertions for the second-to-last row
    results = df.iloc[-2]
    assert pytest.approx(results["delta"]) == -20.862
    assert pytest.approx(results["min_delta"]) == -54.559999
    assert 82.842 == results["max_delta"]
    assert 234.99 == results["stacked_imbalances_bid"]
    assert 234.96 == results["stacked_imbalances_ask"]
    # Repeat assertions for the last row
    results = df.iloc[-1]
    assert pytest.approx(results["delta"]) == -49.302
    assert results["min_delta"] == -70.222
    assert pytest.approx(results["max_delta"]) == 11.213
    assert np.isnan(results["stacked_imbalances_bid"])
    assert np.isnan(results["stacked_imbalances_ask"])
def test_public_trades_trades_mock_populate_dataframe_with_trades__check_trades(
    populate_dataframe_with_trades_dataframe, populate_dataframe_with_trades_trades
):
    """
    Tests the `populate_dataframe_with_trades` function's handling of trades,
    ensuring correct integration of trades data into the generated DataFrame.
    """
    # Create copies of the input data to avoid modifying the originals
    dataframe = populate_dataframe_with_trades_dataframe.copy()
    trades = populate_dataframe_with_trades_trades.copy()
    # --- Data Preparation ---
    # Convert the 'date' column to datetime format with milliseconds
    dataframe["date"] = pd.to_datetime(dataframe["date"], unit="ms")
    # Keep the last 5 rows of the DataFrame (DataFrame.tail default)
    dataframe = dataframe.tail().reset_index(drop=True)
    # Filter trades to those occurring after or at the same time as the first DataFrame date
    trades = trades.loc[trades.date >= dataframe.date[0]]
    trades.reset_index(inplace=True, drop=True)  # Reset index for clarity
    # Assert the first trade ID to ensure filtering worked correctly
    assert trades["id"][0] == "313881442"
    # --- Configuration and Function Call ---
    # Define configuration for order flow calculation (used for context)
    config = {
        "timeframe": "5m",
        "orderflow": {
            "cache_size": 1000,
            "max_candles": 1500,
            "scale": 0.5,
            "imbalance_volume": 0,
            "imbalance_ratio": 3,
            "stacked_imbalance_range": 3,
        },
    }
    # Populate the DataFrame with trades and order flow data
    df, _ = populate_dataframe_with_trades(OrderedDict(), config, dataframe, trades)
    # --- DataFrame and Trade Data Validation ---
    row = df.iloc[0]  # Extract the first row for assertions
    # Assert DataFrame structure
    assert list(df.columns) == [
        # ... (list of expected column names)
        "date",
        "open",
        "high",
        "low",
        "close",
        "volume",
        "trades",
        "orderflow",
        "imbalances",
        "stacked_imbalances_bid",
        "stacked_imbalances_ask",
        "max_delta",
        "min_delta",
        "bid",
        "ask",
        "delta",
        "total_trades",
    ]
    # Assert delta, bid, and ask values
    assert pytest.approx(row["delta"]) == -50.519
    assert row["bid"] == 219.961
    assert row["ask"] == 169.442
    # Assert the number of trades
    assert len(row["trades"]) == 151
    # Assert specific details of the first trade
    t = row["trades"][0]
    assert list(t.keys()) == ["timestamp", "id", "type", "side", "price", "amount", "cost", "date"]
    assert trades["id"][0] == t["id"]
    assert int(trades["timestamp"][0]) == int(t["timestamp"])
    assert t["side"] == "sell"
    assert t["id"] == "313881442"
    assert t["price"] == 234.72
def test_public_trades_put_volume_profile_into_ohlcv_candles(public_trades_list_simple, candles):
    """
    Tests the integration of volume profile data into OHLCV candles.
    This test verifies that
    the `trades_to_volumeprofile_with_total_delta_bid_ask`
    function correctly calculates the volume profile and that
    it correctly assigns the delta value from the volume profile to the
    corresponding candle in the `candles` DataFrame.
    """
    # NOTE(review): the `candles` fixture is accepted but never used in the body —
    # the docstring overstates the scope; confirm whether the candle-assignment
    # part of this test was dropped or is still to be written.
    # Convert the trade list to a DataFrame
    trades_df = trades_list_to_df(public_trades_list_simple[DEFAULT_TRADES_COLUMNS].values.tolist())
    # Generate the volume profile with the specified bin size
    df = trades_to_volumeprofile_with_total_delta_bid_ask(trades_df, scale=BIN_SIZE_SCALE)
    # Assert the delta value in the total-bid/delta response of the second candle
    assert 0.14 == df.values.tolist()[1][2]
    # Alternative assertion using `.iat` accessor (assuming correct assignment logic)
    assert 0.14 == df["delta"].iat[1]
def test_public_trades_binned_big_sample_list(public_trades_list):
    """
    Tests the `trades_to_volumeprofile_with_total_delta_bid_ask` function
    with different bin sizes and verifies the generated DataFrame's structure and values.
    """
    # Define the bin size for the first test (shadows the module-level constant)
    BIN_SIZE_SCALE = 0.05
    # Convert the trade list to a DataFrame
    trades = trades_list_to_df(public_trades_list[DEFAULT_TRADES_COLUMNS].values.tolist())
    # Generate the volume profile with the specified bin size
    df = trades_to_volumeprofile_with_total_delta_bid_ask(trades, scale=BIN_SIZE_SCALE)
    # Assert that the DataFrame has the expected columns
    assert df.columns.tolist() == [
        "bid",
        "ask",
        "delta",
        "bid_amount",
        "ask_amount",
        "total_volume",
        "total_trades",
    ]
    # Assert the number of rows in the DataFrame (expected 23 for this bin size)
    assert len(df) == 23
    # Assert that the index values are in ascending order and spaced correctly
    assert all(df.index[i] < df.index[i + 1] for i in range(len(df) - 1))
    assert df.index[0] + BIN_SIZE_SCALE == df.index[1]
    assert (trades["price"].min() - BIN_SIZE_SCALE) < df.index[0] < trades["price"].max()
    # NOTE(review): redundant — already implied by the exact-spacing equality above.
    assert (df.index[0] + BIN_SIZE_SCALE) >= df.index[1]
    assert (trades["price"].max() - BIN_SIZE_SCALE) < df.index[-1] < trades["price"].max()
    # Assert specific values in the first and last rows of the DataFrame
    assert 32 == df["bid"].iloc[0]  # number of bid-side trades in the first bin
    assert 197.512 == df["bid_amount"].iloc[0]  # total bid amount
    assert 88.98 == df["ask_amount"].iloc[0]  # total ask amount
    assert 26 == df["ask"].iloc[0]  # number of ask-side trades in the first bin
    assert -108.532 == pytest.approx(df["delta"].iloc[0])  # delta (ask amount - bid amount)
    assert 3 == df["bid"].iloc[-1]  # number of bid-side trades in the last bin
    assert 50.659 == df["bid_amount"].iloc[-1]  # total bid amount
    assert 108.21 == df["ask_amount"].iloc[-1]  # total ask amount
    assert 44 == df["ask"].iloc[-1]  # number of ask-side trades in the last bin
    assert 57.551 == df["delta"].iloc[-1]  # delta (ask amount - bid amount)
    # Repeat the process with a larger bin size
    BIN_SIZE_SCALE = 1
    # Generate the volume profile with the larger bin size
    df = trades_to_volumeprofile_with_total_delta_bid_ask(trades, scale=BIN_SIZE_SCALE)
    # Assert the number of rows in the DataFrame (expected 2 for this bin size)
    assert len(df) == 2
    # Repeat similar assertions for index ordering and spacing
    assert all(df.index[i] < df.index[i + 1] for i in range(len(df) - 1))
    assert (trades["price"].min() - BIN_SIZE_SCALE) < df.index[0] < trades["price"].max()
    assert (df.index[0] + BIN_SIZE_SCALE) >= df.index[1]
    assert (trades["price"].max() - BIN_SIZE_SCALE) < df.index[-1] < trades["price"].max()
    # Assert the value in the last row of the DataFrame with the larger bin size
    assert 1667.0 == df.index[-1]
    assert 710.98 == df["bid_amount"].iat[0]
    assert 111 == df["bid"].iat[0]
    assert 52.7199999 == pytest.approx(df["delta"].iat[0])  # delta
def test_public_trades_config_max_trades(
    default_conf, populate_dataframe_with_trades_dataframe, populate_dataframe_with_trades_trades
):
    """With `max_candles: 1`, orderflow data is populated for exactly one candle."""
    dataframe = populate_dataframe_with_trades_dataframe.copy()
    trades = populate_dataframe_with_trades_trades.copy()
    default_conf["exchange"]["use_public_trades"] = True
    orderflow_config = {
        "timeframe": "5m",
        "orderflow": {
            "cache_size": 1000,
            "max_candles": 1,
            "scale": 0.005,
            "imbalance_volume": 0,
            "imbalance_ratio": 3,
            "stacked_imbalance_range": 3,
        },
    }
    # Merge the orderflow settings over the default config (dict union, 3.9+).
    df, _ = populate_dataframe_with_trades(
        OrderedDict(), default_conf | orderflow_config, dataframe, trades
    )
    # Only one row should carry a delta value due to max_candles=1.
    assert df.delta.count() == 1
def test_public_trades_testdata_sanity(
    candles,
    public_trades_list,
    public_trades_list_simple,
    populate_dataframe_with_trades_dataframe,
    populate_dataframe_with_trades_trades,
):
    """
    Sanity-check the orderflow test fixtures: expected row counts, the
    buy/sell split of the simple trades sample, and column layouts.
    """
    assert 10999 == len(candles)
    assert 1000 == len(public_trades_list)
    assert 999 == len(populate_dataframe_with_trades_dataframe)
    assert 293532 == len(populate_dataframe_with_trades_trades)
    assert 7 == len(public_trades_list_simple)
    # The simple sample contains 5 sells and 2 buys.
    assert (
        5
        == public_trades_list_simple.loc[
            public_trades_list_simple["side"].str.contains("sell"), "id"
        ].count()
    )
    assert (
        2
        == public_trades_list_simple.loc[
            public_trades_list_simple["side"].str.contains("buy"), "id"
        ].count()
    )
    # All trades fixtures share the same raw ccxt-style trade column layout.
    # (The original had this list asserted twice verbatim for public_trades_list;
    # deduplicated into a single shared expectation.)
    trades_cols = [
        "timestamp",
        "id",
        "type",
        "side",
        "price",
        "amount",
        "cost",
        "date",
    ]
    assert public_trades_list.columns.tolist() == trades_cols
    assert public_trades_list_simple.columns.tolist() == trades_cols
    assert populate_dataframe_with_trades_trades.columns.tolist() == trades_cols
    # The candle fixture uses the plain OHLCV layout.
    assert populate_dataframe_with_trades_dataframe.columns.tolist() == [
        "date",
        "open",
        "high",
        "low",
        "close",
        "volume",
    ]

View File

@@ -62,6 +62,42 @@ def test_historic_ohlcv(mocker, default_conf, ohlcv_history):
assert historymock.call_args_list[0][1]["timeframe"] == "5m"
def test_historic_trades(mocker, default_conf, trades_history_df):
    """
    DataProvider.trades() behavior across run modes: raises in live mode
    without an exchange, returns an empty DataFrame in dry-run with an
    exchange, returns loaded history in backtest mode, and an empty
    DataFrame for other run modes.
    """
    historymock = MagicMock(return_value=trades_history_df)
    mocker.patch(
        "freqtrade.data.history.datahandlers.featherdatahandler.FeatherDataHandler._trades_load",
        historymock,
    )
    dp = DataProvider(default_conf, None)
    # Live mode..
    with pytest.raises(OperationalException, match=r"Exchange is not available to DataProvider\."):
        dp.trades("UNITTEST/BTC", "5m")
    exchange = get_patched_exchange(mocker, default_conf)
    dp = DataProvider(default_conf, exchange)
    data = dp.trades("UNITTEST/BTC", "5m")
    assert isinstance(data, DataFrame)
    assert len(data) == 0
    # Switch to backtest mode
    default_conf["runmode"] = RunMode.BACKTEST
    default_conf["dataformat_trades"] = "feather"
    exchange = get_patched_exchange(mocker, default_conf)
    dp = DataProvider(default_conf, exchange)
    data = dp.trades("UNITTEST/BTC", "5m")
    assert isinstance(data, DataFrame)
    assert len(data) == len(trades_history_df)
    # Random other runmode
    default_conf["runmode"] = RunMode.UTIL_EXCHANGE
    dp = DataProvider(default_conf, None)
    data = dp.trades("UNITTEST/BTC", "5m")
    assert isinstance(data, DataFrame)
    assert len(data) == 0
def test_historic_ohlcv_dataformat(mocker, default_conf, ohlcv_history):
hdf5loadmock = MagicMock(return_value=ohlcv_history)
featherloadmock = MagicMock(return_value=ohlcv_history)
@@ -247,8 +283,8 @@ def test_emit_df(mocker, default_conf, ohlcv_history):
def test_refresh(mocker, default_conf):
refresh_mock = MagicMock()
mocker.patch(f"{EXMS}.refresh_latest_ohlcv", refresh_mock)
refresh_mock = mocker.patch(f"{EXMS}.refresh_latest_ohlcv")
mock_refresh_trades = mocker.patch(f"{EXMS}.refresh_latest_trades")
exchange = get_patched_exchange(mocker, default_conf, exchange="binance")
timeframe = default_conf["timeframe"]
@@ -258,7 +294,7 @@ def test_refresh(mocker, default_conf):
dp = DataProvider(default_conf, exchange)
dp.refresh(pairs)
assert mock_refresh_trades.call_count == 0
assert refresh_mock.call_count == 1
assert len(refresh_mock.call_args[0]) == 1
assert len(refresh_mock.call_args[0][0]) == len(pairs)
@@ -266,11 +302,20 @@ def test_refresh(mocker, default_conf):
refresh_mock.reset_mock()
dp.refresh(pairs, pairs_non_trad)
assert mock_refresh_trades.call_count == 0
assert refresh_mock.call_count == 1
assert len(refresh_mock.call_args[0]) == 1
assert len(refresh_mock.call_args[0][0]) == len(pairs) + len(pairs_non_trad)
assert refresh_mock.call_args[0][0] == pairs + pairs_non_trad
# Test with public trades
refresh_mock.reset_mock()
refresh_mock.reset_mock()
default_conf["exchange"]["use_public_trades"] = True
dp.refresh(pairs, pairs_non_trad)
assert mock_refresh_trades.call_count == 1
assert refresh_mock.call_count == 1
def test_orderbook(mocker, default_conf, order_book_l2):
api_mock = MagicMock()

View File

@@ -154,10 +154,10 @@ def test_backtest_analysis_nomock(default_conf, mocker, caplog, testdatadir, use
assert "-3.5" in captured.out
assert "50" in captured.out
assert "0" in captured.out
assert "0.01616" in captured.out
assert "0.016" in captured.out
assert "34.049" in captured.out
assert "0.104411" in captured.out
assert "52.8292" in captured.out
assert "0.104" in captured.out
assert "52.829" in captured.out
# test group 1
args = get_args(base_args + ["--analysis-groups", "1"])

View File

@@ -151,9 +151,7 @@ def test_load_data_with_new_pair_1min(
)
load_pair_history(datadir=tmp_path, timeframe="1m", pair="MEME/BTC", candle_type=candle_type)
assert file.is_file()
assert log_has_re(
r'\(0/1\) - Download history data for "MEME/BTC", 1m, ' r"spot and store in .*", caplog
)
assert log_has_re(r'Download history data for "MEME/BTC", 1m, ' r"spot and store in .*", caplog)
def test_testdata_path(testdatadir) -> None:
@@ -677,7 +675,7 @@ def test_download_trades_history(
assert not _download_trades_history(
data_handler=data_handler, exchange=exchange, pair="ETH/BTC", trading_mode=TradingMode.SPOT
)
assert log_has_re('Failed to download historic trades for pair: "ETH/BTC".*', caplog)
assert log_has_re('Failed to download and store historic trades for pair: "ETH/BTC".*', caplog)
file2 = tmp_path / "XRP_ETH-trades.json.gz"
copyfile(testdatadir / file2.name, file2)

View File

@@ -600,7 +600,7 @@ async def test__async_get_historic_ohlcv_binance(default_conf, mocker, caplog, c
@pytest.mark.parametrize(
"pair,nominal_value,mm_ratio,amt",
"pair,notional_value,mm_ratio,amt",
[
("XRP/USDT:USDT", 0.0, 0.025, 0),
("BNB/USDT:USDT", 100.0, 0.0065, 0),
@@ -615,12 +615,12 @@ def test_get_maintenance_ratio_and_amt_binance(
mocker,
leverage_tiers,
pair,
nominal_value,
notional_value,
mm_ratio,
amt,
):
mocker.patch(f"{EXMS}.exchange_has", return_value=True)
exchange = get_patched_exchange(mocker, default_conf, exchange="binance")
exchange._leverage_tiers = leverage_tiers
(result_ratio, result_amt) = exchange.get_maintenance_ratio_and_amt(pair, nominal_value)
(result_ratio, result_amt) = exchange.get_maintenance_ratio_and_amt(pair, notional_value)
assert (round(result_ratio, 8), round(result_amt, 8)) == (mm_ratio, amt)

View File

@@ -8,8 +8,9 @@ from unittest.mock import MagicMock, Mock, PropertyMock, patch
import ccxt
import pytest
from numpy import nan
from pandas import DataFrame
from pandas import DataFrame, to_datetime
from freqtrade.constants import DEFAULT_DATAFRAME_COLUMNS
from freqtrade.enums import CandleType, MarginMode, RunMode, TradingMode
from freqtrade.exceptions import (
ConfigurationError,
@@ -325,6 +326,22 @@ def test_validate_order_time_in_force(default_conf, mocker, caplog):
ex.validate_order_time_in_force(tif2)
def test_validate_orderflow(default_conf, mocker, caplog):
    """
    validate_orderflow() must raise ConfigurationError when public trades
    are enabled on an exchange without historic trade data (bybit), and
    pass silently on a supported exchange (binance).
    """
    caplog.set_level(logging.INFO)
    # Test bybit - as it doesn't support historic trades data.
    ex = get_patched_exchange(mocker, default_conf, exchange="bybit")
    mocker.patch(f"{EXMS}.exchange_has", return_value=True)
    ex.validate_orderflow({"use_public_trades": False})
    with pytest.raises(ConfigurationError, match=r"Trade data not available for.*"):
        ex.validate_orderflow({"use_public_trades": True})
    # Binance supports orderflow.
    ex = get_patched_exchange(mocker, default_conf, exchange="binance")
    ex.validate_orderflow({"use_public_trades": False})
    ex.validate_orderflow({"use_public_trades": True})
@pytest.mark.parametrize(
"price,precision_mode,precision,expected",
[
@@ -2371,6 +2388,163 @@ def test_refresh_latest_ohlcv(mocker, default_conf, caplog, candle_type) -> None
assert len(res) == 1
@pytest.mark.parametrize("candle_type", [CandleType.FUTURES, CandleType.SPOT])
def test_refresh_latest_trades(
    mocker, default_conf, caplog, candle_type, tmp_path, time_machine
) -> None:
    """
    Exercise Exchange.refresh_latest_trades(): uncached refresh, cached
    refresh, per-pair trades() copy semantics, cache hits against fresh
    candles, cache invalidation via "expired" candles, and cache=False.
    NOTE(review): indentation reconstructed from a rendered diff — the
    per-pair cache-seeding blocks are placed inside their loops as the
    `exchange._klines[pair]` / `exchange._trades[pair]` assignments imply.
    """
    time_machine.move_to(dt_now(), tick=False)
    trades = [
        {
            # unix timestamp ms
            "timestamp": dt_ts(dt_now() - timedelta(minutes=5)),
            "amount": 16.512,
            "cost": 10134.07488,
            "fee": None,
            "fees": [],
            "id": "354669639",
            "order": None,
            "price": 613.74,
            "side": "sell",
            "takerOrMaker": None,
            "type": None,
        },
        {
            "timestamp": dt_ts(),  # unix timestamp ms
            "amount": 12.512,
            "cost": 1000,
            "fee": None,
            "fees": [],
            "id": "354669640",
            "order": None,
            "price": 613.84,
            "side": "buy",
            "takerOrMaker": None,
            "type": None,
        },
    ]
    caplog.set_level(logging.DEBUG)
    use_trades_conf = default_conf
    use_trades_conf["exchange"]["use_public_trades"] = True
    use_trades_conf["datadir"] = tmp_path
    use_trades_conf["orderflow"] = {"max_candles": 1500}
    exchange = get_patched_exchange(mocker, use_trades_conf)
    exchange._api_async.fetch_trades = get_mock_coro(trades)
    exchange._ft_has["exchange_has_overrides"]["fetchTrades"] = True
    pairs = [("IOTA/USDT:USDT", "5m", candle_type), ("XRP/USDT:USDT", "5m", candle_type)]
    # empty dicts
    assert not exchange._trades
    res = exchange.refresh_latest_trades(pairs, cache=False)
    # No caching
    assert not exchange._trades
    assert len(res) == len(pairs)
    assert exchange._api_async.fetch_trades.call_count == 4
    exchange._api_async.fetch_trades.reset_mock()
    exchange.required_candle_call_count = 2
    res = exchange.refresh_latest_trades(pairs)
    assert len(res) == len(pairs)
    assert log_has(f"Refreshing TRADES data for {len(pairs)} pairs", caplog)
    assert exchange._trades
    assert exchange._api_async.fetch_trades.call_count == 4
    exchange._api_async.fetch_trades.reset_mock()
    for pair in pairs:
        assert isinstance(exchange.trades(pair), DataFrame)
        assert len(exchange.trades(pair)) > 0
        # trades function should return a different object on each call
        # if copy is "True"
        assert exchange.trades(pair) is not exchange.trades(pair)
        assert exchange.trades(pair) is not exchange.trades(pair, copy=True)
        assert exchange.trades(pair, copy=True) is not exchange.trades(pair, copy=True)
        assert exchange.trades(pair, copy=False) is exchange.trades(pair, copy=False)
        # test caching
        ohlcv = [
            [
                dt_ts(dt_now() - timedelta(minutes=5)),  # unix timestamp ms
                1,  # open
                2,  # high
                3,  # low
                4,  # close
                5,  # volume (in quote currency)
            ],
            [
                dt_ts(),  # unix timestamp ms
                3,  # open
                1,  # high
                4,  # low
                6,  # close
                5,  # volume (in quote currency)
            ],
        ]
        cols = DEFAULT_DATAFRAME_COLUMNS
        trades_df = DataFrame(ohlcv, columns=cols)
        trades_df["date"] = to_datetime(trades_df["date"], unit="ms", utc=True)
        trades_df["date"] = trades_df["date"].apply(lambda date: timeframe_to_prev_date("5m", date))
        exchange._klines[pair] = trades_df
    res = exchange.refresh_latest_trades(
        [("IOTA/USDT:USDT", "5m", candle_type), ("XRP/USDT:USDT", "5m", candle_type)]
    )
    assert len(res) == 0
    assert exchange._api_async.fetch_trades.call_count == 0
    caplog.clear()
    # Reset refresh times
    for pair in pairs:
        # test caching with "expired" candle
        trades = [
            {
                # unix timestamp ms
                "timestamp": dt_ts(exchange._klines[pair].iloc[-1].date - timedelta(minutes=5)),
                "amount": 16.512,
                "cost": 10134.07488,
                "fee": None,
                "fees": [],
                "id": "354669639",
                "order": None,
                "price": 613.74,
                "side": "sell",
                "takerOrMaker": None,
                "type": None,
            }
        ]
        trades_df = DataFrame(trades)
        trades_df["date"] = to_datetime(trades_df["timestamp"], unit="ms", utc=True)
        exchange._trades[pair] = trades_df
    res = exchange.refresh_latest_trades(
        [("IOTA/USDT:USDT", "5m", candle_type), ("XRP/USDT:USDT", "5m", candle_type)]
    )
    assert len(res) == len(pairs)
    assert exchange._api_async.fetch_trades.call_count == 4
    # cache - but disabled caching
    exchange._api_async.fetch_trades.reset_mock()
    exchange.required_candle_call_count = 1
    pairlist = [
        ("IOTA/ETH", "5m", candle_type),
        ("XRP/ETH", "5m", candle_type),
        ("XRP/ETH", "1d", candle_type),
    ]
    res = exchange.refresh_latest_trades(pairlist, cache=False)
    assert len(res) == 3
    assert exchange._api_async.fetch_trades.call_count == 6
    # Test the same again, should NOT return from cache!
    exchange._api_async.fetch_trades.reset_mock()
    res = exchange.refresh_latest_trades(pairlist, cache=False)
    assert len(res) == 3
    assert exchange._api_async.fetch_trades.call_count == 6
    exchange._api_async.fetch_trades.reset_mock()
    caplog.clear()
@pytest.mark.parametrize("candle_type", [CandleType.FUTURES, CandleType.MARK, CandleType.SPOT])
def test_refresh_latest_ohlcv_cache(mocker, default_conf, candle_type, time_machine) -> None:
start = datetime(2021, 8, 1, 0, 0, 0, 0, tzinfo=timezone.utc)

View File

@@ -30,6 +30,12 @@ class TestCCXTExchangeWs:
m_hist = mocker.spy(exch, "_async_get_historic_ohlcv")
m_cand = mocker.spy(exch, "_async_get_candle_history")
while True:
# Don't start the test if we are too close to the end of the minute.
if dt_now().second < 50 and dt_now().second != 0:
break
sleep(1)
res = exch.refresh_latest_ohlcv([pair_tf])
assert m_cand.call_count == 1

View File

@@ -291,9 +291,10 @@ def test_log_results_if_loss_improves(hyperopt, capsys) -> None:
"is_best": True,
}
)
hyperopt._hyper_out.print()
out, _err = capsys.readouterr()
assert all(
x in out for x in ["Best", "2/2", " 1", "0.10%", "0.00100000 BTC (1.00%)", "00:20:00"]
x in out for x in ["Best", "2/2", "1", "0.10%", "0.00100000 BTC (1.00%)", "0:20:00"]
)

View File

@@ -147,7 +147,7 @@ def test_lookahead_helper_text_table_lookahead_analysis_instances(lookahead_conf
instance = LookaheadAnalysis(lookahead_conf, strategy_obj)
instance.current_analysis = analysis
_table, _headers, data = LookaheadAnalysisSubFunctions.text_table_lookahead_analysis_instances(
data = LookaheadAnalysisSubFunctions.text_table_lookahead_analysis_instances(
lookahead_conf, [instance]
)
@@ -163,14 +163,14 @@ def test_lookahead_helper_text_table_lookahead_analysis_instances(lookahead_conf
analysis.false_exit_signals = 10
instance = LookaheadAnalysis(lookahead_conf, strategy_obj)
instance.current_analysis = analysis
_table, _headers, data = LookaheadAnalysisSubFunctions.text_table_lookahead_analysis_instances(
data = LookaheadAnalysisSubFunctions.text_table_lookahead_analysis_instances(
lookahead_conf, [instance]
)
assert data[0][2].__contains__("error")
# edit it into not showing an error
instance.failed_bias_check = False
_table, _headers, data = LookaheadAnalysisSubFunctions.text_table_lookahead_analysis_instances(
data = LookaheadAnalysisSubFunctions.text_table_lookahead_analysis_instances(
lookahead_conf, [instance]
)
assert data[0][0] == "strategy_test_v3_with_lookahead_bias.py"
@@ -183,7 +183,7 @@ def test_lookahead_helper_text_table_lookahead_analysis_instances(lookahead_conf
analysis.false_indicators.append("falseIndicator1")
analysis.false_indicators.append("falseIndicator2")
_table, _headers, data = LookaheadAnalysisSubFunctions.text_table_lookahead_analysis_instances(
data = LookaheadAnalysisSubFunctions.text_table_lookahead_analysis_instances(
lookahead_conf, [instance]
)
@@ -193,7 +193,7 @@ def test_lookahead_helper_text_table_lookahead_analysis_instances(lookahead_conf
assert len(data) == 1
# check amount of multiple rows
_table, _headers, data = LookaheadAnalysisSubFunctions.text_table_lookahead_analysis_instances(
data = LookaheadAnalysisSubFunctions.text_table_lookahead_analysis_instances(
lookahead_conf, [instance, instance, instance]
)
assert len(data) == 3

View File

@@ -59,7 +59,7 @@ def _backup_file(file: Path, copy_file: bool = False) -> None:
copyfile(file_swp, file)
def test_text_table_bt_results():
def test_text_table_bt_results(capsys):
results = pd.DataFrame(
{
"pair": ["ETH/BTC", "ETH/BTC", "ETH/BTC"],
@@ -69,21 +69,23 @@ def test_text_table_bt_results():
}
)
result_str = (
"| Pair | Trades | Avg Profit % | Tot Profit BTC | "
"Tot Profit % | Avg Duration | Win Draw Loss Win% |\n"
"|---------+----------+----------------+------------------+"
"----------------+----------------+-------------------------|\n"
"| ETH/BTC | 3 | 8.33 | 0.50000000 | "
"12.50 | 0:20:00 | 2 0 1 66.7 |\n"
"| TOTAL | 3 | 8.33 | 0.50000000 | "
"12.50 | 0:20:00 | 2 0 1 66.7 |"
)
pair_results = generate_pair_metrics(
["ETH/BTC"], stake_currency="BTC", starting_balance=4, results=results
)
assert text_table_bt_results(pair_results, stake_currency="BTC") == result_str
text_table_bt_results(pair_results, stake_currency="BTC", title="title")
text = capsys.readouterr().out
re.search(
r".* Pair .* Trades .* Avg Profit % .* Tot Profit BTC .* Tot Profit % .* "
r"Avg Duration .* Win Draw Loss Win% .*",
text,
)
re.search(
r".* ETH/BTC .* 3 .* 8.33 .* 0.50000000 .* 12.50 .* 0:20:00 .* 2 0 1 66.7 .*",
text,
)
re.search(
r".* TOTAL .* 3 .* 8.33 .* 0.50000000 .* 12.50 .* 0:20:00 .* 2 0 1 66.7 .*", text
)
def test_generate_backtest_stats(default_conf, testdatadir, tmp_path):
@@ -434,7 +436,7 @@ def test_calc_streak(testdatadir):
assert calc_streak(bt_data) == (7, 18)
def test_text_table_exit_reason():
def test_text_table_exit_reason(capsys):
results = pd.DataFrame(
{
"pair": ["ETH/BTC", "ETH/BTC", "ETH/BTC"],
@@ -448,23 +450,28 @@ def test_text_table_exit_reason():
}
)
result_str = (
"| Exit Reason | Exits | Avg Profit % | Tot Profit BTC | Tot Profit % |"
" Avg Duration | Win Draw Loss Win% |\n"
"|---------------+---------+----------------+------------------+----------------+"
"----------------+-------------------------|\n"
"| roi | 2 | 15.00 | 0.60000000 | 2.73 |"
" 0:20:00 | 2 0 0 100 |\n"
"| stop_loss | 1 | -10.00 | -0.20000000 | -0.91 |"
" 0:10:00 | 0 0 1 0 |\n"
"| TOTAL | 3 | 6.67 | 0.40000000 | 1.82 |"
" 0:17:00 | 2 0 1 66.7 |"
)
exit_reason_stats = generate_tag_metrics(
"exit_reason", starting_balance=22, results=results, skip_nan=False
)
assert text_table_tags("exit_tag", exit_reason_stats, "BTC") == result_str
text_table_tags("exit_tag", exit_reason_stats, "BTC")
text = capsys.readouterr().out
assert re.search(
r".* Exit Reason .* Exits .* Avg Profit % .* Tot Profit BTC .* Tot Profit % .* "
r"Avg Duration .* Win Draw Loss Win% .*",
text,
)
assert re.search(
r".* roi .* 2 .* 15.0 .* 0.60000000 .* 2.73 .* 0:20:00 .* 2 0 0 100 .*",
text,
)
assert re.search(
r".* stop_loss .* 1 .* -10.0 .* -0.20000000 .* -0.91 .* 0:10:00 .* 0 0 1 0 .*",
text,
)
assert re.search(
r".* TOTAL .* 3 .* 6.67 .* 0.40000000 .* 1.82 .* 0:17:00 .* 2 0 1 66.7 .*", text
)
def test_generate_sell_reason_stats():
@@ -502,39 +509,42 @@ def test_generate_sell_reason_stats():
assert stop_result["profit_mean_pct"] == round(stop_result["profit_mean"] * 100, 2)
def test_text_table_strategy(testdatadir):
def test_text_table_strategy(testdatadir, capsys):
filename = testdatadir / "backtest_results/backtest-result_multistrat.json"
bt_res_data = load_backtest_stats(filename)
bt_res_data_comparison = bt_res_data.pop("strategy_comparison")
result_str = (
"| Strategy | Trades | Avg Profit % | Tot Profit BTC |"
" Tot Profit % | Avg Duration | Win Draw Loss Win% | Drawdown |\n"
"|----------------+----------+----------------+------------------+"
"----------------+----------------+-------------------------+-----------------------|\n"
"| StrategyTestV2 | 179 | 0.08 | 0.02608550 |"
" 260.85 | 3:40:00 | 170 0 9 95.0 | 0.00308222 BTC 8.67% |\n"
"| TestStrategy | 179 | 0.08 | 0.02608550 |"
" 260.85 | 3:40:00 | 170 0 9 95.0 | 0.00308222 BTC 8.67% |"
)
strategy_results = generate_strategy_comparison(bt_stats=bt_res_data["strategy"])
assert strategy_results == bt_res_data_comparison
assert text_table_strategy(strategy_results, "BTC") == result_str
text_table_strategy(strategy_results, "BTC", "STRATEGY SUMMARY")
captured = capsys.readouterr()
text = captured.out
assert re.search(
r".* Strategy .* Trades .* Avg Profit % .* Tot Profit BTC .* Tot Profit % .* "
r"Avg Duration .* Win Draw Loss Win% .* Drawdown .*",
text,
)
assert re.search(
r".*StrategyTestV2 .* 179 .* 0.08 .* 0.02608550 .* "
r"260.85 .* 3:40:00 .* 170 0 9 95.0 .* 0.00308222 BTC 8.67%.*",
text,
)
assert re.search(
r".*TestStrategy .* 179 .* 0.08 .* 0.02608550 .* "
r"260.85 .* 3:40:00 .* 170 0 9 95.0 .* 0.00308222 BTC 8.67%.*",
text,
)
def test_generate_edge_table():
def test_generate_edge_table(capsys):
results = {}
results["ETH/BTC"] = PairInfo(-0.01, 0.60, 2, 1, 3, 10, 60)
assert generate_edge_table(results).count("+") == 7
assert generate_edge_table(results).count("| ETH/BTC |") == 1
assert (
generate_edge_table(results).count(
"| Risk Reward Ratio | Required Risk Reward | Expectancy |"
)
== 1
)
generate_edge_table(results)
text = capsys.readouterr().out
assert re.search(r".* ETH/BTC .*", text)
assert re.search(r".* Risk Reward Ratio .* Required Risk Reward .* Expectancy .*", text)
def test_generate_periodic_breakdown_stats(testdatadir):

View File

@@ -105,9 +105,7 @@ def test_recursive_helper_text_table_recursive_analysis_instances(recursive_conf
instance = RecursiveAnalysis(recursive_conf, strategy_obj)
instance.dict_recursive = dict_diff
_table, _headers, data = RecursiveAnalysisSubFunctions.text_table_recursive_analysis_instances(
[instance]
)
data = RecursiveAnalysisSubFunctions.text_table_recursive_analysis_instances([instance])
# check row contents for a try that has too few signals
assert data[0][0] == "rsi"
@@ -118,9 +116,7 @@ def test_recursive_helper_text_table_recursive_analysis_instances(recursive_conf
dict_diff = dict()
instance = RecursiveAnalysis(recursive_conf, strategy_obj)
instance.dict_recursive = dict_diff
_table, _headers, data = RecursiveAnalysisSubFunctions.text_table_recursive_analysis_instances(
[instance]
)
data = RecursiveAnalysisSubFunctions.text_table_recursive_analysis_instances([instance])
assert len(data) == 0

View File

@@ -0,0 +1,370 @@
from datetime import datetime, timezone
from unittest.mock import MagicMock
import pandas as pd
import pytest
from freqtrade.data.converter import ohlcv_to_dataframe
from freqtrade.enums import CandleType
from freqtrade.exceptions import OperationalException
from freqtrade.plugins.pairlist.PercentChangePairList import PercentChangePairList
from freqtrade.plugins.pairlistmanager import PairListManager
from tests.conftest import (
EXMS,
generate_test_data_raw,
get_patched_exchange,
get_patched_freqtradebot,
)
@pytest.fixture(scope="function")
def rpl_config(default_conf):
default_conf["stake_currency"] = "USDT"
default_conf["exchange"]["pair_whitelist"] = [
"ETH/USDT",
"XRP/USDT",
]
default_conf["exchange"]["pair_blacklist"] = ["BLK/USDT"]
return default_conf
def test_volume_change_pair_list_init_exchange_support(mocker, rpl_config):
rpl_config["pairlists"] = [
{
"method": "PercentChangePairList",
"number_assets": 2,
"sort_key": "percentage",
"min_value": 0,
"refresh_period": 86400,
}
]
with pytest.raises(
OperationalException,
match=r"Exchange does not support dynamic whitelist in this configuration. "
r"Please edit your config and either remove PercentChangePairList, "
r"or switch to using candles. and restart the bot.",
):
get_patched_freqtradebot(mocker, rpl_config)
def test_volume_change_pair_list_init_wrong_refresh_period(mocker, rpl_config):
rpl_config["pairlists"] = [
{
"method": "PercentChangePairList",
"number_assets": 2,
"sort_key": "percentage",
"min_value": 0,
"refresh_period": 1800,
"lookback_days": 4,
}
]
with pytest.raises(
OperationalException,
match=r"Refresh period of 1800 seconds is smaller than one "
r"timeframe of 1d. Please adjust refresh_period "
r"to at least 86400 and restart the bot.",
):
get_patched_freqtradebot(mocker, rpl_config)
def test_volume_change_pair_list_init_wrong_lookback_period(mocker, rpl_config):
rpl_config["pairlists"] = [
{
"method": "PercentChangePairList",
"number_assets": 2,
"sort_key": "percentage",
"min_value": 0,
"refresh_period": 86400,
"lookback_days": 3,
"lookback_period": 3,
}
]
with pytest.raises(
OperationalException,
match=r"Ambiguous configuration: lookback_days "
r"and lookback_period both set in pairlist config. "
r"Please set lookback_days only or lookback_period "
r"and lookback_timeframe and restart the bot.",
):
get_patched_freqtradebot(mocker, rpl_config)
rpl_config["pairlists"] = [
{
"method": "PercentChangePairList",
"number_assets": 2,
"sort_key": "percentage",
"min_value": 0,
"refresh_period": 86400,
"lookback_days": 1001,
}
]
with pytest.raises(
OperationalException,
match=r"ChangeFilter requires lookback_period to not exceed"
r" exchange max request size \(1000\)",
):
get_patched_freqtradebot(mocker, rpl_config)
def test_volume_change_pair_list_init_wrong_config(mocker, rpl_config):
rpl_config["pairlists"] = [
{
"method": "PercentChangePairList",
"sort_key": "percentage",
"min_value": 0,
"refresh_period": 86400,
}
]
with pytest.raises(
OperationalException,
match=r"`number_assets` not specified. Please check your configuration "
r'for "pairlist.config.number_assets"',
):
get_patched_freqtradebot(mocker, rpl_config)
def test_gen_pairlist_with_valid_change_pair_list_config(mocker, rpl_config, tickers, time_machine):
rpl_config["pairlists"] = [
{
"method": "PercentChangePairList",
"number_assets": 2,
"sort_key": "percentage",
"min_value": 0,
"refresh_period": 86400,
"lookback_days": 4,
}
]
start = datetime(2024, 8, 1, 0, 0, 0, 0, tzinfo=timezone.utc)
time_machine.move_to(start, tick=False)
mock_ohlcv_data = {
("ETH/USDT", "1d", CandleType.SPOT): pd.DataFrame(
ohlcv_to_dataframe(
generate_test_data_raw("1d", 100, start.strftime("%Y-%m-%d"), random_seed=12),
"1d",
pair="ETH/USDT",
fill_missing=True,
)
),
("BTC/USDT", "1d", CandleType.SPOT): pd.DataFrame(
ohlcv_to_dataframe(
generate_test_data_raw("1d", 100, start.strftime("%Y-%m-%d"), random_seed=13),
"1d",
pair="BTC/USDT",
fill_missing=True,
)
),
("XRP/USDT", "1d", CandleType.SPOT): pd.DataFrame(
ohlcv_to_dataframe(
generate_test_data_raw("1d", 100, start.strftime("%Y-%m-%d"), random_seed=14),
"1d",
pair="XRP/USDT",
fill_missing=True,
)
),
("NEO/USDT", "1d", CandleType.SPOT): pd.DataFrame(
ohlcv_to_dataframe(
generate_test_data_raw("1d", 100, start.strftime("%Y-%m-%d"), random_seed=15),
"1d",
pair="NEO/USDT",
fill_missing=True,
)
),
("TKN/USDT", "1d", CandleType.SPOT): pd.DataFrame(
# Make sure always have highest percentage
{
"timestamp": [
"2024-07-01 00:00:00",
"2024-07-01 01:00:00",
"2024-07-01 02:00:00",
"2024-07-01 03:00:00",
"2024-07-01 04:00:00",
"2024-07-01 05:00:00",
],
"open": [100, 102, 101, 103, 104, 105],
"high": [102, 103, 102, 104, 105, 106],
"low": [99, 101, 100, 102, 103, 104],
"close": [101, 102, 103, 104, 105, 106],
"volume": [1000, 1500, 2000, 2500, 3000, 3500],
}
),
}
mocker.patch(f"{EXMS}.refresh_latest_ohlcv", MagicMock(return_value=mock_ohlcv_data))
exchange = get_patched_exchange(mocker, rpl_config, exchange="binance")
pairlistmanager = PairListManager(exchange, rpl_config)
remote_pairlist = PercentChangePairList(
exchange, pairlistmanager, rpl_config, rpl_config["pairlists"][0], 0
)
result = remote_pairlist.gen_pairlist(tickers)
assert len(result) == 2
assert result == ["NEO/USDT", "TKN/USDT"]
def test_filter_pairlist_with_empty_ticker(mocker, rpl_config, tickers, time_machine):
rpl_config["pairlists"] = [
{
"method": "PercentChangePairList",
"number_assets": 2,
"sort_key": "percentage",
"min_value": 0,
"refresh_period": 86400,
"sort_direction": "asc",
"lookback_days": 4,
}
]
start = datetime(2024, 8, 1, 0, 0, 0, 0, tzinfo=timezone.utc)
time_machine.move_to(start, tick=False)
mock_ohlcv_data = {
("ETH/USDT", "1d", CandleType.SPOT): pd.DataFrame(
{
"timestamp": [
"2024-07-01 00:00:00",
"2024-07-01 01:00:00",
"2024-07-01 02:00:00",
"2024-07-01 03:00:00",
"2024-07-01 04:00:00",
"2024-07-01 05:00:00",
],
"open": [100, 102, 101, 103, 104, 105],
"high": [102, 103, 102, 104, 105, 106],
"low": [99, 101, 100, 102, 103, 104],
"close": [101, 102, 103, 104, 105, 105],
"volume": [1000, 1500, 2000, 2500, 3000, 3500],
}
),
("XRP/USDT", "1d", CandleType.SPOT): pd.DataFrame(
{
"timestamp": [
"2024-07-01 00:00:00",
"2024-07-01 01:00:00",
"2024-07-01 02:00:00",
"2024-07-01 03:00:00",
"2024-07-01 04:00:00",
"2024-07-01 05:00:00",
],
"open": [100, 102, 101, 103, 104, 105],
"high": [102, 103, 102, 104, 105, 106],
"low": [99, 101, 100, 102, 103, 104],
"close": [101, 102, 103, 104, 105, 104],
"volume": [1000, 1500, 2000, 2500, 3000, 3400],
}
),
}
mocker.patch(f"{EXMS}.refresh_latest_ohlcv", MagicMock(return_value=mock_ohlcv_data))
exchange = get_patched_exchange(mocker, rpl_config, exchange="binance")
pairlistmanager = PairListManager(exchange, rpl_config)
remote_pairlist = PercentChangePairList(
exchange, pairlistmanager, rpl_config, rpl_config["pairlists"][0], 0
)
result = remote_pairlist.filter_pairlist(rpl_config["exchange"]["pair_whitelist"], {})
assert len(result) == 2
assert result == ["XRP/USDT", "ETH/USDT"]
def test_filter_pairlist_with_max_value_set(mocker, rpl_config, tickers, time_machine):
rpl_config["pairlists"] = [
{
"method": "PercentChangePairList",
"number_assets": 2,
"sort_key": "percentage",
"min_value": 0,
"max_value": 15,
"refresh_period": 86400,
"lookback_days": 4,
}
]
start = datetime(2024, 8, 1, 0, 0, 0, 0, tzinfo=timezone.utc)
time_machine.move_to(start, tick=False)
mock_ohlcv_data = {
("ETH/USDT", "1d", CandleType.SPOT): pd.DataFrame(
{
"timestamp": [
"2024-07-01 00:00:00",
"2024-07-01 01:00:00",
"2024-07-01 02:00:00",
"2024-07-01 03:00:00",
"2024-07-01 04:00:00",
"2024-07-01 05:00:00",
],
"open": [100, 102, 101, 103, 104, 105],
"high": [102, 103, 102, 104, 105, 106],
"low": [99, 101, 100, 102, 103, 104],
"close": [101, 102, 103, 104, 105, 106],
"volume": [1000, 1500, 2000, 1800, 2400, 2500],
}
),
("XRP/USDT", "1d", CandleType.SPOT): pd.DataFrame(
{
"timestamp": [
"2024-07-01 00:00:00",
"2024-07-01 01:00:00",
"2024-07-01 02:00:00",
"2024-07-01 03:00:00",
"2024-07-01 04:00:00",
"2024-07-01 05:00:00",
],
"open": [100, 102, 101, 103, 104, 105],
"high": [102, 103, 102, 104, 105, 106],
"low": [99, 101, 100, 102, 103, 104],
"close": [101, 102, 103, 104, 105, 101],
"volume": [1000, 1500, 2000, 2500, 3000, 3500],
}
),
}
mocker.patch(f"{EXMS}.refresh_latest_ohlcv", MagicMock(return_value=mock_ohlcv_data))
exchange = get_patched_exchange(mocker, rpl_config, exchange="binance")
pairlistmanager = PairListManager(exchange, rpl_config)
remote_pairlist = PercentChangePairList(
exchange, pairlistmanager, rpl_config, rpl_config["pairlists"][0], 0
)
result = remote_pairlist.filter_pairlist(rpl_config["exchange"]["pair_whitelist"], {})
assert len(result) == 1
assert result == ["ETH/USDT"]
def test_gen_pairlist_from_tickers(mocker, rpl_config, tickers):
rpl_config["pairlists"] = [
{
"method": "PercentChangePairList",
"number_assets": 2,
"sort_key": "percentage",
"min_value": 0,
}
]
mocker.patch(f"{EXMS}.exchange_has", MagicMock(return_value=True))
exchange = get_patched_exchange(mocker, rpl_config, exchange="binance")
pairlistmanager = PairListManager(exchange, rpl_config)
remote_pairlist = PercentChangePairList(
exchange, pairlistmanager, rpl_config, rpl_config["pairlists"][0], 0
)
result = remote_pairlist.gen_pairlist(tickers.return_value)
assert len(result) == 1
assert result == ["ETH/USDT"]

View File

@@ -27,7 +27,7 @@ from freqtrade.configuration.load_config import (
)
from freqtrade.constants import DEFAULT_DB_DRYRUN_URL, DEFAULT_DB_PROD_URL, ENV_VAR_PREFIX
from freqtrade.enums import RunMode
from freqtrade.exceptions import OperationalException
from freqtrade.exceptions import ConfigurationError, OperationalException
from tests.conftest import (
CURRENT_TEST_STRATEGY,
log_has,
@@ -1103,6 +1103,29 @@ def test__validate_consumers(default_conf, caplog) -> None:
assert log_has_re("To receive best performance with external data.*", caplog)
def test__validate_orderflow(default_conf) -> None:
conf = deepcopy(default_conf)
conf["exchange"]["use_public_trades"] = True
with pytest.raises(
ConfigurationError,
match="Orderflow is a required configuration key when using public trades.",
):
validate_config_consistency(conf)
conf.update(
{
"orderflow": {
"scale": 0.5,
"stacked_imbalance_range": 3,
"imbalance_volume": 100,
"imbalance_ratio": 3,
}
}
)
# Should pass.
validate_config_consistency(conf)
def test_load_config_test_comments() -> None:
"""
Load config with comments

1
tests/testdata/orderflow/candles.json vendored Normal file

File diff suppressed because one or more lines are too long

Binary file not shown.

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,8 @@
,timestamp,id,type,side,price,amount,cost,date
0,1675311000092, 1588563957, ,buy, 23438.0, 0.013, 0, 2023-02-02 04:10:00.092000+00:00
1,1675311000211, 1588563958, ,sell, 23437.5, 0.001, 0, 2023-02-02 04:10:00.211000+00:00
2,1675311000335, 1588563959, ,sell , 23437.5, 0.196, 0, 2023-02-02 04:10:00.335000+00:00
3,1675311000769, 1588563960, , sell, 23437.5, 0.046, 0, 2023-02-02 04:10:00.769000+00:00
4,1675311000773, 1588563961, ,buy , 23438.0, 0.127, 0, 2023-02-02 04:10:00.773000+00:00
5,1675311000774, 1588563959, ,sell, 23437.5, 0.001, 0, 2023-02-02 04:10:00.774000+00:00
6,1675311000775, 1588563960, ,sell, 23437.5, 0.001, 0, 2023-02-02 04:10:00.775000+00:00
1 timestamp id type side price amount cost date
2 0 1675311000092 1588563957 buy 23438.0 0.013 0 2023-02-02 04:10:00.092000+00:00
3 1 1675311000211 1588563958 sell 23437.5 0.001 0 2023-02-02 04:10:00.211000+00:00
4 2 1675311000335 1588563959 sell 23437.5 0.196 0 2023-02-02 04:10:00.335000+00:00
5 3 1675311000769 1588563960 sell 23437.5 0.046 0 2023-02-02 04:10:00.769000+00:00
6 4 1675311000773 1588563961 buy 23438.0 0.127 0 2023-02-02 04:10:00.773000+00:00
7 5 1675311000774 1588563959 sell 23437.5 0.001 0 2023-02-02 04:10:00.774000+00:00
8 6 1675311000775 1588563960 sell 23437.5 0.001 0 2023-02-02 04:10:00.775000+00:00