Mirror of https://github.com/freqtrade/freqtrade.git (synced 2025-12-03 02:23:05 +00:00)
ruff format: Update more test files
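The hunks below all apply the same mechanical changes made by ruff format: string literals are normalized to double quotes, and statements that exceed the line-length limit are re-wrapped with one argument per line and a trailing comma. A minimal before/after sketch of the pattern, lifted from a mocker.patch call that appears in the diff below (the MagicMock stand-in for the pytest-mock fixture is only an assumption to keep the snippet runnable on its own):

from unittest.mock import MagicMock, PropertyMock

mocker = MagicMock()  # stand-in for the pytest-mock "mocker" fixture used in the real tests

# Before formatting: single quotes and a hand-wrapped continuation line
mocker.patch('freqtrade.plugins.pairlistmanager.PairListManager.whitelist',
             PropertyMock(return_value=['UNITTEST/BTC']))

# After ruff format: double quotes, one argument per line, magic trailing comma
mocker.patch(
    "freqtrade.plugins.pairlistmanager.PairListManager.whitelist",
    PropertyMock(return_value=["UNITTEST/BTC"]),
)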
@@ -12,25 +12,25 @@ from tests.conftest import EXMS, get_patched_worker, log_has, log_has_re


def test_worker_state(mocker, default_conf, markets) -> None:
    mocker.patch(f'{EXMS}.markets', PropertyMock(return_value=markets))
    mocker.patch(f"{EXMS}.markets", PropertyMock(return_value=markets))
    worker = get_patched_worker(mocker, default_conf)
    assert worker.freqtrade.state is State.RUNNING

    default_conf.pop('initial_state')
    default_conf.pop("initial_state")
    worker = Worker(args=None, config=default_conf)
    assert worker.freqtrade.state is State.STOPPED


def test_worker_running(mocker, default_conf, caplog) -> None:
    mock_throttle = MagicMock()
    mocker.patch('freqtrade.worker.Worker._throttle', mock_throttle)
    mocker.patch('freqtrade.persistence.Trade.stoploss_reinitialization', MagicMock())
    mocker.patch("freqtrade.worker.Worker._throttle", mock_throttle)
    mocker.patch("freqtrade.persistence.Trade.stoploss_reinitialization", MagicMock())

    worker = get_patched_worker(mocker, default_conf)

    state = worker._worker(old_state=None)
    assert state is State.RUNNING
    assert log_has('Changing state to: RUNNING', caplog)
    assert log_has("Changing state to: RUNNING", caplog)
    assert mock_throttle.call_count == 1
    # Check strategy is loaded, and received a dataprovider object
    assert worker.freqtrade.strategy
@@ -40,13 +40,13 @@ def test_worker_running(mocker, default_conf, caplog) -> None:

def test_worker_stopped(mocker, default_conf, caplog) -> None:
    mock_throttle = MagicMock()
    mocker.patch('freqtrade.worker.Worker._throttle', mock_throttle)
    mocker.patch("freqtrade.worker.Worker._throttle", mock_throttle)

    worker = get_patched_worker(mocker, default_conf)
    worker.freqtrade.state = State.STOPPED
    state = worker._worker(old_state=State.RUNNING)
    assert state is State.STOPPED
    assert log_has('Changing state from RUNNING to: STOPPED', caplog)
    assert log_has("Changing state from RUNNING to: STOPPED", caplog)
    assert mock_throttle.call_count == 1


@@ -70,11 +70,11 @@ def test_throttle(mocker, default_conf, caplog) -> None:


def test_throttle_sleep_time(mocker, default_conf, caplog) -> None:

    caplog.set_level(logging.DEBUG)
    worker = get_patched_worker(mocker, default_conf)
    sleep_mock = mocker.patch("freqtrade.worker.Worker._sleep")
    with time_machine.travel("2022-09-01 05:00:00 +00:00") as t:

        def throttled_func(x=1):
            t.shift(timedelta(seconds=x))
            return 42
@@ -107,8 +107,12 @@ def test_throttle_sleep_time(mocker, default_conf, caplog) -> None:

        sleep_mock.reset_mock()
        # Throttle for more than 5m (1 timeframe)
        assert worker._throttle(throttled_func, throttle_secs=400, timeframe='5m',
                                timeframe_offset=0.4, x=5) == 42
        assert (
            worker._throttle(
                throttled_func, throttle_secs=400, timeframe="5m", timeframe_offset=0.4, x=5
            )
            == 42
        )
        assert sleep_mock.call_count == 1
        # 300 (5m) - 60 (1m - see set time above) - 5 (duration of throttled_func) = 235
        assert 235.2 < sleep_mock.call_args[0][0] < 235.6
@@ -117,8 +121,12 @@ def test_throttle_sleep_time(mocker, default_conf, caplog) -> None:
        sleep_mock.reset_mock()
        # Offset of 5s, so we hit the sweet-spot between "candle" and "candle offset"
        # Which should not get a throttle iteration to avoid late candle fetching
        assert worker._throttle(throttled_func, throttle_secs=10, timeframe='5m',
                                timeframe_offset=5, x=1.2) == 42
        assert (
            worker._throttle(
                throttled_func, throttle_secs=10, timeframe="5m", timeframe_offset=5, x=1.2
            )
            == 42
        )
        assert sleep_mock.call_count == 1
        # Time is slightly bigger than throttle secs due to the high timeframe offset.
        assert 11.1 < sleep_mock.call_args[0][0] < 13.2
@@ -141,7 +149,7 @@ def test_worker_heartbeat_running(default_conf, mocker, caplog):
    message = r"Bot heartbeat\. PID=.*state='RUNNING'"

    mock_throttle = MagicMock()
    mocker.patch('freqtrade.worker.Worker._throttle', mock_throttle)
    mocker.patch("freqtrade.worker.Worker._throttle", mock_throttle)
    worker = get_patched_worker(mocker, default_conf)

    worker.freqtrade.state = State.RUNNING
@@ -164,7 +172,7 @@ def test_worker_heartbeat_stopped(default_conf, mocker, caplog):
    message = r"Bot heartbeat\. PID=.*state='STOPPED'"

    mock_throttle = MagicMock()
    mocker.patch('freqtrade.worker.Worker._throttle', mock_throttle)
    mocker.patch("freqtrade.worker.Worker._throttle", mock_throttle)
    worker = get_patched_worker(mocker, default_conf)

    worker.freqtrade.state = State.STOPPED

@@ -3,25 +3,31 @@ import pytest
from freqtrade.enums import CandleType


@pytest.mark.parametrize('input,expected', [
    ('', CandleType.SPOT),
    ('spot', CandleType.SPOT),
    (CandleType.SPOT, CandleType.SPOT),
    (CandleType.FUTURES, CandleType.FUTURES),
    (CandleType.INDEX, CandleType.INDEX),
    (CandleType.MARK, CandleType.MARK),
    ('futures', CandleType.FUTURES),
    ('mark', CandleType.MARK),
    ('premiumIndex', CandleType.PREMIUMINDEX),
])
@pytest.mark.parametrize(
    "input,expected",
    [
        ("", CandleType.SPOT),
        ("spot", CandleType.SPOT),
        (CandleType.SPOT, CandleType.SPOT),
        (CandleType.FUTURES, CandleType.FUTURES),
        (CandleType.INDEX, CandleType.INDEX),
        (CandleType.MARK, CandleType.MARK),
        ("futures", CandleType.FUTURES),
        ("mark", CandleType.MARK),
        ("premiumIndex", CandleType.PREMIUMINDEX),
    ],
)
def test_CandleType_from_string(input, expected):
    assert CandleType.from_string(input) == expected


@pytest.mark.parametrize('input,expected', [
    ('futures', CandleType.FUTURES),
    ('spot', CandleType.SPOT),
    ('margin', CandleType.SPOT),
])
@pytest.mark.parametrize(
    "input,expected",
    [
        ("futures", CandleType.FUTURES),
        ("spot", CandleType.SPOT),
        ("margin", CandleType.SPOT),
    ],
)
def test_CandleType_get_default(input, expected):
    assert CandleType.get_default(input) == expected

@@ -15,17 +15,18 @@ from tests.conftest import EXMS, get_args, log_has_re, patch_exchange

@pytest.fixture
def lookahead_conf(default_conf_usdt, tmp_path):
    default_conf_usdt['user_data_dir'] = tmp_path
    default_conf_usdt['minimum_trade_amount'] = 10
    default_conf_usdt['targeted_trade_amount'] = 20
    default_conf_usdt['timerange'] = '20220101-20220501'
    default_conf_usdt["user_data_dir"] = tmp_path
    default_conf_usdt["minimum_trade_amount"] = 10
    default_conf_usdt["targeted_trade_amount"] = 20
    default_conf_usdt["timerange"] = "20220101-20220501"

    default_conf_usdt['strategy_path'] = str(
        Path(__file__).parent.parent / "strategy/strats/lookahead_bias")
    default_conf_usdt['strategy'] = 'strategy_test_v3_with_lookahead_bias'
    default_conf_usdt['max_open_trades'] = 1
    default_conf_usdt['dry_run_wallet'] = 1000000000
    default_conf_usdt['pairs'] = ['UNITTEST/USDT']
    default_conf_usdt["strategy_path"] = str(
        Path(__file__).parent.parent / "strategy/strats/lookahead_bias"
    )
    default_conf_usdt["strategy"] = "strategy_test_v3_with_lookahead_bias"
    default_conf_usdt["max_open_trades"] = 1
    default_conf_usdt["dry_run_wallet"] = 1000000000
    default_conf_usdt["pairs"] = ["UNITTEST/USDT"]
    return default_conf_usdt


@@ -33,7 +34,7 @@ def test_start_lookahead_analysis(mocker):
    single_mock = MagicMock()
    text_table_mock = MagicMock()
    mocker.patch.multiple(
        'freqtrade.optimize.analysis.lookahead_helpers.LookaheadAnalysisSubFunctions',
        "freqtrade.optimize.analysis.lookahead_helpers.LookaheadAnalysisSubFunctions",
        initialize_single_lookahead_analysis=single_mock,
        text_table_lookahead_analysis_instances=text_table_mock,
    )
@@ -48,10 +49,10 @@ def test_start_lookahead_analysis(mocker):
        "--max-open-trades",
        "1",
        "--timerange",
        "20220101-20220201"
        "20220101-20220201",
    ]
    pargs = get_args(args)
    pargs['config'] = None
    pargs["config"] = None

    start_lookahead_analysis(pargs)
    assert single_mock.call_count == 1
@@ -72,9 +73,11 @@ def test_start_lookahead_analysis(mocker):
        "20",
    ]
    pargs = get_args(args)
    pargs['config'] = None
    with pytest.raises(OperationalException,
                       match=r"Targeted trade amount can't be smaller than minimum trade amount.*"):
    pargs["config"] = None
    with pytest.raises(
        OperationalException,
        match=r"Targeted trade amount can't be smaller than minimum trade amount.*",
    ):
        start_lookahead_analysis(pargs)

    # Missing timerange
@@ -90,27 +93,27 @@ def test_start_lookahead_analysis(mocker):
        "1",
    ]
    pargs = get_args(args)
    pargs['config'] = None
    with pytest.raises(OperationalException,
                       match=r"Please set a timerange\..*"):
    pargs["config"] = None
    with pytest.raises(OperationalException, match=r"Please set a timerange\..*"):
        start_lookahead_analysis(pargs)


def test_lookahead_helper_invalid_config(lookahead_conf) -> None:
    conf = deepcopy(lookahead_conf)
    conf['targeted_trade_amount'] = 10
    conf['minimum_trade_amount'] = 40
    with pytest.raises(OperationalException,
                       match=r"Targeted trade amount can't be smaller than minimum trade amount.*"):
    conf["targeted_trade_amount"] = 10
    conf["minimum_trade_amount"] = 40
    with pytest.raises(
        OperationalException,
        match=r"Targeted trade amount can't be smaller than minimum trade amount.*",
    ):
        LookaheadAnalysisSubFunctions.start(conf)


def test_lookahead_helper_no_strategy_defined(lookahead_conf):
    conf = deepcopy(lookahead_conf)
    conf['pairs'] = ['UNITTEST/USDT']
    del conf['strategy']
    with pytest.raises(OperationalException,
                       match=r"No Strategy specified"):
    conf["pairs"] = ["UNITTEST/USDT"]
    del conf["strategy"]
    with pytest.raises(OperationalException, match=r"No Strategy specified"):
        LookaheadAnalysisSubFunctions.start(conf)

@@ -118,7 +121,7 @@ def test_lookahead_helper_start(lookahead_conf, mocker) -> None:
    single_mock = MagicMock()
    text_table_mock = MagicMock()
    mocker.patch.multiple(
        'freqtrade.optimize.analysis.lookahead_helpers.LookaheadAnalysisSubFunctions',
        "freqtrade.optimize.analysis.lookahead_helpers.LookaheadAnalysisSubFunctions",
        initialize_single_lookahead_analysis=single_mock,
        text_table_lookahead_analysis_instances=text_table_mock,
    )
@@ -138,19 +141,20 @@ def test_lookahead_helper_text_table_lookahead_analysis_instances(lookahead_conf
    analysis.false_exit_signals = 3

    strategy_obj = {
        'name': "strategy_test_v3_with_lookahead_bias",
        'location': Path(lookahead_conf['strategy_path'], f"{lookahead_conf['strategy']}.py")
        "name": "strategy_test_v3_with_lookahead_bias",
        "location": Path(lookahead_conf["strategy_path"], f"{lookahead_conf['strategy']}.py"),
    }

    instance = LookaheadAnalysis(lookahead_conf, strategy_obj)
    instance.current_analysis = analysis
    _table, _headers, data = (LookaheadAnalysisSubFunctions.
                              text_table_lookahead_analysis_instances(lookahead_conf, [instance]))
    _table, _headers, data = LookaheadAnalysisSubFunctions.text_table_lookahead_analysis_instances(
        lookahead_conf, [instance]
    )

    # check row contents for a try that has too few signals
    assert data[0][0] == 'strategy_test_v3_with_lookahead_bias.py'
    assert data[0][1] == 'strategy_test_v3_with_lookahead_bias'
    assert data[0][2].__contains__('too few trades')
    assert data[0][0] == "strategy_test_v3_with_lookahead_bias.py"
    assert data[0][1] == "strategy_test_v3_with_lookahead_bias"
    assert data[0][2].__contains__("too few trades")
    assert len(data[0]) == 3

    # now check for an error which occurred after enough trades
@@ -159,46 +163,51 @@ def test_lookahead_helper_text_table_lookahead_analysis_instances(lookahead_conf
    analysis.false_exit_signals = 10
    instance = LookaheadAnalysis(lookahead_conf, strategy_obj)
    instance.current_analysis = analysis
    _table, _headers, data = (LookaheadAnalysisSubFunctions.
                              text_table_lookahead_analysis_instances(lookahead_conf, [instance]))
    _table, _headers, data = LookaheadAnalysisSubFunctions.text_table_lookahead_analysis_instances(
        lookahead_conf, [instance]
    )
    assert data[0][2].__contains__("error")

    # edit it into not showing an error
    instance.failed_bias_check = False
    _table, _headers, data = (LookaheadAnalysisSubFunctions.
                              text_table_lookahead_analysis_instances(lookahead_conf, [instance]))
    assert data[0][0] == 'strategy_test_v3_with_lookahead_bias.py'
    assert data[0][1] == 'strategy_test_v3_with_lookahead_bias'
    _table, _headers, data = LookaheadAnalysisSubFunctions.text_table_lookahead_analysis_instances(
        lookahead_conf, [instance]
    )
    assert data[0][0] == "strategy_test_v3_with_lookahead_bias.py"
    assert data[0][1] == "strategy_test_v3_with_lookahead_bias"
    assert data[0][2]  # True
    assert data[0][3] == 12
    assert data[0][4] == 11
    assert data[0][5] == 10
    assert data[0][6] == ''
    assert data[0][6] == ""

    analysis.false_indicators.append('falseIndicator1')
    analysis.false_indicators.append('falseIndicator2')
    _table, _headers, data = (LookaheadAnalysisSubFunctions.
                              text_table_lookahead_analysis_instances(lookahead_conf, [instance]))
    analysis.false_indicators.append("falseIndicator1")
    analysis.false_indicators.append("falseIndicator2")
    _table, _headers, data = LookaheadAnalysisSubFunctions.text_table_lookahead_analysis_instances(
        lookahead_conf, [instance]
    )

    assert data[0][6] == 'falseIndicator1, falseIndicator2'
    assert data[0][6] == "falseIndicator1, falseIndicator2"

    # check amount of returning rows
    assert len(data) == 1

    # check amount of multiple rows
    _table, _headers, data = (LookaheadAnalysisSubFunctions.text_table_lookahead_analysis_instances(
        lookahead_conf, [instance, instance, instance]))
    _table, _headers, data = LookaheadAnalysisSubFunctions.text_table_lookahead_analysis_instances(
        lookahead_conf, [instance, instance, instance]
    )
    assert len(data) == 3


def test_lookahead_helper_export_to_csv(lookahead_conf):
    import pandas as pd
    lookahead_conf['lookahead_analysis_exportfilename'] = "temp_csv_lookahead_analysis.csv"

    lookahead_conf["lookahead_analysis_exportfilename"] = "temp_csv_lookahead_analysis.csv"

    # just to be sure the test won't fail: remove file if exists for some reason
    # (repeat this at the end once again to clean up)
    if Path(lookahead_conf['lookahead_analysis_exportfilename']).exists():
        Path(lookahead_conf['lookahead_analysis_exportfilename']).unlink()
    if Path(lookahead_conf["lookahead_analysis_exportfilename"]).exists():
        Path(lookahead_conf["lookahead_analysis_exportfilename"]).unlink()

    # before we can start we have to delete the

@@ -208,13 +217,13 @@ def test_lookahead_helper_export_to_csv(lookahead_conf):
    analysis1.total_signals = 12
    analysis1.false_entry_signals = 11
    analysis1.false_exit_signals = 10
    analysis1.false_indicators.append('falseIndicator1')
    analysis1.false_indicators.append('falseIndicator2')
    lookahead_conf['lookahead_analysis_exportfilename'] = "temp_csv_lookahead_analysis.csv"
    analysis1.false_indicators.append("falseIndicator1")
    analysis1.false_indicators.append("falseIndicator2")
    lookahead_conf["lookahead_analysis_exportfilename"] = "temp_csv_lookahead_analysis.csv"

    strategy_obj1 = {
        'name': "strat1",
        'location': Path("file1.py"),
        "name": "strat1",
        "location": Path("file1.py"),
    }

    instance1 = LookaheadAnalysis(lookahead_conf, strategy_obj1)
@@ -222,30 +231,28 @@ def test_lookahead_helper_export_to_csv(lookahead_conf):
    instance1.current_analysis = analysis1

    LookaheadAnalysisSubFunctions.export_to_csv(lookahead_conf, [instance1])
    saved_data1 = pd.read_csv(lookahead_conf['lookahead_analysis_exportfilename'])
    saved_data1 = pd.read_csv(lookahead_conf["lookahead_analysis_exportfilename"])

    expected_values1 = [
        [
            'file1.py', 'strat1', True,
            12, 11, 10,
            "falseIndicator1,falseIndicator2"
        ],
        ["file1.py", "strat1", True, 12, 11, 10, "falseIndicator1,falseIndicator2"],
    ]
    expected_columns = [
        "filename",
        "strategy",
        "has_bias",
        "total_signals",
        "biased_entry_signals",
        "biased_exit_signals",
        "biased_indicators",
    ]
    expected_columns = ['filename', 'strategy', 'has_bias',
                        'total_signals', 'biased_entry_signals', 'biased_exit_signals',
                        'biased_indicators']
    expected_data1 = pd.DataFrame(expected_values1, columns=expected_columns)

    assert Path(lookahead_conf['lookahead_analysis_exportfilename']).exists()
    assert Path(lookahead_conf["lookahead_analysis_exportfilename"]).exists()
    assert expected_data1.equals(saved_data1)

    # 2nd check: update the same strategy (which internally changed or is being retested)
    expected_values2 = [
        [
            'file1.py', 'strat1', False,
            22, 21, 20,
            "falseIndicator3,falseIndicator4"
        ],
        ["file1.py", "strat1", False, 22, 21, 20, "falseIndicator3,falseIndicator4"],
    ]
    expected_data2 = pd.DataFrame(expected_values2, columns=expected_columns)

@@ -254,12 +261,12 @@ def test_lookahead_helper_export_to_csv(lookahead_conf):
    analysis2.total_signals = 22
    analysis2.false_entry_signals = 21
    analysis2.false_exit_signals = 20
    analysis2.false_indicators.append('falseIndicator3')
    analysis2.false_indicators.append('falseIndicator4')
    analysis2.false_indicators.append("falseIndicator3")
    analysis2.false_indicators.append("falseIndicator4")

    strategy_obj2 = {
        'name': "strat1",
        'location': Path("file1.py"),
        "name": "strat1",
        "location": Path("file1.py"),
    }

    instance2 = LookaheadAnalysis(lookahead_conf, strategy_obj2)
@@ -267,21 +274,14 @@ def test_lookahead_helper_export_to_csv(lookahead_conf):
    instance2.current_analysis = analysis2

    LookaheadAnalysisSubFunctions.export_to_csv(lookahead_conf, [instance2])
    saved_data2 = pd.read_csv(lookahead_conf['lookahead_analysis_exportfilename'])
    saved_data2 = pd.read_csv(lookahead_conf["lookahead_analysis_exportfilename"])

    assert expected_data2.equals(saved_data2)

    # 3rd check: now we add a new row to an already existing file
    expected_values3 = [
        [
            'file1.py', 'strat1', False,
            22, 21, 20,
            "falseIndicator3,falseIndicator4"
        ],
        [
            'file3.py', 'strat3', True,
            32, 31, 30, "falseIndicator5,falseIndicator6"
        ],
        ["file1.py", "strat1", False, 22, 21, 20, "falseIndicator3,falseIndicator4"],
        ["file3.py", "strat3", True, 32, 31, 30, "falseIndicator5,falseIndicator6"],
    ]

    expected_data3 = pd.DataFrame(expected_values3, columns=expected_columns)
@@ -291,13 +291,13 @@ def test_lookahead_helper_export_to_csv(lookahead_conf):
    analysis3.total_signals = 32
    analysis3.false_entry_signals = 31
    analysis3.false_exit_signals = 30
    analysis3.false_indicators.append('falseIndicator5')
    analysis3.false_indicators.append('falseIndicator6')
    lookahead_conf['lookahead_analysis_exportfilename'] = "temp_csv_lookahead_analysis.csv"
    analysis3.false_indicators.append("falseIndicator5")
    analysis3.false_indicators.append("falseIndicator6")
    lookahead_conf["lookahead_analysis_exportfilename"] = "temp_csv_lookahead_analysis.csv"

    strategy_obj3 = {
        'name': "strat3",
        'location': Path("file3.py"),
        "name": "strat3",
        "location": Path("file3.py"),
    }

    instance3 = LookaheadAnalysis(lookahead_conf, strategy_obj3)
@@ -305,67 +305,66 @@ def test_lookahead_helper_export_to_csv(lookahead_conf):
    instance3.current_analysis = analysis3

    LookaheadAnalysisSubFunctions.export_to_csv(lookahead_conf, [instance3])
    saved_data3 = pd.read_csv(lookahead_conf['lookahead_analysis_exportfilename'])
    saved_data3 = pd.read_csv(lookahead_conf["lookahead_analysis_exportfilename"])
    assert expected_data3.equals(saved_data3)

    # remove csv file after the test is done
    if Path(lookahead_conf['lookahead_analysis_exportfilename']).exists():
        Path(lookahead_conf['lookahead_analysis_exportfilename']).unlink()
    if Path(lookahead_conf["lookahead_analysis_exportfilename"]).exists():
        Path(lookahead_conf["lookahead_analysis_exportfilename"]).unlink()


def test_initialize_single_lookahead_analysis(lookahead_conf, mocker, caplog):
    mocker.patch('freqtrade.data.history.get_timerange', get_timerange)
    mocker.patch(f'{EXMS}.get_fee', return_value=0.0)
    mocker.patch(f'{EXMS}.get_min_pair_stake_amount', return_value=0.00001)
    mocker.patch(f'{EXMS}.get_max_pair_stake_amount', return_value=float('inf'))
    mocker.patch("freqtrade.data.history.get_timerange", get_timerange)
    mocker.patch(f"{EXMS}.get_fee", return_value=0.0)
    mocker.patch(f"{EXMS}.get_min_pair_stake_amount", return_value=0.00001)
    mocker.patch(f"{EXMS}.get_max_pair_stake_amount", return_value=float("inf"))
    patch_exchange(mocker)
    mocker.patch('freqtrade.plugins.pairlistmanager.PairListManager.whitelist',
                 PropertyMock(return_value=['UNITTEST/BTC']))
    lookahead_conf['pairs'] = ['UNITTEST/USDT']
    mocker.patch(
        "freqtrade.plugins.pairlistmanager.PairListManager.whitelist",
        PropertyMock(return_value=["UNITTEST/BTC"]),
    )
    lookahead_conf["pairs"] = ["UNITTEST/USDT"]

    lookahead_conf['timeframe'] = '5m'
    lookahead_conf['timerange'] = '20180119-20180122'
    start_mock = mocker.patch('freqtrade.optimize.analysis.lookahead.LookaheadAnalysis.start')
    lookahead_conf["timeframe"] = "5m"
    lookahead_conf["timerange"] = "20180119-20180122"
    start_mock = mocker.patch("freqtrade.optimize.analysis.lookahead.LookaheadAnalysis.start")
    strategy_obj = {
        'name': "strategy_test_v3_with_lookahead_bias",
        'location': Path(lookahead_conf['strategy_path'], f"{lookahead_conf['strategy']}.py")
        "name": "strategy_test_v3_with_lookahead_bias",
        "location": Path(lookahead_conf["strategy_path"], f"{lookahead_conf['strategy']}.py"),
    }

    instance = LookaheadAnalysisSubFunctions.initialize_single_lookahead_analysis(
        lookahead_conf, strategy_obj)
        lookahead_conf, strategy_obj
    )
    assert log_has_re(r"Bias test of .* started\.", caplog)
    assert start_mock.call_count == 1

    assert instance.strategy_obj['name'] == "strategy_test_v3_with_lookahead_bias"
    assert instance.strategy_obj["name"] == "strategy_test_v3_with_lookahead_bias"


@pytest.mark.parametrize('scenario', [
    'no_bias', 'bias1'
])
@pytest.mark.parametrize("scenario", ["no_bias", "bias1"])
def test_biased_strategy(lookahead_conf, mocker, caplog, scenario) -> None:
    patch_exchange(mocker)
    mocker.patch('freqtrade.data.history.get_timerange', get_timerange)
    mocker.patch(f'{EXMS}.get_fee', return_value=0.0)
    mocker.patch(f'{EXMS}.get_min_pair_stake_amount', return_value=0.00001)
    mocker.patch(f'{EXMS}.get_max_pair_stake_amount', return_value=float('inf'))
    mocker.patch('freqtrade.plugins.pairlistmanager.PairListManager.whitelist',
                 PropertyMock(return_value=['UNITTEST/BTC']))
    lookahead_conf['pairs'] = ['UNITTEST/USDT']
    mocker.patch("freqtrade.data.history.get_timerange", get_timerange)
    mocker.patch(f"{EXMS}.get_fee", return_value=0.0)
    mocker.patch(f"{EXMS}.get_min_pair_stake_amount", return_value=0.00001)
    mocker.patch(f"{EXMS}.get_max_pair_stake_amount", return_value=float("inf"))
    mocker.patch(
        "freqtrade.plugins.pairlistmanager.PairListManager.whitelist",
        PropertyMock(return_value=["UNITTEST/BTC"]),
    )
    lookahead_conf["pairs"] = ["UNITTEST/USDT"]

    lookahead_conf['timeframe'] = '5m'
    lookahead_conf['timerange'] = '20180119-20180122'
    lookahead_conf["timeframe"] = "5m"
    lookahead_conf["timerange"] = "20180119-20180122"

    # Patch scenario Parameter to allow for easy selection
    mocker.patch('freqtrade.strategy.hyper.HyperStrategyMixin.load_params_from_file',
                 return_value={
                     'params': {
                         "buy": {
                             "scenario": scenario
                         }
                     }
                 })
    mocker.patch(
        "freqtrade.strategy.hyper.HyperStrategyMixin.load_params_from_file",
        return_value={"params": {"buy": {"scenario": scenario}}},
    )

    strategy_obj = {'name': "strategy_test_v3_with_lookahead_bias"}
    strategy_obj = {"name": "strategy_test_v3_with_lookahead_bias"}
    instance = LookaheadAnalysis(lookahead_conf, strategy_obj)
    instance.start()
    # Assert init correct
@@ -380,10 +379,10 @@ def test_biased_strategy(lookahead_conf, mocker, caplog, scenario) -> None:


def test_config_overrides(lookahead_conf):
    lookahead_conf['max_open_trades'] = 0
    lookahead_conf['dry_run_wallet'] = 1
    lookahead_conf['pairs'] = ['BTC/USDT', 'ETH/USDT', 'SOL/USDT']
    lookahead_conf["max_open_trades"] = 0
    lookahead_conf["dry_run_wallet"] = 1
    lookahead_conf["pairs"] = ["BTC/USDT", "ETH/USDT", "SOL/USDT"]
    lookahead_conf = LookaheadAnalysisSubFunctions.calculate_config_overrides(lookahead_conf)

    assert lookahead_conf['dry_run_wallet'] == 1000000000
    assert lookahead_conf['max_open_trades'] == 3
    assert lookahead_conf["dry_run_wallet"] == 1000000000
    assert lookahead_conf["max_open_trades"] == 3

@@ -15,14 +15,13 @@ from tests.conftest import EXMS, get_args, log_has_re, patch_exchange

@pytest.fixture
def recursive_conf(default_conf_usdt, tmp_path):
    default_conf_usdt['user_data_dir'] = tmp_path
    default_conf_usdt['timerange'] = '20220101-20220501'
    default_conf_usdt["user_data_dir"] = tmp_path
    default_conf_usdt["timerange"] = "20220101-20220501"

    default_conf_usdt['strategy_path'] = str(
        Path(__file__).parent.parent / "strategy/strats")
    default_conf_usdt['strategy'] = 'strategy_test_v3_recursive_issue'
    default_conf_usdt['pairs'] = ['UNITTEST/USDT']
    default_conf_usdt['startup_candle'] = [100]
    default_conf_usdt["strategy_path"] = str(Path(__file__).parent.parent / "strategy/strats")
    default_conf_usdt["strategy"] = "strategy_test_v3_recursive_issue"
    default_conf_usdt["pairs"] = ["UNITTEST/USDT"]
    default_conf_usdt["startup_candle"] = [100]
    return default_conf_usdt


@@ -30,7 +29,7 @@ def test_start_recursive_analysis(mocker):
    single_mock = MagicMock()
    text_table_mock = MagicMock()
    mocker.patch.multiple(
        'freqtrade.optimize.analysis.recursive_helpers.RecursiveAnalysisSubFunctions',
        "freqtrade.optimize.analysis.recursive_helpers.RecursiveAnalysisSubFunctions",
        initialize_single_recursive_analysis=single_mock,
        text_table_recursive_analysis_instances=text_table_mock,
    )
@@ -43,10 +42,10 @@ def test_start_recursive_analysis(mocker):
        "--pairs",
        "UNITTEST/BTC",
        "--timerange",
        "20220101-20220201"
        "20220101-20220201",
    ]
    pargs = get_args(args)
    pargs['config'] = None
    pargs["config"] = None

    start_recursive_analysis(pargs)
    assert single_mock.call_count == 1
@@ -62,21 +61,19 @@ def test_start_recursive_analysis(mocker):
        "--strategy-path",
        str(Path(__file__).parent.parent / "strategy/strats"),
        "--pairs",
        "UNITTEST/BTC"
        "UNITTEST/BTC",
    ]
    pargs = get_args(args)
    pargs['config'] = None
    with pytest.raises(OperationalException,
                       match=r"Please set a timerange\..*"):
    pargs["config"] = None
    with pytest.raises(OperationalException, match=r"Please set a timerange\..*"):
        start_recursive_analysis(pargs)


def test_recursive_helper_no_strategy_defined(recursive_conf):
    conf = deepcopy(recursive_conf)
    conf['pairs'] = ['UNITTEST/USDT']
    del conf['strategy']
    with pytest.raises(OperationalException,
                       match=r"No Strategy specified"):
    conf["pairs"] = ["UNITTEST/USDT"]
    del conf["strategy"]
    with pytest.raises(OperationalException, match=r"No Strategy specified"):
        RecursiveAnalysisSubFunctions.start(conf)


@@ -84,7 +81,7 @@ def test_recursive_helper_start(recursive_conf, mocker) -> None:
    single_mock = MagicMock()
    text_table_mock = MagicMock()
    mocker.patch.multiple(
        'freqtrade.optimize.analysis.recursive_helpers.RecursiveAnalysisSubFunctions',
        "freqtrade.optimize.analysis.recursive_helpers.RecursiveAnalysisSubFunctions",
        initialize_single_recursive_analysis=single_mock,
        text_table_recursive_analysis_instances=text_table_mock,
    )
@@ -98,82 +95,83 @@ def test_recursive_helper_start(recursive_conf, mocker) -> None:

def test_recursive_helper_text_table_recursive_analysis_instances(recursive_conf):
    dict_diff = dict()
    dict_diff['rsi'] = {}
    dict_diff['rsi'][100] = "0.078%"
    dict_diff["rsi"] = {}
    dict_diff["rsi"][100] = "0.078%"

    strategy_obj = {
        'name': "strategy_test_v3_recursive_issue",
        'location': Path(recursive_conf['strategy_path'], f"{recursive_conf['strategy']}.py")
        "name": "strategy_test_v3_recursive_issue",
        "location": Path(recursive_conf["strategy_path"], f"{recursive_conf['strategy']}.py"),
    }

    instance = RecursiveAnalysis(recursive_conf, strategy_obj)
    instance.dict_recursive = dict_diff
    _table, _headers, data = (RecursiveAnalysisSubFunctions.
                              text_table_recursive_analysis_instances([instance]))
    _table, _headers, data = RecursiveAnalysisSubFunctions.text_table_recursive_analysis_instances(
        [instance]
    )

    # check row contents for a try that has too few signals
    assert data[0][0] == 'rsi'
    assert data[0][1] == '0.078%'
    assert data[0][0] == "rsi"
    assert data[0][1] == "0.078%"
    assert len(data[0]) == 2

    # now check when there is no issue
    dict_diff = dict()
    instance = RecursiveAnalysis(recursive_conf, strategy_obj)
    instance.dict_recursive = dict_diff
    _table, _headers, data = (RecursiveAnalysisSubFunctions.
                              text_table_recursive_analysis_instances([instance]))
    _table, _headers, data = RecursiveAnalysisSubFunctions.text_table_recursive_analysis_instances(
        [instance]
    )
    assert len(data) == 0


def test_initialize_single_recursive_analysis(recursive_conf, mocker, caplog):
    mocker.patch('freqtrade.data.history.get_timerange', get_timerange)
    mocker.patch("freqtrade.data.history.get_timerange", get_timerange)
    patch_exchange(mocker)
    mocker.patch('freqtrade.plugins.pairlistmanager.PairListManager.whitelist',
                 PropertyMock(return_value=['UNITTEST/BTC']))
    recursive_conf['pairs'] = ['UNITTEST/BTC']
    mocker.patch(
        "freqtrade.plugins.pairlistmanager.PairListManager.whitelist",
        PropertyMock(return_value=["UNITTEST/BTC"]),
    )
    recursive_conf["pairs"] = ["UNITTEST/BTC"]

    recursive_conf['timeframe'] = '5m'
    recursive_conf['timerange'] = '20180119-20180122'
    start_mock = mocker.patch('freqtrade.optimize.analysis.recursive.RecursiveAnalysis.start')
    recursive_conf["timeframe"] = "5m"
    recursive_conf["timerange"] = "20180119-20180122"
    start_mock = mocker.patch("freqtrade.optimize.analysis.recursive.RecursiveAnalysis.start")
    strategy_obj = {
        'name': "strategy_test_v3_recursive_issue",
        'location': Path(recursive_conf['strategy_path'], f"{recursive_conf['strategy']}.py")
        "name": "strategy_test_v3_recursive_issue",
        "location": Path(recursive_conf["strategy_path"], f"{recursive_conf['strategy']}.py"),
    }

    instance = RecursiveAnalysisSubFunctions.initialize_single_recursive_analysis(
        recursive_conf, strategy_obj)
        recursive_conf, strategy_obj
    )
    assert log_has_re(r"Recursive test of .* started\.", caplog)
    assert start_mock.call_count == 1

    assert instance.strategy_obj['name'] == "strategy_test_v3_recursive_issue"
    assert instance.strategy_obj["name"] == "strategy_test_v3_recursive_issue"


@pytest.mark.parametrize('scenario', [
    'no_bias', 'bias1', 'bias2'
])
@pytest.mark.parametrize("scenario", ["no_bias", "bias1", "bias2"])
def test_recursive_biased_strategy(recursive_conf, mocker, caplog, scenario) -> None:
    patch_exchange(mocker)
    mocker.patch(f'{EXMS}.get_fee', return_value=0.0)
    mocker.patch('freqtrade.data.history.get_timerange', get_timerange)
    mocker.patch('freqtrade.plugins.pairlistmanager.PairListManager.whitelist',
                 PropertyMock(return_value=['UNITTEST/BTC']))
    recursive_conf['pairs'] = ['UNITTEST/BTC']
    mocker.patch(f"{EXMS}.get_fee", return_value=0.0)
    mocker.patch("freqtrade.data.history.get_timerange", get_timerange)
    mocker.patch(
        "freqtrade.plugins.pairlistmanager.PairListManager.whitelist",
        PropertyMock(return_value=["UNITTEST/BTC"]),
    )
    recursive_conf["pairs"] = ["UNITTEST/BTC"]

    recursive_conf['timeframe'] = '5m'
    recursive_conf['timerange'] = '20180119-20180122'
    recursive_conf['startup_candle'] = [100]
    recursive_conf["timeframe"] = "5m"
    recursive_conf["timerange"] = "20180119-20180122"
    recursive_conf["startup_candle"] = [100]

    # Patch scenario Parameter to allow for easy selection
    mocker.patch('freqtrade.strategy.hyper.HyperStrategyMixin.load_params_from_file',
                 return_value={
                     'params': {
                         "buy": {
                             "scenario": scenario
                         }
                     }
                 })
    mocker.patch(
        "freqtrade.strategy.hyper.HyperStrategyMixin.load_params_from_file",
        return_value={"params": {"buy": {"scenario": scenario}}},
    )

    strategy_obj = {'name': "strategy_test_v3_recursive_issue"}
    strategy_obj = {"name": "strategy_test_v3_recursive_issue"}
    instance = RecursiveAnalysis(recursive_conf, strategy_obj)
    instance.start()
    # Assert init correct
@@ -181,7 +179,7 @@ def test_recursive_biased_strategy(recursive_conf, mocker, caplog, scenario) ->

    if scenario == "bias2":
        assert log_has_re("=> found lookahead in indicator rsi", caplog)
    diff_pct = abs(float(instance.dict_recursive['rsi'][100].replace("%", "")))
    diff_pct = abs(float(instance.dict_recursive["rsi"][100].replace("%", "")))
    # check non-biased strategy
    if scenario == "no_bias":
        assert diff_pct < 0.01

@@ -3,12 +3,12 @@ import pytest
from freqtrade.persistence import FtNoDBContext, PairLocks, Trade


@pytest.mark.parametrize('timeframe', ['', '5m', '1d'])
@pytest.mark.parametrize("timeframe", ["", "5m", "1d"])
def test_FtNoDBContext(timeframe):
    PairLocks.timeframe = ''
    PairLocks.timeframe = ""
    assert Trade.use_db is True
    assert PairLocks.use_db is True
    assert PairLocks.timeframe == ''
    assert PairLocks.timeframe == ""

    with FtNoDBContext(timeframe):
        assert Trade.use_db is False
@@ -18,7 +18,7 @@ def test_FtNoDBContext(timeframe):
    with FtNoDBContext():
        assert Trade.use_db is False
        assert PairLocks.use_db is False
        assert PairLocks.timeframe == ''
        assert PairLocks.timeframe == ""

    assert Trade.use_db is True
    assert PairLocks.use_db is True

@@ -46,7 +46,7 @@ def test_key_value_store(time_machine):
    KeyValueStore.delete_value("test_float")

    with pytest.raises(ValueError, match=r"Unknown value type"):
        KeyValueStore.store_value("test_float", {'some': 'dict'})
        KeyValueStore.store_value("test_float", {"some": "dict"})


@pytest.mark.usefixtures("init_persistence")

@@ -24,9 +24,9 @@ spot, margin, futures = TradingMode.SPOT, TradingMode.MARGIN, TradingMode.FUTURE

def test_init_create_session(default_conf):
    # Check if init create a session
    init_db(default_conf['db_url'])
    assert hasattr(Trade, 'session')
    assert 'scoped_session' in type(Trade.session).__name__
    init_db(default_conf["db_url"])
    assert hasattr(Trade, "session")
    assert "scoped_session" in type(Trade.session).__name__


def test_init_custom_db_url(default_conf, tmp_path):
@@ -34,43 +34,40 @@ def test_init_custom_db_url(default_conf, tmp_path):
    filename = tmp_path / "freqtrade2_test.sqlite"
    assert not filename.is_file()

    default_conf.update({'db_url': f'sqlite:///{filename}'})
    default_conf.update({"db_url": f"sqlite:///{filename}"})

    init_db(default_conf['db_url'])
    init_db(default_conf["db_url"])
    assert filename.is_file()
    r = Trade.session.execute(text("PRAGMA journal_mode"))
    assert r.first() == ('wal',)
    assert r.first() == ("wal",)


def test_init_invalid_db_url():
    # Update path to a value other than default, but still in-memory
    with pytest.raises(OperationalException, match=r'.*no valid database URL*'):
        init_db('unknown:///some.url')
    with pytest.raises(OperationalException, match=r".*no valid database URL*"):
        init_db("unknown:///some.url")

    with pytest.raises(OperationalException, match=r'Bad db-url.*For in-memory database, pl.*'):
        init_db('sqlite:///')
    with pytest.raises(OperationalException, match=r"Bad db-url.*For in-memory database, pl.*"):
        init_db("sqlite:///")


def test_init_prod_db(default_conf, mocker):
    default_conf.update({'dry_run': False})
    default_conf.update({'db_url': DEFAULT_DB_PROD_URL})
    default_conf.update({"dry_run": False})
    default_conf.update({"db_url": DEFAULT_DB_PROD_URL})

    create_engine_mock = mocker.patch('freqtrade.persistence.models.create_engine', MagicMock())
    create_engine_mock = mocker.patch("freqtrade.persistence.models.create_engine", MagicMock())

    init_db(default_conf['db_url'])
    init_db(default_conf["db_url"])
    assert create_engine_mock.call_count == 1
    assert create_engine_mock.mock_calls[0][1][0] == 'sqlite:///tradesv3.sqlite'
    assert create_engine_mock.mock_calls[0][1][0] == "sqlite:///tradesv3.sqlite"


def test_init_dryrun_db(default_conf, tmpdir):
    filename = f"{tmpdir}/freqtrade2_prod.sqlite"
    assert not Path(filename).is_file()
    default_conf.update({
        'dry_run': True,
        'db_url': f'sqlite:///{filename}'
    })
    default_conf.update({"dry_run": True, "db_url": f"sqlite:///{filename}"})

    init_db(default_conf['db_url'])
    init_db(default_conf["db_url"])
    assert Path(filename).is_file()


@@ -135,10 +132,9 @@ def test_migrate(mocker, default_conf, fee, caplog):
        '2019-11-28 12:44:24.000000',
        0.0, 0.0, 0.0, '5m',
        'buy_order', 'dry_stop_order_id222')
    """.format(fee=fee.return_value,
               stake=default_conf.get("stake_amount"),
               amount=amount
               )
    """.format(
        fee=fee.return_value, stake=default_conf.get("stake_amount"), amount=amount
    )
    insert_orders = f"""
        insert into orders (
            ft_trade_id,
@@ -237,8 +233,8 @@ def test_migrate(mocker, default_conf, fee, caplog):
        {amount * 0.00258580}
        )
    """
    engine = create_engine('sqlite://')
    mocker.patch('freqtrade.persistence.models.create_engine', lambda *args, **kwargs: engine)
    engine = create_engine("sqlite://")
    mocker.patch("freqtrade.persistence.models.create_engine", lambda *args, **kwargs: engine)

    # Create table using the old format
    with engine.begin() as connection:
@@ -254,7 +250,7 @@ def test_migrate(mocker, default_conf, fee, caplog):

        connection.execute(text("create table trades_bak1 as select * from trades"))
    # Run init to test migration
    init_db(default_conf['db_url'])
    init_db(default_conf["db_url"])

    trades = Trade.session.scalars(select(Trade)).all()
    assert len(trades) == 1
@@ -276,33 +272,35 @@ def test_migrate(mocker, default_conf, fee, caplog):
    assert trade.initial_stop_loss == 0.0
    assert trade.exit_reason is None
    assert trade.strategy is None
    assert trade.timeframe == '5m'
    assert trade.timeframe == "5m"
    assert log_has("trying trades_bak1", caplog)
    assert log_has("trying trades_bak2", caplog)
    assert log_has("Running database migration for trades - backup: trades_bak2, orders_bak0",
                   caplog)
    assert log_has(
        "Running database migration for trades - backup: trades_bak2, orders_bak0", caplog
    )
    assert log_has("Database migration finished.", caplog)
    assert pytest.approx(trade.open_trade_value) == trade._calc_open_trade_value(
        trade.amount, trade.open_rate)
        trade.amount, trade.open_rate
    )
    assert trade.close_profit_abs is None
    assert trade.stake_amount == trade.max_stake_amount

    orders = trade.orders
    assert len(orders) == 4
    assert orders[0].order_id == 'dry_buy_order'
    assert orders[0].ft_order_side == 'buy'
    assert orders[0].order_id == "dry_buy_order"
    assert orders[0].ft_order_side == "buy"

    # All dry-run stoploss orders will be closed
    assert orders[-1].order_id == 'dry_stop_order_id222'
    assert orders[-1].ft_order_side == 'stoploss'
    assert orders[-1].order_id == "dry_stop_order_id222"
    assert orders[-1].ft_order_side == "stoploss"
    assert orders[-1].ft_is_open is False

    assert orders[1].order_id == 'dry_buy_order22'
    assert orders[1].ft_order_side == 'buy'
    assert orders[1].order_id == "dry_buy_order22"
    assert orders[1].ft_order_side == "buy"
    assert orders[1].ft_is_open is True

    assert orders[2].order_id == 'dry_stop_order_id11X'
    assert orders[2].ft_order_side == 'stoploss'
    assert orders[2].order_id == "dry_stop_order_id11X"
    assert orders[2].ft_order_side == "stoploss"
    assert orders[2].ft_is_open is False

    orders1 = Order.session.scalars(select(Order)).all()
@@ -342,12 +340,11 @@ def test_migrate_too_old(mocker, default_conf, fee, caplog):
        VALUES ('binance', 'ETC/BTC', 1, {fee}, {fee},
        0.00258580, {stake}, {amount},
        '2019-11-28 12:44:24.000000')
    """.format(fee=fee.return_value,
               stake=default_conf.get("stake_amount"),
               amount=amount
               )
    engine = create_engine('sqlite://')
    mocker.patch('freqtrade.persistence.models.create_engine', lambda *args, **kwargs: engine)
    """.format(
        fee=fee.return_value, stake=default_conf.get("stake_amount"), amount=amount
    )
    engine = create_engine("sqlite://")
    mocker.patch("freqtrade.persistence.models.create_engine", lambda *args, **kwargs: engine)

    # Create table using the old format
    with engine.begin() as connection:
@@ -355,22 +352,22 @@ def test_migrate_too_old(mocker, default_conf, fee, caplog):
        connection.execute(text(insert_table_old))

    # Run init to test migration
    with pytest.raises(OperationalException, match=r'Your database seems to be very old'):
        init_db(default_conf['db_url'])
    with pytest.raises(OperationalException, match=r"Your database seems to be very old"):
        init_db(default_conf["db_url"])


def test_migrate_get_last_sequence_ids():
    engine = MagicMock()
    engine.begin = MagicMock()
    engine.name = 'postgresql'
    get_last_sequence_ids(engine, 'trades_bak', 'orders_bak')
    engine.name = "postgresql"
    get_last_sequence_ids(engine, "trades_bak", "orders_bak")

    assert engine.begin.call_count == 2
    engine.reset_mock()
    engine.begin.reset_mock()

    engine.name = 'somethingelse'
    get_last_sequence_ids(engine, 'trades_bak', 'orders_bak')
    engine.name = "somethingelse"
    get_last_sequence_ids(engine, "trades_bak", "orders_bak")

    assert engine.begin.call_count == 0

@@ -378,14 +375,14 @@ def test_migrate_get_last_sequence_ids():
def test_migrate_set_sequence_ids():
    engine = MagicMock()
    engine.begin = MagicMock()
    engine.name = 'postgresql'
    engine.name = "postgresql"
    set_sequence_ids(engine, 22, 55, 5)

    assert engine.begin.call_count == 1
    engine.reset_mock()
    engine.begin.reset_mock()

    engine.name = 'somethingelse'
    engine.name = "somethingelse"
    set_sequence_ids(engine, 22, 55, 6)

    assert engine.begin.call_count == 0
@@ -418,8 +415,8 @@ def test_migrate_pairlocks(mocker, default_conf, fee, caplog):
        id, pair, reason, lock_time, lock_end_time, active)
        VALUES (2, '*', 'Lock all', '2021-07-12 18:41:03', '2021-07-12 19:00:00', 1)
    """
    engine = create_engine('sqlite://')
    mocker.patch('freqtrade.persistence.models.create_engine', lambda *args, **kwargs: engine)
    engine = create_engine("sqlite://")
    mocker.patch("freqtrade.persistence.models.create_engine", lambda *args, **kwargs: engine)
    # Create table using the old format
    with engine.begin() as connection:
        connection.execute(text(create_table_old))
@@ -430,22 +427,28 @@ def test_migrate_pairlocks(mocker, default_conf, fee, caplog):
        connection.execute(text(create_index2))
        connection.execute(text(create_index3))

    init_db(default_conf['db_url'])
    init_db(default_conf["db_url"])

    assert len(PairLock.get_all_locks().all()) == 2
    assert len(PairLock.session.scalars(select(PairLock).filter(PairLock.pair == '*')).all()) == 1
    pairlocks = PairLock.session.scalars(select(PairLock).filter(PairLock.pair == 'ETH/BTC')).all()
    assert len(PairLock.session.scalars(select(PairLock).filter(PairLock.pair == "*")).all()) == 1
    pairlocks = PairLock.session.scalars(select(PairLock).filter(PairLock.pair == "ETH/BTC")).all()
    assert len(pairlocks) == 1
    assert pairlocks[0].pair == 'ETH/BTC'
    assert pairlocks[0].side == '*'
    assert pairlocks[0].pair == "ETH/BTC"
    assert pairlocks[0].side == "*"


@pytest.mark.parametrize('dialect', [
    'sqlite', 'postgresql', 'mysql', 'oracle', 'mssql',
])
@pytest.mark.parametrize(
    "dialect",
    [
        "sqlite",
        "postgresql",
        "mysql",
        "oracle",
        "mssql",
    ],
)
def test_create_table_compiles(dialect):

    dialect_mod = import_module(f"sqlalchemy.dialects.{dialect}")
    for table in ModelBase.metadata.tables.values():
        create_sql = str(CreateTable(table).compile(dialect=dialect_mod.dialect()))
        assert 'CREATE TABLE' in create_sql
        assert "CREATE TABLE" in create_sql

@@ -181,10 +181,10 @@ def test_trade_fromjson():
    Trade.commit()

    assert trade.id == 25
    assert trade.pair == 'ETH/USDT'
    assert trade.pair == "ETH/USDT"
    assert trade.open_date_utc == datetime(2022, 10, 18, 9, 12, 42, tzinfo=timezone.utc)
    assert isinstance(trade.open_date, datetime)
    assert trade.exit_reason == 'no longer good'
    assert trade.exit_reason == "no longer good"
    assert trade.realized_profit == 2.76315361
    assert trade.precision_mode == 2
    assert trade.amount_precision == 1.0
@@ -199,7 +199,6 @@ def test_trade_fromjson():

@pytest.mark.usefixtures("init_persistence")
def test_trade_serialize_load_back(fee):

    create_mock_trades_usdt(fee, None)

    t = Trade.get_trades([Trade.id == 1]).first()
@@ -219,12 +218,22 @@ def test_trade_serialize_load_back(fee):
    assert len(trade.orders) == len(t.orders)
    assert trade.orders[0].funding_fee == t.orders[0].funding_fee
    excluded = [
        'trade_id', 'quote_currency', 'open_timestamp', 'close_timestamp',
        'realized_profit_ratio', 'close_profit_pct',
        'trade_duration_s', 'trade_duration',
        'profit_ratio', 'profit_pct', 'profit_abs', 'stop_loss_abs',
        'initial_stop_loss_abs', 'open_fill_date', 'open_fill_timestamp',
        'orders',
        "trade_id",
        "quote_currency",
        "open_timestamp",
        "close_timestamp",
        "realized_profit_ratio",
        "close_profit_pct",
        "trade_duration_s",
        "trade_duration",
        "profit_ratio",
        "profit_pct",
        "profit_abs",
        "stop_loss_abs",
        "initial_stop_loss_abs",
        "open_fill_date",
        "open_fill_timestamp",
        "orders",
    ]
    failed = []
    # Ensure all attributes written can be read.
@@ -233,29 +242,33 @@ def test_trade_serialize_load_back(fee):
            continue
        tattr = getattr(trade, obj, None)
        if isinstance(tattr, datetime):
            tattr = tattr.strftime('%Y-%m-%d %H:%M:%S')
            tattr = tattr.strftime("%Y-%m-%d %H:%M:%S")
        if tattr != value:
            failed.append((obj, tattr, value))

    assert tjson.get('trade_id') == trade.id
    assert tjson.get('quote_currency') == trade.stake_currency
    assert tjson.get('stop_loss_abs') == trade.stop_loss
    assert tjson.get('initial_stop_loss_abs') == trade.initial_stop_loss
    assert tjson.get("trade_id") == trade.id
    assert tjson.get("quote_currency") == trade.stake_currency
    assert tjson.get("stop_loss_abs") == trade.stop_loss
    assert tjson.get("initial_stop_loss_abs") == trade.initial_stop_loss

    excluded_o = [
        'order_filled_timestamp', 'ft_is_entry', 'pair', 'is_open', 'order_timestamp',
        "order_filled_timestamp",
        "ft_is_entry",
        "pair",
        "is_open",
        "order_timestamp",
    ]
    order_obj = trade.orders[0]
    for obj, value in tjson['orders'][0].items():
    for obj, value in tjson["orders"][0].items():
        if obj in excluded_o:
            continue
        tattr = getattr(order_obj, obj, None)
        if isinstance(tattr, datetime):
            tattr = tattr.strftime('%Y-%m-%d %H:%M:%S')
            tattr = tattr.strftime("%Y-%m-%d %H:%M:%S")
        if tattr != value:
            failed.append((obj, tattr, value))

    assert tjson['orders'][0]['pair'] == order_obj.ft_pair
    assert tjson["orders"][0]["pair"] == order_obj.ft_pair
    assert not failed

    trade2 = LocalTrade.from_json(trade_string)