Merge pull request #11230 from freqtrade/dependabot/pip/develop/ruff-0.9.1

chore(deps-dev): bump ruff from 0.8.6 to 0.9.1
Matthias authored on 2025-01-13 19:10:51 +01:00 (committed by GitHub)
42 changed files with 71 additions and 90 deletions
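Almost everything below is mechanical reformatting: ruff 0.9.0 stabilized its 2025 style guide, and the formatter now also formats the contents of f-strings. Two patterns account for most hunks: quotes inside f-string replacement fields are normalized so the outer quote can follow the project-wide double-quote preference, and implicitly concatenated string literals that fit on one line are merged into a single literal. A minimal, illustrative sketch of both (the `names` list is a made-up stand-in, not from the diff):

```python
names = ["SharpeHyperOptLoss", "SortinoHyperOptLoss"]

# Before 0.9: outer single quotes were forced by the double-quoted separator.
old_msg = f'Built-in Hyperopt-loss-functions are: {", ".join(names)}'
# After 0.9: inner quotes flip to single so the outer quotes can be double.
new_msg = f"Built-in Hyperopt-loss-functions are: {', '.join(names)}"

# Before: one message split into two implicitly concatenated literals.
old_err = (
    "Suppress errors for any requested Hyperopt spaces "
    "that do not contain any parameters."
)
# After: merged, because the joined literal fits within the line limit.
new_err = "Suppress errors for any requested Hyperopt spaces that do not contain any parameters."

assert old_msg == new_msg and old_err == new_err  # purely cosmetic; runtime values are identical
```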

View File

@@ -31,7 +31,7 @@ repos:
- repo: https://github.com/charliermarsh/ruff-pre-commit
# Ruff version.
-rev: 'v0.8.6'
+rev: 'v0.9.1'
hooks:
- id: ruff
- id: ruff-format
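Besides this pin, the same bump lands in `requirements-dev.txt` below; the other changed files are presumably just the result of re-running the formatter under the new version (e.g. `pre-commit run ruff-format --all-files` or `ruff format .`), with no behavioral change intended.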

View File

@@ -334,7 +334,7 @@ AVAILABLE_CLI_OPTIONS = {
help="Specify the class name of the hyperopt loss function class (IHyperOptLoss). "
"Different functions can generate completely different results, "
"since the target for optimization is different. Built-in Hyperopt-loss-functions are: "
f'{", ".join(HYPEROPT_LOSS_BUILTIN)}',
f"{', '.join(HYPEROPT_LOSS_BUILTIN)}",
metavar="NAME",
),
"hyperoptexportfilename": Arg(
@@ -663,8 +663,7 @@ AVAILABLE_CLI_OPTIONS = {
"--ignore-missing-spaces",
"--ignore-unparameterized-spaces",
help=(
"Suppress errors for any requested Hyperopt spaces "
"that do not contain any parameters."
"Suppress errors for any requested Hyperopt spaces that do not contain any parameters."
),
action="store_true",
),

View File

@@ -15,8 +15,7 @@ logger = logging.getLogger(__name__)
def _check_data_config_download_sanity(config: Config) -> None:
if "days" in config and "timerange" in config:
raise ConfigurationError(
"--days and --timerange are mutually exclusive. "
"You can only specify one or the other."
"--days and --timerange are mutually exclusive. You can only specify one or the other."
)
if "pairs" not in config:

View File

@@ -259,8 +259,8 @@ def _download_pair_history(
logger.info(
f'Download history data for "{pair}", {timeframe}, '
f"{candle_type} and store in {datadir}. "
-f'From {format_ms_time(since_ms) if since_ms else "start"} to '
-f'{format_ms_time(until_ms) if until_ms else "now"}'
+f"From {format_ms_time(since_ms) if since_ms else 'start'} to "
+f"{format_ms_time(until_ms) if until_ms else 'now'}"
)
logger.debug(

View File

@@ -36,7 +36,7 @@ def check_exchange(config: Config, check_for_bad: bool = True) -> bool:
f"This command requires a configured exchange. You should either use "
f"`--exchange <exchange_name>` or specify a configuration file via `--config`.\n"
f"The following exchanges are available for Freqtrade: "
f'{", ".join(available_exchanges())}'
f"{', '.join(available_exchanges())}"
)
if not is_exchange_known_ccxt(exchange):
@@ -44,21 +44,21 @@ def check_exchange(config: Config, check_for_bad: bool = True) -> bool:
f'Exchange "{exchange}" is not known to the ccxt library '
f"and therefore not available for the bot.\n"
f"The following exchanges are available for Freqtrade: "
f'{", ".join(available_exchanges())}'
f"{', '.join(available_exchanges())}"
)
valid, reason, _ = validate_exchange(exchange)
if not valid:
if check_for_bad:
raise OperationalException(
f'Exchange "{exchange}" will not work with Freqtrade. ' f"Reason: {reason}"
f'Exchange "{exchange}" will not work with Freqtrade. Reason: {reason}'
)
else:
logger.warning(f'Exchange "{exchange}" will not work with Freqtrade. Reason: {reason}')
if MAP_EXCHANGE_CHILDCLASS.get(exchange, exchange) in SUPPORTED_EXCHANGES:
logger.info(
f'Exchange "{exchange}" is officially supported ' f"by the Freqtrade development team."
f'Exchange "{exchange}" is officially supported by the Freqtrade development team.'
)
else:
logger.warning(

View File

@@ -3004,8 +3004,7 @@ class Exchange:
trades.extend(t[x])
if from_id == from_id_next or t[-1][0] > until:
logger.debug(
f"Stopping because from_id did not change. "
f"Reached {t[-1][0]} > {until}"
f"Stopping because from_id did not change. Reached {t[-1][0]} > {until}"
)
# Reached the end of the defined-download period - add last trade as well.
if has_overlap:

View File

@@ -138,8 +138,8 @@ class BaseReinforcementLearningModel(IFreqaiModel):
)
logger.info(
-f'Training model on {len(dk.data_dictionary["train_features"].columns)}'
-f' features and {len(dd["train_features"])} data points'
+f"Training model on {len(dk.data_dictionary['train_features'].columns)}"
+f" features and {len(dd['train_features'])} data points"
)
self.set_train_and_eval_environments(dd, prices_train, prices_test, dk)
@@ -346,8 +346,7 @@ class BaseReinforcementLearningModel(IFreqaiModel):
)
elif prices_train.empty:
raise OperationalException(
"No prices found, please follow log warning "
"instructions to correct the strategy."
"No prices found, please follow log warning instructions to correct the strategy."
)
prices_train.rename(columns=rename_dict, inplace=True)

View File

@@ -43,8 +43,7 @@ class FreqaiMultiOutputClassifier(MultiOutputClassifier):
if y.ndim == 1:
raise ValueError(
"y must have at least two dimensions for "
"multi-output regression but has only one."
"y must have at least two dimensions for multi-output regression but has only one."
)
if sample_weight is not None and not has_fit_parameter(self.estimator, "sample_weight"):

View File

@@ -35,8 +35,7 @@ class FreqaiMultiOutputRegressor(MultiOutputRegressor):
if y.ndim == 1:
raise ValueError(
"y must have at least two dimensions for "
"multi-output regression but has only one."
"y must have at least two dimensions for multi-output regression but has only one."
)
if sample_weight is not None and not has_fit_parameter(self.estimator, "sample_weight"):

View File

@@ -426,7 +426,7 @@ class IFreqaiModel(ABC):
# append the historic data once per round
if self.dd.historic_data:
self.dd.update_historic_data(strategy, dk)
-logger.debug(f'Updating historic data on pair {metadata["pair"]}')
+logger.debug(f"Updating historic data on pair {metadata['pair']}")
self.track_current_candle()
(_, new_trained_timerange, data_load_timerange) = dk.check_if_new_training_required(

View File

@@ -45,8 +45,7 @@ class SKLearnRandomForestClassifier(BaseClassifierModel):
if self.freqai_info.get("continual_learning", False):
logger.warning(
"Continual learning is not supported for "
"SKLearnRandomForestClassifier, ignoring."
"Continual learning is not supported for SKLearnRandomForestClassifier, ignoring."
)
train_weights = data_dictionary["train_weights"]

View File

@@ -208,8 +208,7 @@ class LookaheadAnalysis(BaseAnalysis):
found_signals: int = self.full_varHolder.result["results"].shape[0] + 1
if found_signals >= self.targeted_trade_amount:
logger.info(
f"Found {found_signals} trades, "
f"calculating {self.targeted_trade_amount} trades."
f"Found {found_signals} trades, calculating {self.targeted_trade_amount} trades."
)
elif self.targeted_trade_amount >= found_signals >= self.minimum_trade_amount:
logger.info(f"Only found {found_signals} trades. Calculating all available trades.")

View File

@@ -112,6 +112,5 @@ class RecursiveAnalysisSubFunctions:
)
else:
logger.error(
"There was no strategy specified through --strategy "
"or timeframe was not specified."
"There was no strategy specified through --strategy or timeframe was not specified."
)

View File

@@ -163,15 +163,15 @@ def text_table_strategy(strategy_results, stake_currency: str, title: str):
# Align drawdown string on the center two space separator.
if "max_drawdown_account" in strategy_results[0]:
drawdown = [f'{t["max_drawdown_account"] * 100:.2f}' for t in strategy_results]
drawdown = [f"{t['max_drawdown_account'] * 100:.2f}" for t in strategy_results]
else:
# Support for prior backtest results
drawdown = [f'{t["max_drawdown_per"]:.2f}' for t in strategy_results]
drawdown = [f"{t['max_drawdown_per']:.2f}" for t in strategy_results]
dd_pad_abs = max([len(t["max_drawdown_abs"]) for t in strategy_results])
dd_pad_per = max([len(dd) for dd in drawdown])
drawdown = [
f'{t["max_drawdown_abs"]:>{dd_pad_abs}} {stake_currency} {dd:>{dd_pad_per}}%'
f"{t['max_drawdown_abs']:>{dd_pad_abs}} {stake_currency} {dd:>{dd_pad_per}}%"
for t, dd in zip(strategy_results, drawdown, strict=False)
]
@@ -315,7 +315,7 @@ def text_table_add_metrics(strat_results: dict) -> None:
(
"Profit factor",
(
-f'{strat_results["profit_factor"]:.2f}'
+f"{strat_results['profit_factor']:.2f}"
if "profit_factor" in strat_results
else "N/A"
),

View File

@@ -75,8 +75,7 @@ def init_db(db_url: str) -> None:
engine = create_engine(db_url, future=True, **kwargs)
except NoSuchModuleError:
raise OperationalException(
f"Given value for db_url: '{db_url}' "
f"is no valid database URL! (See {_SQL_DOCS_URL})"
f"Given value for db_url: '{db_url}' is no valid database URL! (See {_SQL_DOCS_URL})"
)
# https://docs.sqlalchemy.org/en/13/orm/contextual.html#thread-local-scope

View File

@@ -145,7 +145,7 @@ def add_indicators(fig, row, indicators: dict[str, dict], data: pd.DataFrame) ->
fig.add_trace(trace, row, 1)
else:
logger.info(
-'Indicator "%s" ignored. Reason: This indicator is not found ' "in your strategy.",
+'Indicator "%s" ignored. Reason: This indicator is not found in your strategy.',
indicator,
)
@@ -394,13 +394,12 @@ def add_areas(fig, row: int, data: pd.DataFrame, indicators) -> make_subplots:
)
elif indicator not in data:
logger.info(
-'Indicator "%s" ignored. Reason: This indicator is not '
-"found in your strategy.",
+'Indicator "%s" ignored. Reason: This indicator is not found in your strategy.',
indicator,
)
elif indicator_b not in data:
logger.info(
'fill_to: "%s" ignored. Reason: This indicator is not ' "in your strategy.",
'fill_to: "%s" ignored. Reason: This indicator is not in your strategy.',
indicator_b,
)
return fig

View File

@@ -158,8 +158,7 @@ class PriceFilter(IPairList):
if self._min_price != 0:
if price < self._min_price:
self.log_once(
f"Removed {pair} from whitelist, "
f"because last price < {self._min_price:.8f}",
f"Removed {pair} from whitelist, because last price < {self._min_price:.8f}",
logger.info,
)
return False
@@ -168,8 +167,7 @@ class PriceFilter(IPairList):
if self._max_price != 0:
if price > self._max_price:
self.log_once(
f"Removed {pair} from whitelist, "
f"because last price > {self._max_price:.8f}",
f"Removed {pair} from whitelist, because last price > {self._max_price:.8f}",
logger.info,
)
return False

View File

@@ -58,13 +58,12 @@ class RemotePairList(IPairList):
if self._mode not in ["whitelist", "blacklist"]:
raise OperationalException(
"`mode` not configured correctly. Supported Modes " 'are "whitelist","blacklist"'
'`mode` not configured correctly. Supported Modes are "whitelist","blacklist"'
)
if self._processing_mode not in ["filter", "append"]:
raise OperationalException(
"`processing_mode` not configured correctly. Supported Modes "
'are "filter","append"'
'`processing_mode` not configured correctly. Supported Modes are "filter","append"'
)
if self._pairlist_pos == 0 and self._mode == "blacklist":

View File

@@ -41,8 +41,7 @@ class SpreadFilter(IPairList):
Short whitelist method description - used for startup-messages
"""
return (
f"{self.name} - Filtering pairs with ask/bid diff above "
f"{self._max_spread_ratio:.2%}."
f"{self.name} - Filtering pairs with ask/bid diff above {self._max_spread_ratio:.2%}."
)
@staticmethod

View File

@@ -1,5 +1,5 @@
# isort: off
-from freqtrade.rpc.api_server.ws.types import WebSocketType # noqa: F401
+from freqtrade.rpc.api_server.ws.ws_types import WebSocketType # noqa: F401
from freqtrade.rpc.api_server.ws.proxy import WebSocketProxy # noqa: F401
from freqtrade.rpc.api_server.ws.serializer import HybridJSONWebSocketSerializer # noqa: F401
from freqtrade.rpc.api_server.ws.channel import WebSocketChannel # noqa: F401
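The rename from `ws/types.py` to `ws/ws_types.py` (here and in the two imports that follow) is the one structural change in the PR; it plausibly addresses ruff's flake8-builtins A005 check, which flags first-party modules that shadow standard-library module names such as `types`.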

View File

@@ -15,7 +15,7 @@ from freqtrade.rpc.api_server.ws.serializer import (
HybridJSONWebSocketSerializer,
WebSocketSerializer,
)
-from freqtrade.rpc.api_server.ws.types import WebSocketType
+from freqtrade.rpc.api_server.ws.ws_types import WebSocketType
from freqtrade.rpc.api_server.ws_schemas import WSMessageSchemaType

View File

@@ -3,7 +3,7 @@ from typing import Any
from fastapi import WebSocket as FastAPIWebSocket
from websockets.asyncio.client import ClientConnection as WebSocket
-from freqtrade.rpc.api_server.ws.types import WebSocketType
+from freqtrade.rpc.api_server.ws.ws_types import WebSocketType
class WebSocketProxy:

View File

@@ -94,8 +94,7 @@ class Webhook(RPCHandler):
self._send_msg(payload)
except KeyError as exc:
logger.exception(
"Problem calling Webhook. Please check your webhook configuration. "
"Exception: %s",
"Problem calling Webhook. Please check your webhook configuration. Exception: %s",
exc,
)

View File

@@ -7,7 +7,7 @@
-r docs/requirements-docs.txt
coveralls==4.0.1
-ruff==0.8.6
+ruff==0.9.1
mypy==1.14.1
pre-commit==4.0.1
pytest==8.3.4

View File

@@ -12,6 +12,6 @@ def test_startup_time():
start = time.time()
subprocess.run(["freqtrade", "-h"])
elapsed = time.time() - start
-assert (
-elapsed < MAXIMUM_STARTUP_TIME
-), "The startup time is too long, try to use lazy import in the command entry function"
+assert elapsed < MAXIMUM_STARTUP_TIME, (
+"The startup time is too long, try to use lazy import in the command entry function"
+)
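This hunk shows the 2025 style's new `assert` wrapping: when the statement overflows the line, the formatter now keeps the condition on the `assert` line and parenthesizes the message, rather than parenthesizing the condition and letting the message trail. The same reshaping appears in the orderflow and CCXT compat tests further down.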

View File

@@ -120,8 +120,7 @@ def test_ohlcv_fill_up_missing_data(testdatadir, caplog):
assert (data.columns == data2.columns).all()
assert log_has_re(
f"Missing data fillup for UNITTEST/BTC, 1m: before: "
f"{len(data)} - after: {len(data2)}.*",
f"Missing data fillup for UNITTEST/BTC, 1m: before: {len(data)} - after: {len(data2)}.*",
caplog,
)

View File

@@ -556,9 +556,9 @@ def test_analyze_with_orderflow(
assert col in df2.columns, f"Round2: Column {col} not found in df.columns"
if col not in ("stacked_imbalances_bid", "stacked_imbalances_ask"):
-assert (
-df2[col].count() == 5
-), f"Round2: Column {col} has {df2[col].count()} non-NaN values"
+assert df2[col].count() == 5, (
+f"Round2: Column {col} has {df2[col].count()} non-NaN values"
+)
lastval_trade2 = df2.at[len(df2) - 1, "trades"]
assert isinstance(lastval_trade2, list)

View File

@@ -64,7 +64,7 @@ def test_load_data_30min_timeframe(caplog, testdatadir) -> None:
ld = load_pair_history(pair="UNITTEST/BTC", timeframe="30m", datadir=testdatadir)
assert isinstance(ld, DataFrame)
assert not log_has(
-'Download history data for pair: "UNITTEST/BTC", timeframe: 30m ' "and store in None.",
+'Download history data for pair: "UNITTEST/BTC", timeframe: 30m and store in None.',
caplog,
)
@@ -86,7 +86,7 @@ def test_load_data_1min_timeframe(ohlcv_history, mocker, caplog, testdatadir) ->
load_data(datadir=testdatadir, timeframe="1m", pairs=["UNITTEST/BTC"])
assert file.is_file()
assert not log_has(
-'Download history data for pair: "UNITTEST/BTC", interval: 1m ' "and store in None.", caplog
+'Download history data for pair: "UNITTEST/BTC", interval: 1m and store in None.', caplog
)
@@ -96,7 +96,7 @@ def test_load_data_mark(ohlcv_history, mocker, caplog, testdatadir) -> None:
load_data(datadir=testdatadir, timeframe="1h", pairs=["UNITTEST/BTC"], candle_type="mark")
assert file.is_file()
assert not log_has(
-'Download history data for pair: "UNITTEST/USDT:USDT", interval: 1m ' "and store in None.",
+'Download history data for pair: "UNITTEST/USDT:USDT", interval: 1m and store in None.',
caplog,
)

View File

@@ -45,7 +45,7 @@ def test__get_params_binance(default_conf, mocker, side, order_type, time_in_for
)
def test_create_stoploss_order_binance(default_conf, mocker, limitratio, expected, side, trademode):
api_mock = MagicMock()
order_id = f"test_prod_buy_{randint(0, 10 ** 6)}"
order_id = f"test_prod_buy_{randint(0, 10**6)}"
order_type = "stop_loss_limit" if trademode == TradingMode.SPOT else "stop"
api_mock.create_order = MagicMock(return_value={"id": order_id, "info": {"foo": "bar"}})
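The repeated `10 ** 6` → `10**6` change in the exchange tests follows from the formatter now reaching inside f-string replacement fields: normal expression formatting applies there too, including Black's power-operator rule of hugging `**` when both operands are simple names or literals. A small sketch (the `padding` line is a hypothetical counter-example, not from the diff):

```python
from random import randint

# Both operands are simple literals, so ** is hugged - and since 0.9 this
# rule is applied inside f-string replacement fields as well:
order_id = f"test_prod_buy_{randint(0, 10**6)}"

# With a non-simple operand (a function call), the spaces are kept:
padding = 2 ** max(3, 5)

print(order_id, padding)
```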

View File

@@ -1260,7 +1260,7 @@ def test_create_dry_run_order_market_fill(
@pytest.mark.parametrize("exchange_name", EXCHANGES)
def test_create_order(default_conf, mocker, side, ordertype, rate, marketprice, exchange_name):
api_mock = MagicMock()
order_id = f"test_prod_{side}_{randint(0, 10 ** 6)}"
order_id = f"test_prod_{side}_{randint(0, 10**6)}"
api_mock.options = {} if not marketprice else {"createMarketBuyOrderRequiresPrice": True}
api_mock.create_order = MagicMock(
return_value={"id": order_id, "info": {"foo": "bar"}, "symbol": "XLTCUSDT", "amount": 1}
@@ -1339,7 +1339,7 @@ def test_buy_dry_run(default_conf, mocker, exchange_name):
@pytest.mark.parametrize("exchange_name", EXCHANGES)
def test_buy_prod(default_conf, mocker, exchange_name):
api_mock = MagicMock()
order_id = f"test_prod_buy_{randint(0, 10 ** 6)}"
order_id = f"test_prod_buy_{randint(0, 10**6)}"
order_type = "market"
time_in_force = "gtc"
api_mock.options = {}
@@ -1460,7 +1460,7 @@ def test_buy_prod(default_conf, mocker, exchange_name):
@pytest.mark.parametrize("exchange_name", EXCHANGES)
def test_buy_considers_time_in_force(default_conf, mocker, exchange_name):
api_mock = MagicMock()
order_id = f"test_prod_buy_{randint(0, 10 ** 6)}"
order_id = f"test_prod_buy_{randint(0, 10**6)}"
api_mock.options = {}
api_mock.create_order = MagicMock(
return_value={"id": order_id, "symbol": "ETH/BTC", "info": {"foo": "bar"}}
@@ -1537,7 +1537,7 @@ def test_sell_dry_run(default_conf, mocker):
@pytest.mark.parametrize("exchange_name", EXCHANGES)
def test_sell_prod(default_conf, mocker, exchange_name):
api_mock = MagicMock()
order_id = f"test_prod_sell_{randint(0, 10 ** 6)}"
order_id = f"test_prod_sell_{randint(0, 10**6)}"
order_type = "market"
api_mock.options = {}
api_mock.create_order = MagicMock(
@@ -1617,7 +1617,7 @@ def test_sell_prod(default_conf, mocker, exchange_name):
@pytest.mark.parametrize("exchange_name", EXCHANGES)
def test_sell_considers_time_in_force(default_conf, mocker, exchange_name):
api_mock = MagicMock()
order_id = f"test_prod_sell_{randint(0, 10 ** 6)}"
order_id = f"test_prod_sell_{randint(0, 10**6)}"
api_mock.create_order = MagicMock(
return_value={"id": order_id, "symbol": "ETH/BTC", "info": {"foo": "bar"}}
)
@@ -6221,7 +6221,7 @@ def test_get_liquidation_price(
)
def test_stoploss_contract_size(mocker, default_conf, contract_size, order_amount):
api_mock = MagicMock()
order_id = f"test_prod_buy_{randint(0, 10 ** 6)}"
order_id = f"test_prod_buy_{randint(0, 10**6)}"
api_mock.create_order = MagicMock(
return_value={

View File

@@ -19,7 +19,7 @@ from tests.exchange.test_exchange import ccxt_exceptionhandlers
)
def test_create_stoploss_order_htx(default_conf, mocker, limitratio, expected, side):
api_mock = MagicMock()
order_id = f"test_prod_buy_{randint(0, 10 ** 6)}"
order_id = f"test_prod_buy_{randint(0, 10**6)}"
order_type = "stop-limit"
api_mock.create_order = MagicMock(return_value={"id": order_id, "info": {"foo": "bar"}})

View File

@@ -23,7 +23,7 @@ STOPLOSS_LIMIT_ORDERTYPE = "stop-loss-limit"
)
def test_kraken_trading_agreement(default_conf, mocker, order_type, time_in_force, expected_params):
api_mock = MagicMock()
order_id = f"test_prod_{order_type}_{randint(0, 10 ** 6)}"
order_id = f"test_prod_{order_type}_{randint(0, 10**6)}"
api_mock.options = {}
api_mock.create_order = MagicMock(
return_value={"id": order_id, "symbol": "ETH/BTC", "info": {"foo": "bar"}}
@@ -168,7 +168,7 @@ def test_get_balances_prod_kraken(default_conf, mocker):
)
def test_create_stoploss_order_kraken(default_conf, mocker, ordertype, side, adjustedprice):
api_mock = MagicMock()
order_id = f"test_prod_buy_{randint(0, 10 ** 6)}"
order_id = f"test_prod_buy_{randint(0, 10**6)}"
api_mock.create_order = MagicMock(return_value={"id": order_id, "info": {"foo": "bar"}})

View File

@@ -20,7 +20,7 @@ from tests.exchange.test_exchange import ccxt_exceptionhandlers
)
def test_create_stoploss_order_kucoin(default_conf, mocker, limitratio, expected, side, order_type):
api_mock = MagicMock()
order_id = f"test_prod_buy_{randint(0, 10 ** 6)}"
order_id = f"test_prod_buy_{randint(0, 10**6)}"
api_mock.create_order = MagicMock(return_value={"id": order_id, "info": {"foo": "bar"}})
default_conf["dry_run"] = False
@@ -154,7 +154,7 @@ def test_stoploss_adjust_kucoin(mocker, default_conf):
)
def test_kucoin_create_order(default_conf, mocker, side, ordertype, rate):
api_mock = MagicMock()
order_id = f"test_prod_{side}_{randint(0, 10 ** 6)}"
order_id = f"test_prod_{side}_{randint(0, 10**6)}"
api_mock.create_order = MagicMock(
return_value={"id": order_id, "info": {"foo": "bar"}, "symbol": "XRP/USDT", "amount": 1}
)

View File

@@ -275,9 +275,9 @@ class TestCCXTExchange:
candles = res[3]
candle_count = exchange.ohlcv_candle_limit(timeframe, candle_type, since_ms) * factor
candle_count1 = (now.timestamp() * 1000 - since_ms) // timeframe_ms * factor
-assert len(candles) >= min(
-candle_count, candle_count1
-), f"{len(candles)} < {candle_count} in {timeframe}, Offset: {offset} {factor}"
+assert len(candles) >= min(candle_count, candle_count1), (
+f"{len(candles)} < {candle_count} in {timeframe}, Offset: {offset} {factor}"
+)
# Check if first-timeframe is either the start, or start + 1
assert candles[0][0] == since_ms or (since_ms + timeframe_ms)

View File

@@ -702,7 +702,7 @@ def test_process_trade_creation(
assert pytest.approx(trade.amount_requested) == 60 / ticker_usdt.return_value[ticker_side]
assert log_has(
f'{"Short" if is_short else "Long"} signal found: about create a new trade for ETH/USDT '
f"{'Short' if is_short else 'Long'} signal found: about create a new trade for ETH/USDT "
"with stake_amount: 60.0 ...",
caplog,
)

View File

@@ -2598,7 +2598,7 @@ def test_backtest_start_multi_strat_caching(
"Parameter -i/--timeframe detected ... Using timeframe: 1m ...",
"Parameter --timerange detected: 1510694220-1510700340 ...",
f"Using data directory: {testdatadir} ...",
"Loading data from 2017-11-14 20:57:00 " "up to 2017-11-14 22:59:00 (0 days).",
"Loading data from 2017-11-14 20:57:00 up to 2017-11-14 22:59:00 (0 days).",
"Parameter --enable-position-stacking detected ...",
]

View File

@@ -284,7 +284,7 @@ def test_api_token_login(botclient):
rc = client.get(
f"{BASE_URI}/count",
headers={
"Authorization": f'Bearer {rc.json()["access_token"]}',
"Authorization": f"Bearer {rc.json()['access_token']}",
"Origin": "http://example.com",
},
)
@@ -299,7 +299,7 @@ def test_api_token_refresh(botclient):
f"{BASE_URI}/token/refresh",
data=None,
headers={
"Authorization": f'Bearer {rc.json()["refresh_token"]}',
"Authorization": f"Bearer {rc.json()['refresh_token']}",
"Origin": "http://example.com",
},
)

View File

@@ -2876,8 +2876,7 @@ async def test_telegram_list_custom_data(default_conf_usdt, update, ticker, fee,
assert msg_mock.call_count == 3
assert "Found custom-data entries: " in msg_mock.call_args_list[0][0][0]
assert (
"*Key:* `test_int`\n*ID:* `1`\n*Trade ID:* `1`\n*Type:* `int`\n"
"*Value:* `1`\n*Create Date:*"
"*Key:* `test_int`\n*ID:* `1`\n*Trade ID:* `1`\n*Type:* `int`\n*Value:* `1`\n*Create Date:*"
) in msg_mock.call_args_list[1][0][0]
assert (
"*Key:* `test_dict`\n*ID:* `2`\n*Trade ID:* `1`\n*Type:* `dict`\n"

View File

@@ -383,8 +383,7 @@ def test_exception_send_msg(default_conf, mocker, caplog):
}
webhook.send_msg(msg)
assert log_has(
"Problem calling Webhook. Please check your webhook configuration. "
"Exception: 'DEADBEEF'",
"Problem calling Webhook. Please check your webhook configuration. Exception: 'DEADBEEF'",
caplog,
)

View File

@@ -111,7 +111,7 @@ def test_load_config_file_error_range(default_conf, mocker, caplog) -> None:
x = log_config_error_range("somefile", "Parse error at offset 4: Invalid value.")
assert isinstance(x, str)
-assert x == ' "max_open_trades": 1,\n "stake_currency": "BTC",\n' ' "stake_amount": .001,'
+assert x == ' "max_open_trades": 1,\n "stake_currency": "BTC",\n "stake_amount": .001,'
x = log_config_error_range("-", "")
assert x == ""

View File

@@ -31,7 +31,7 @@ def test_create_userdata_dir(mocker, tmp_path, caplog) -> None:
x = create_userdata_dir(tmp_path / "bar", create_dir=True)
assert md.call_count == 10
assert md.call_args[1]["parents"] is False
-assert log_has(f'Created user-data directory: {tmp_path / "bar"}', caplog)
+assert log_has(f"Created user-data directory: {tmp_path / 'bar'}", caplog)
assert isinstance(x, Path)
assert str(x) == str(tmp_path / "bar")