From 9c5ba0732a3440c95487cb13263bd394d0670647 Mon Sep 17 00:00:00 2001 From: Wagner Costa Santos Date: Tue, 8 Nov 2022 10:32:18 -0300 Subject: [PATCH 01/71] save predictions with date and merge by date --- freqtrade/freqai/data_kitchen.py | 23 +++++++++++++++-------- freqtrade/freqai/freqai_interface.py | 2 +- 2 files changed, 16 insertions(+), 9 deletions(-) diff --git a/freqtrade/freqai/data_kitchen.py b/freqtrade/freqai/data_kitchen.py index 12a3cd519..8fd42ee85 100644 --- a/freqtrade/freqai/data_kitchen.py +++ b/freqtrade/freqai/data_kitchen.py @@ -979,7 +979,8 @@ class FreqaiDataKitchen: return weights def get_predictions_to_append(self, predictions: DataFrame, - do_predict: npt.ArrayLike) -> DataFrame: + do_predict: npt.ArrayLike, + dataframe_backtest: DataFrame) -> DataFrame: """ Get backtest prediction from current backtest period """ @@ -1001,7 +1002,9 @@ class FreqaiDataKitchen: if self.freqai_config["feature_parameters"].get("DI_threshold", 0) > 0: append_df["DI_values"] = self.DI_values - return append_df + dataframe_backtest.reset_index(drop=True, inplace=True) + merged_df = pd.concat([dataframe_backtest["date"], append_df], axis=1) + return merged_df def append_predictions(self, append_df: DataFrame) -> None: """ @@ -1019,15 +1022,19 @@ class FreqaiDataKitchen: when it goes back to the strategy. These rows are not included in the backtest. """ - len_filler = len(dataframe) - len(self.full_df.index) # startup_candle_count - filler_df = pd.DataFrame( - np.zeros((len_filler, len(self.full_df.columns))), columns=self.full_df.columns - ) + # len_filler = len(dataframe) - len(self.full_df.index) # startup_candle_count + # filler_df = pd.DataFrame( + # np.zeros((len_filler, len(self.full_df.columns))), columns=self.full_df.columns + # ) - self.full_df = pd.concat([filler_df, self.full_df], axis=0, ignore_index=True) + # self.full_df = pd.concat([filler_df, self.full_df], axis=0, ignore_index=True) to_keep = [col for col in dataframe.columns if not col.startswith("&")] - self.return_dataframe = pd.concat([dataframe[to_keep], self.full_df], axis=1) + # self.return_dataframe = pd.concat([dataframe[to_keep], self.full_df], axis=1) + # self.full_df = DataFrame() + + self.return_dataframe = pd.merge(dataframe[to_keep], + self.full_df, how='left', on='date') self.full_df = DataFrame() return diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index ae123f852..59a82958b 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -301,7 +301,7 @@ class IFreqaiModel(ABC): self.model = self.dd.load_data(pair, dk) pred_df, do_preds = self.predict(dataframe_backtest, dk) - append_df = dk.get_predictions_to_append(pred_df, do_preds) + append_df = dk.get_predictions_to_append(pred_df, do_preds, dataframe_backtest) dk.append_predictions(append_df) dk.save_backtesting_prediction(append_df) From 8d9988a942aba46f4e7eb851d51f30497983e1b7 Mon Sep 17 00:00:00 2001 From: Wagner Costa Santos Date: Tue, 8 Nov 2022 11:06:23 -0300 Subject: [PATCH 02/71] enforce date column in backtesting freqai predictions files --- freqtrade/freqai/data_kitchen.py | 18 +++++------------- 1 file changed, 5 insertions(+), 13 deletions(-) diff --git a/freqtrade/freqai/data_kitchen.py b/freqtrade/freqai/data_kitchen.py index 8fd42ee85..b99447ac9 100644 --- a/freqtrade/freqai/data_kitchen.py +++ b/freqtrade/freqai/data_kitchen.py @@ -1021,20 +1021,11 @@ class FreqaiDataKitchen: Back fill values to before the backtesting range so that the dataframe matches 
size when it goes back to the strategy. These rows are not included in the backtest. """ - - # len_filler = len(dataframe) - len(self.full_df.index) # startup_candle_count - # filler_df = pd.DataFrame( - # np.zeros((len_filler, len(self.full_df.columns))), columns=self.full_df.columns - # ) - - # self.full_df = pd.concat([filler_df, self.full_df], axis=0, ignore_index=True) - to_keep = [col for col in dataframe.columns if not col.startswith("&")] - # self.return_dataframe = pd.concat([dataframe[to_keep], self.full_df], axis=1) - # self.full_df = DataFrame() - self.return_dataframe = pd.merge(dataframe[to_keep], self.full_df, how='left', on='date') + self.return_dataframe[self.full_df.columns] = ( + self.return_dataframe[self.full_df.columns].fillna(value=0)) self.full_df = DataFrame() return @@ -1368,12 +1359,13 @@ class FreqaiDataKitchen: if file_exists: append_df = self.get_backtesting_prediction() - if len(append_df) == len_backtest_df: + if len(append_df) == len_backtest_df and 'date' in append_df: logger.info(f"Found backtesting prediction file at {path_to_predictionfile}") return True else: logger.info("A new backtesting prediction file is required. " - "(Number of predictions is different from dataframe length).") + "(Number of predictions is different from dataframe length or " + "old prediction file version).") return False else: logger.info( From 3e57c18ac60e4d5310f3c4044b5d0ba59a0cb822 Mon Sep 17 00:00:00 2001 From: Wagner Costa Santos Date: Tue, 8 Nov 2022 18:20:39 -0300 Subject: [PATCH 03/71] add fix_live_predictions function to backtesting --- freqtrade/freqai/data_kitchen.py | 2 +- freqtrade/freqai/freqai_interface.py | 52 ++++++++++++++++++++++++++++ 2 files changed, 53 insertions(+), 1 deletion(-) diff --git a/freqtrade/freqai/data_kitchen.py b/freqtrade/freqai/data_kitchen.py index b99447ac9..53de00426 100644 --- a/freqtrade/freqai/data_kitchen.py +++ b/freqtrade/freqai/data_kitchen.py @@ -1014,7 +1014,7 @@ class FreqaiDataKitchen: if self.full_df.empty: self.full_df = append_df else: - self.full_df = pd.concat([self.full_df, append_df], axis=0) + self.full_df = pd.concat([self.full_df, append_df], axis=0, ignore_index=True) def fill_predictions(self, dataframe): """ diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index 59a82958b..ab0fb388a 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -305,6 +305,7 @@ class IFreqaiModel(ABC): dk.append_predictions(append_df) dk.save_backtesting_prediction(append_df) + self.backtesting_fit_live_predictions(dk) dk.fill_predictions(dataframe) return dk @@ -824,6 +825,57 @@ class IFreqaiModel(ABC): f"to {tr_train_stopts_str}, {train_it}/{total_trains} " "trains" ) + + def backtesting_fit_live_predictions(self, dk: FreqaiDataKitchen): + start_time = time.perf_counter() + fit_live_predictions_candles = self.freqai_info.get("fit_live_predictions_candles", 0) + if fit_live_predictions_candles: + predictions_columns = [col for col in dk.full_df.columns if ( + col.startswith("&") and + '_mean' not in col and + '_std' not in col and + col not in self.dk.data["extra_returns_per_train"]) + ] + self.dd.historic_predictions[self.dk.pair] = pd.DataFrame( + columns=dk.full_df.columns).astype(dk.full_df.dtypes) + + # for index, row in dk.full_df.iterrows(): + for index in range(len(dk.full_df)): + if index > fit_live_predictions_candles: + self.dd.historic_predictions[self.dk.pair] = ( + dk.full_df.iloc[index - fit_live_predictions_candles + 1:index + 1]) + else: + 
self.dd.historic_predictions[self.dk.pair] = dk.full_df.iloc[:index + 1] + # self.dd.historic_predictions[self.dk.pair].loc[index] = row.values.tolist() + # pd.concat(self.dd.historic_predictions[self.dk.pair], row.values) + self.fit_live_predictions(self.dk, self.dk.pair) + if index > fit_live_predictions_candles: + print(index) + + if index <= fit_live_predictions_candles: + dk.full_df.at[index, "warmed_up"] = 0 + else: + dk.full_df.at[index, "warmed_up"] = 1 + + for label in predictions_columns: + if dk.full_df[label].dtype == object: + continue + if "labels_mean" in self.dk.data: + dk.full_df.at[index, f"{label}_mean"] = ( + self.dk.data["labels_mean"][label]) + if "labels_std" in self.dk.data: + dk.full_df.at[index, f"{label}_std"] = self.dk.data["labels_std"][label] + + for extra_col in self.dk.data["extra_returns_per_train"]: + dk.full_df.at[index, f"{extra_col}"] = ( + self.dk.data["extra_returns_per_train"][extra_col]) + + end_time = time.perf_counter() + logger.info(f"Downloaded the tutorial in {start_time - end_time:0.4f} seconds") + + # print(f"Downloaded the tutorial in {start_time - end_time:0.4f} seconds") + + return # Following methods which are overridden by user made prediction models. # See freqai/prediction_models/CatboostPredictionModel.py for an example. From 8ee95db9270376eb459a172391f800986baec3c5 Mon Sep 17 00:00:00 2001 From: Wagner Costa Santos Date: Wed, 9 Nov 2022 09:51:42 -0300 Subject: [PATCH 04/71] refactoring backtesting_fit_live_predictions function --- freqtrade/freqai/freqai_interface.py | 39 ++++++++++------------------ 1 file changed, 14 insertions(+), 25 deletions(-) diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index ab0fb388a..1dc326079 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -827,37 +827,31 @@ class IFreqaiModel(ABC): ) def backtesting_fit_live_predictions(self, dk: FreqaiDataKitchen): - start_time = time.perf_counter() + """ + Apply fit_live_predictions function in backtesting with a dummy historic_predictions + :param dk: datakitchen object + """ fit_live_predictions_candles = self.freqai_info.get("fit_live_predictions_candles", 0) if fit_live_predictions_candles: - predictions_columns = [col for col in dk.full_df.columns if ( + label_columns = [col for col in dk.full_df.columns if ( col.startswith("&") and - '_mean' not in col and - '_std' not in col and + not (col.startswith("&") and col.endswith("_mean")) and + not (col.startswith("&") and col.endswith("_std")) and col not in self.dk.data["extra_returns_per_train"]) ] self.dd.historic_predictions[self.dk.pair] = pd.DataFrame( columns=dk.full_df.columns).astype(dk.full_df.dtypes) - # for index, row in dk.full_df.iterrows(): for index in range(len(dk.full_df)): - if index > fit_live_predictions_candles: + if index >= fit_live_predictions_candles: self.dd.historic_predictions[self.dk.pair] = ( - dk.full_df.iloc[index - fit_live_predictions_candles + 1:index + 1]) + dk.full_df.iloc[index - fit_live_predictions_candles:index]) else: - self.dd.historic_predictions[self.dk.pair] = dk.full_df.iloc[:index + 1] - # self.dd.historic_predictions[self.dk.pair].loc[index] = row.values.tolist() - # pd.concat(self.dd.historic_predictions[self.dk.pair], row.values) + self.dd.historic_predictions[self.dk.pair] = dk.full_df.iloc[:index] + self.fit_live_predictions(self.dk, self.dk.pair) - if index > fit_live_predictions_candles: - print(index) - - if index <= fit_live_predictions_candles: - dk.full_df.at[index, 
"warmed_up"] = 0 - else: - dk.full_df.at[index, "warmed_up"] = 1 - - for label in predictions_columns: + if index >= fit_live_predictions_candles: + for label in label_columns: if dk.full_df[label].dtype == object: continue if "labels_mean" in self.dk.data: @@ -869,13 +863,8 @@ class IFreqaiModel(ABC): for extra_col in self.dk.data["extra_returns_per_train"]: dk.full_df.at[index, f"{extra_col}"] = ( self.dk.data["extra_returns_per_train"][extra_col]) - - end_time = time.perf_counter() - logger.info(f"Downloaded the tutorial in {start_time - end_time:0.4f} seconds") - - # print(f"Downloaded the tutorial in {start_time - end_time:0.4f} seconds") - return + # Following methods which are overridden by user made prediction models. # See freqai/prediction_models/CatboostPredictionModel.py for an example. From 4f0f3e5b64cf38a96dbf8aa7fa1cb21177373d7a Mon Sep 17 00:00:00 2001 From: Wagner Costa Santos Date: Wed, 9 Nov 2022 10:07:24 -0300 Subject: [PATCH 05/71] removed unnecessary code --- freqtrade/freqai/freqai_interface.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index 1dc326079..09e965b82 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -829,6 +829,8 @@ class IFreqaiModel(ABC): def backtesting_fit_live_predictions(self, dk: FreqaiDataKitchen): """ Apply fit_live_predictions function in backtesting with a dummy historic_predictions + The loop is required to simulate dry/live operation, as it is not possible to predict + the type of logic implemented by the user. :param dk: datakitchen object """ fit_live_predictions_candles = self.freqai_info.get("fit_live_predictions_candles", 0) @@ -839,8 +841,6 @@ class IFreqaiModel(ABC): not (col.startswith("&") and col.endswith("_std")) and col not in self.dk.data["extra_returns_per_train"]) ] - self.dd.historic_predictions[self.dk.pair] = pd.DataFrame( - columns=dk.full_df.columns).astype(dk.full_df.dtypes) for index in range(len(dk.full_df)): if index >= fit_live_predictions_candles: From 7adca9735862e1f186f8a6150bcdfca42c067e2c Mon Sep 17 00:00:00 2001 From: Matthias Date: Sat, 12 Nov 2022 15:39:54 +0100 Subject: [PATCH 06/71] Improve python GC behavior --- freqtrade/main.py | 3 +++ freqtrade/util/gc_setup.py | 18 ++++++++++++++++++ 2 files changed, 21 insertions(+) create mode 100644 freqtrade/util/gc_setup.py diff --git a/freqtrade/main.py b/freqtrade/main.py index 754c536d0..0a46747ea 100755 --- a/freqtrade/main.py +++ b/freqtrade/main.py @@ -7,6 +7,8 @@ import logging import sys from typing import Any, List +from freqtrade.util.gc_setup import gc_set_threshold + # check min. python version if sys.version_info < (3, 8): # pragma: no cover @@ -36,6 +38,7 @@ def main(sysargv: List[str] = None) -> None: # Call subcommand. if 'func' in args: logger.info(f'freqtrade {__version__}') + gc_set_threshold() return_code = args['func'](args) else: # No subcommand was issued. 
diff --git a/freqtrade/util/gc_setup.py b/freqtrade/util/gc_setup.py new file mode 100644 index 000000000..a3532cbab --- /dev/null +++ b/freqtrade/util/gc_setup.py @@ -0,0 +1,18 @@ +import gc +import logging +import platform + + +logger = logging.getLogger(__name__) + + +def gc_set_threshold(): + """ + Reduce number of GC runs to improve performance (explanation video) + https://www.youtube.com/watch?v=p4Sn6UcFTOU + + """ + if platform.python_implementation() == "CPython": + # allocs, g1, g2 = gc.get_threshold() + gc.set_threshold(50_000, 500, 1000) + logger.debug("Adjusting python allocations to reduce GC runs") From 27fa9f1f4e8542f90a0361f207b0a131011eac93 Mon Sep 17 00:00:00 2001 From: Wagner Costa Santos Date: Sat, 12 Nov 2022 14:37:23 -0300 Subject: [PATCH 07/71] backtest saved dataframe from live --- freqtrade/freqai/data_kitchen.py | 109 ++++++++++++++++++++++++++- freqtrade/freqai/freqai_interface.py | 42 ++++++++++- freqtrade/freqai/utils.py | 7 +- 3 files changed, 154 insertions(+), 4 deletions(-) diff --git a/freqtrade/freqai/data_kitchen.py b/freqtrade/freqai/data_kitchen.py index 53de00426..8e30b0aec 100644 --- a/freqtrade/freqai/data_kitchen.py +++ b/freqtrade/freqai/data_kitchen.py @@ -9,7 +9,7 @@ from typing import Any, Dict, List, Tuple import numpy as np import numpy.typing as npt import pandas as pd -from pandas import DataFrame +from pandas import DataFrame, read_feather from scipy import stats from sklearn import linear_model from sklearn.cluster import DBSCAN @@ -73,6 +73,9 @@ class FreqaiDataKitchen: self.training_features_list: List = [] self.model_filename: str = "" self.backtesting_results_path = Path() + self.backtesting_live_model_folder_path = Path() + self.backtesting_live_model_path = Path() + self.backtesting_live_model_bkp_path = Path() self.backtest_predictions_folder: str = "backtesting_predictions" self.live = live self.pair = pair @@ -1488,3 +1491,107 @@ class FreqaiDataKitchen: dataframe.columns = dataframe.columns.str.replace(c, "") return dataframe + + def set_backtesting_live_dataframe_folder_path( + self + ) -> None: + """ + Set live backtesting dataframe path + :param pair: current pair + """ + self.backtesting_live_model_folder_path = Path( + self.full_path / self.backtest_predictions_folder / "live_data") + + def set_backtesting_live_dataframe_path( + self, pair: str + ) -> None: + """ + Set live backtesting dataframe path + :param pair: current pair + """ + self.set_backtesting_live_dataframe_folder_path() + if not self.backtesting_live_model_folder_path.is_dir(): + self.backtesting_live_model_folder_path.mkdir(parents=True, exist_ok=True) + + pair_path = pair.split(":")[0].replace("/", "_").lower() + file_name = f"live_backtesting_{pair_path}.feather" + path_to_live_backtesting_file = Path(self.full_path / + self.backtesting_live_model_folder_path / + file_name) + path_to_live_backtesting_bkp_file = Path(self.full_path / + self.backtesting_live_model_folder_path / + file_name.replace(".feather", ".backup.feather")) + + self.backtesting_live_model_path = path_to_live_backtesting_file + self.backtesting_live_model_bkp_path = path_to_live_backtesting_bkp_file + + def save_backtesting_live_dataframe( + self, dataframe: DataFrame, pair: str + ) -> None: + """ + Save live backtesting dataframe to feather file format + :param dataframe: current live dataframe + :param pair: current pair + """ + self.set_backtesting_live_dataframe_path(pair) + last_row_df = dataframe.tail(1) + if self.backtesting_live_model_path.is_file(): + saved_dataframe = 
self.get_backtesting_live_dataframe() + concat_dataframe = pd.concat([saved_dataframe, last_row_df]) + concat_dataframe.reset_index(drop=True).to_feather( + self.backtesting_live_model_path, compression_level=9, compression='lz4') + else: + last_row_df.reset_index(drop=True).to_feather( + self.backtesting_live_model_path, compression_level=9, compression='lz4') + + shutil.copy(self.backtesting_live_model_path, self.backtesting_live_model_bkp_path) + + def get_backtesting_live_dataframe( + self + ) -> DataFrame: + """ + Get live backtesting dataframe from feather file format + return: saved dataframe from previous dry/run or live + """ + if self.backtesting_live_model_path.is_file(): + saved_dataframe = DataFrame() + try: + saved_dataframe = read_feather(self.backtesting_live_model_path) + except Exception: + saved_dataframe = read_feather(self.backtesting_live_model_bkp_path) + return saved_dataframe + else: + raise OperationalException( + "Saved pair file not found" + ) + + def get_timerange_from_backtesting_live_dataframe( + self) -> TimeRange: + """ + Returns timerange information based on a FreqAI model directory + :param models_path: FreqAI model path + + :return: timerange calculated from saved live data + """ + all_assets_start_dates = [] + all_assets_end_dates = [] + self.set_backtesting_live_dataframe_folder_path() + if not self.backtesting_live_model_folder_path.is_dir(): + raise OperationalException( + 'Saved live data not found. Saved lived data is required ' + 'to run backtest with the freqai-backtest-live-models option ' + 'and save_live_data_backtest config option as true' + ) + for file_in_dir in self.backtesting_live_model_folder_path.iterdir(): + if file_in_dir.is_file() and "backup" not in file_in_dir.name: + saved_dataframe = read_feather(file_in_dir) + all_assets_start_dates.append(saved_dataframe.date.min()) + all_assets_end_dates.append(saved_dataframe.date.max()) + start_date = min(all_assets_start_dates) + end_date = min(all_assets_end_dates) + # add 1 day to string timerange to ensure BT module will load all dataframe data + end_date = end_date + timedelta(days=1) + backtesting_timerange = TimeRange( + 'date', 'date', int(start_date.timestamp()), int(end_date.timestamp()) + ) + return backtesting_timerange diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index 09e965b82..47d75dfaa 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -67,6 +67,11 @@ class IFreqaiModel(ABC): self.save_backtest_models: bool = self.freqai_info.get("save_backtest_models", True) if self.save_backtest_models: logger.info('Backtesting module configured to save all models.') + self.save_live_data_backtest: bool = self.freqai_info.get( + "save_live_data_backtest", True) + if self.save_live_data_backtest: + logger.info('Live configured to save data for backtest.') + self.dd = FreqaiDataDrawer(Path(self.full_path), self.config, self.follow_mode) # set current candle to arbitrary historical date self.current_candle: datetime = datetime.fromtimestamp(637887600, tz=timezone.utc) @@ -147,12 +152,20 @@ class IFreqaiModel(ABC): dataframe = self.dk.use_strategy_to_populate_indicators( strategy, prediction_dataframe=dataframe, pair=metadata["pair"] ) - dk = self.start_backtesting(dataframe, metadata, self.dk) + if not self.save_live_data_backtest: + dk = self.start_backtesting(dataframe, metadata, self.dk) + dataframe = dk.remove_features_from_df(dk.return_dataframe) + else: + dk = 
self.start_backtesting_from_live_saved_files( + dataframe, metadata, self.dk) + dataframe = dk.return_dataframe - dataframe = dk.remove_features_from_df(dk.return_dataframe) self.clean_up() if self.live: self.inference_timer('stop', metadata["pair"]) + if self.save_live_data_backtest: + dk.save_backtesting_live_dataframe(dataframe, metadata["pair"]) + return dataframe def clean_up(self): @@ -310,6 +323,31 @@ class IFreqaiModel(ABC): return dk + def start_backtesting_from_live_saved_files( + self, dataframe: DataFrame, metadata: dict, dk: FreqaiDataKitchen + ) -> FreqaiDataKitchen: + """ + :param dataframe: DataFrame = strategy passed dataframe + :param metadata: Dict = pair metadata + :param dk: FreqaiDataKitchen = Data management/analysis tool associated to present pair only + :return: + FreqaiDataKitchen = Data management/analysis tool associated to present pair only + """ + pair = metadata["pair"] + dk.return_dataframe = dataframe + + dk.return_dataframe = dataframe + self.dk.set_backtesting_live_dataframe_path(pair) + saved_dataframe = self.dk.get_backtesting_live_dataframe() + columns_to_drop = list(set(dk.return_dataframe.columns).difference( + ["date", "open", "high", "low", "close", "volume"])) + saved_dataframe = saved_dataframe.drop( + columns=["open", "high", "low", "close", "volume"]) + dk.return_dataframe = dk.return_dataframe.drop(columns=list(columns_to_drop)) + dk.return_dataframe = pd.merge(dk.return_dataframe, saved_dataframe, how='left', on='date') + # dk.return_dataframe = dk.return_dataframe[saved_dataframe.columns].fillna(0) + return dk + def start_live( self, dataframe: DataFrame, metadata: dict, strategy: IStrategy, dk: FreqaiDataKitchen ) -> FreqaiDataKitchen: diff --git a/freqtrade/freqai/utils.py b/freqtrade/freqai/utils.py index e854bcf0b..ad38a339b 100644 --- a/freqtrade/freqai/utils.py +++ b/freqtrade/freqai/utils.py @@ -229,7 +229,12 @@ def get_timerange_backtest_live_models(config: Config) -> str: """ dk = FreqaiDataKitchen(config) models_path = dk.get_full_models_path(config) - timerange, _ = dk.get_timerange_and_assets_end_dates_from_ready_models(models_path) + timerange: TimeRange = TimeRange() + if not config.get("save_live_data_backtest", True): + timerange, _ = dk.get_timerange_and_assets_end_dates_from_ready_models(models_path) + else: + timerange = dk.get_timerange_from_backtesting_live_dataframe() + start_date = datetime.fromtimestamp(timerange.startts, tz=timezone.utc) end_date = datetime.fromtimestamp(timerange.stopts, tz=timezone.utc) tr = f"{start_date.strftime('%Y%m%d')}-{end_date.strftime('%Y%m%d')}" From b01e4e3dbfcfebc72990e03399a7bcb93f231d5f Mon Sep 17 00:00:00 2001 From: Wagner Costa Santos Date: Thu, 17 Nov 2022 10:14:30 -0300 Subject: [PATCH 08/71] change default value - save_live_data_backtest as false --- freqtrade/freqai/freqai_interface.py | 2 +- freqtrade/freqai/utils.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index 47d75dfaa..cc6cd3c9b 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -68,7 +68,7 @@ class IFreqaiModel(ABC): if self.save_backtest_models: logger.info('Backtesting module configured to save all models.') self.save_live_data_backtest: bool = self.freqai_info.get( - "save_live_data_backtest", True) + "save_live_data_backtest", False) if self.save_live_data_backtest: logger.info('Live configured to save data for backtest.') diff --git a/freqtrade/freqai/utils.py 
b/freqtrade/freqai/utils.py index ad38a339b..a4e7a9a55 100644 --- a/freqtrade/freqai/utils.py +++ b/freqtrade/freqai/utils.py @@ -230,7 +230,7 @@ def get_timerange_backtest_live_models(config: Config) -> str: dk = FreqaiDataKitchen(config) models_path = dk.get_full_models_path(config) timerange: TimeRange = TimeRange() - if not config.get("save_live_data_backtest", True): + if not config.get("save_live_data_backtest", False): timerange, _ = dk.get_timerange_and_assets_end_dates_from_ready_models(models_path) else: timerange = dk.get_timerange_from_backtesting_live_dataframe() From 913749c81bad3c85c882391bf0b6341967b0e89a Mon Sep 17 00:00:00 2001 From: Wagner Costa Santos Date: Thu, 17 Nov 2022 10:30:16 -0300 Subject: [PATCH 09/71] backtesting_from_live_saved_files - code refactoring --- docs/freqai-running.md | 2 +- freqtrade/freqai/data_kitchen.py | 27 +++++++++++---------------- freqtrade/freqai/freqai_interface.py | 2 -- 3 files changed, 12 insertions(+), 19 deletions(-) diff --git a/docs/freqai-running.md b/docs/freqai-running.md index f97ed0ab4..d2f9595be 100644 --- a/docs/freqai-running.md +++ b/docs/freqai-running.md @@ -81,7 +81,7 @@ To save the models generated during a particular backtest so that you can start ### Backtest live models -FreqAI allow you to reuse ready models through the backtest parameter `--freqai-backtest-live-models`. This can be useful when you want to reuse models generated in dry/run for comparison or other study. For that, you must set `"purge_old_models"` to `True` in the config. +FreqAI allow you to reuse ready models through the backtest parameter `--freqai-backtest-live-models`. This can be useful when you want to reuse models generated in dry/run for comparison or other study. For that, you must set `"purge_old_models"` to `False` in the config. The `--timerange` parameter must not be informed, as it will be automatically calculated through the training end dates of the models. diff --git a/freqtrade/freqai/data_kitchen.py b/freqtrade/freqai/data_kitchen.py index c7fae7770..d5427c4a5 100644 --- a/freqtrade/freqai/data_kitchen.py +++ b/freqtrade/freqai/data_kitchen.py @@ -1519,15 +1519,13 @@ class FreqaiDataKitchen: pair_path = pair.split(":")[0].replace("/", "_").lower() file_name = f"live_backtesting_{pair_path}.feather" - path_to_live_backtesting_file = Path(self.full_path / - self.backtesting_live_model_folder_path / - file_name) - path_to_live_backtesting_bkp_file = Path(self.full_path / - self.backtesting_live_model_folder_path / - file_name.replace(".feather", ".backup.feather")) - - self.backtesting_live_model_path = path_to_live_backtesting_file - self.backtesting_live_model_bkp_path = path_to_live_backtesting_bkp_file + self.backtesting_live_model_path = Path(self.full_path / + self.backtesting_live_model_folder_path / + file_name) + self.backtesting_live_model_bkp_path = Path( + self.full_path / + self.backtesting_live_model_folder_path / + file_name.replace(".feather", ".backup.feather")) def save_backtesting_live_dataframe( self, dataframe: DataFrame, pair: str @@ -1566,15 +1564,12 @@ class FreqaiDataKitchen: return saved_dataframe else: raise OperationalException( - "Saved pair file not found" + "Saved live backtesting dataframe file not found." 
) - def get_timerange_from_backtesting_live_dataframe( - self) -> TimeRange: + def get_timerange_from_backtesting_live_dataframe(self) -> TimeRange: """ - Returns timerange information based on a FreqAI model directory - :param models_path: FreqAI model path - + Returns timerange information based on live backtesting dataframe file :return: timerange calculated from saved live data """ all_assets_start_dates = [] @@ -1592,7 +1587,7 @@ class FreqaiDataKitchen: all_assets_start_dates.append(saved_dataframe.date.min()) all_assets_end_dates.append(saved_dataframe.date.max()) start_date = min(all_assets_start_dates) - end_date = min(all_assets_end_dates) + end_date = max(all_assets_end_dates) # add 1 day to string timerange to ensure BT module will load all dataframe data end_date = end_date + timedelta(days=1) backtesting_timerange = TimeRange( diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index cc6cd3c9b..8d84d70c5 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -334,8 +334,6 @@ class IFreqaiModel(ABC): FreqaiDataKitchen = Data management/analysis tool associated to present pair only """ pair = metadata["pair"] - dk.return_dataframe = dataframe - dk.return_dataframe = dataframe self.dk.set_backtesting_live_dataframe_path(pair) saved_dataframe = self.dk.get_backtesting_live_dataframe() From 99bff9cbfa149b0c28b91c2736a472aad47c8633 Mon Sep 17 00:00:00 2001 From: Wagner Costa Santos Date: Thu, 17 Nov 2022 10:30:51 -0300 Subject: [PATCH 10/71] backtesting_from_live_saved_files - code refactoring --- freqtrade/freqai/data_kitchen.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/freqtrade/freqai/data_kitchen.py b/freqtrade/freqai/data_kitchen.py index d5427c4a5..ed78cfee5 100644 --- a/freqtrade/freqai/data_kitchen.py +++ b/freqtrade/freqai/data_kitchen.py @@ -1519,9 +1519,10 @@ class FreqaiDataKitchen: pair_path = pair.split(":")[0].replace("/", "_").lower() file_name = f"live_backtesting_{pair_path}.feather" - self.backtesting_live_model_path = Path(self.full_path / - self.backtesting_live_model_folder_path / - file_name) + self.backtesting_live_model_path = Path( + self.full_path / + self.backtesting_live_model_folder_path / + file_name) self.backtesting_live_model_bkp_path = Path( self.full_path / self.backtesting_live_model_folder_path / From 3903b04d3f42f4465d85165913e265c75ffc9f76 Mon Sep 17 00:00:00 2001 From: Wagner Costa Santos Date: Thu, 17 Nov 2022 15:20:07 -0300 Subject: [PATCH 11/71] save_live_data_backtest - added docs and tests --- docs/freqai-parameter-table.md | 1 + docs/freqai-running.md | 5 ++- freqtrade/freqai/data_kitchen.py | 10 +++-- freqtrade/freqai/freqai_interface.py | 10 ++--- tests/freqai/test_freqai_datakitchen.py | 44 ++++++++++++++++++++ tests/freqai/test_freqai_interface.py | 55 +++++++++++++++++++++++++ 6 files changed, 114 insertions(+), 11 deletions(-) diff --git a/docs/freqai-parameter-table.md b/docs/freqai-parameter-table.md index c027a12b1..2961b1b8d 100644 --- a/docs/freqai-parameter-table.md +++ b/docs/freqai-parameter-table.md @@ -15,6 +15,7 @@ Mandatory parameters are marked as **Required** and have to be set in one of the | `expiration_hours` | Avoid making predictions if a model is more than `expiration_hours` old.
**Datatype:** Positive integer.
Default: `0` (models never expire). | `purge_old_models` | Delete obsolete models.
**Datatype:** Boolean.
Default: `False` (all historic models remain on disk). | `save_backtest_models` | Save models to disk when running backtesting. Backtesting operates most efficiently by saving the prediction data and reusing them directly for subsequent runs (when you wish to tune entry/exit parameters). Saving backtesting models to disk also allows to use the same model files for starting a dry/live instance with the same model `identifier`.
**Datatype:** Boolean.
Default: `False` (no models are saved). +| `save_live_data_backtest` | Save live dataframe during dry/live runs to reuse in backtesting with [Backtest live models](freqai-running.md#backtest_live_models)) option. | `fit_live_predictions_candles` | Number of historical candles to use for computing target (label) statistics from prediction data, instead of from the training dataset (more information can be found [here](freqai-configuration.md#creating-a-dynamic-target-threshold)).
**Datatype:** Positive integer. | `follow_mode` | Use a `follower` that will look for models associated with a specific `identifier` and load those for inferencing. A `follower` will **not** train new models.
**Datatype:** Boolean.
Default: `False`. | `continual_learning` | Use the final state of the most recently trained model as starting point for the new model, allowing for incremental learning (more information can be found [here](freqai-running.md#continual-learning)).
**Datatype:** Boolean.
Default: `False`. diff --git a/docs/freqai-running.md b/docs/freqai-running.md index d2f9595be..4c90a4885 100644 --- a/docs/freqai-running.md +++ b/docs/freqai-running.md @@ -81,7 +81,10 @@ To save the models generated during a particular backtest so that you can start ### Backtest live models -FreqAI allow you to reuse ready models through the backtest parameter `--freqai-backtest-live-models`. This can be useful when you want to reuse models generated in dry/run for comparison or other study. For that, you must set `"purge_old_models"` to `False` in the config. +FreqAI allow you to reuse ready models through the backtest parameter `--freqai-backtest-live-models`. This can be useful when you want to reuse predictions generated in dry/run for comparison or other study. For that, you have 2 options: + +1. Set `"save_live_data_backtest"` to `True` in the config. With this option, FreqAI will save the live dataframe for reuse in backtesting. This option requires less disk space and backtesting will run faster. +2. Set `"purge_old_models"` to `False` and `"save_live_data_backtest"` to `False` in the config. In this case, FreqAI will use the saved models to make the predictions in backtesting. This option requires more disk space and the backtest will have a longer execution time. The `--timerange` parameter must not be informed, as it will be automatically calculated through the training end dates of the models. diff --git a/freqtrade/freqai/data_kitchen.py b/freqtrade/freqai/data_kitchen.py index ed78cfee5..d93060568 100644 --- a/freqtrade/freqai/data_kitchen.py +++ b/freqtrade/freqai/data_kitchen.py @@ -1541,14 +1541,16 @@ class FreqaiDataKitchen: if self.backtesting_live_model_path.is_file(): saved_dataframe = self.get_backtesting_live_dataframe() concat_dataframe = pd.concat([saved_dataframe, last_row_df]) - concat_dataframe.reset_index(drop=True).to_feather( - self.backtesting_live_model_path, compression_level=9, compression='lz4') + self.save_backtesting_live_dataframe_to_feather(concat_dataframe) else: - last_row_df.reset_index(drop=True).to_feather( - self.backtesting_live_model_path, compression_level=9, compression='lz4') + self.save_backtesting_live_dataframe_to_feather(last_row_df) shutil.copy(self.backtesting_live_model_path, self.backtesting_live_model_bkp_path) + def save_backtesting_live_dataframe_to_feather(self, dataframe: DataFrame): + dataframe.reset_index(drop=True).to_feather( + self.backtesting_live_model_path, compression_level=9, compression='lz4') + def get_backtesting_live_dataframe( self ) -> DataFrame: diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index 8d84d70c5..a0dac5725 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -694,7 +694,8 @@ class IFreqaiModel(ABC): for label in full_labels: if self.dd.historic_predictions[dk.pair][label].dtype == object: continue - f = spy.stats.norm.fit(self.dd.historic_predictions[dk.pair][label].tail(num_candles)) + f = spy.stats.norm.fit( + self.dd.historic_predictions[dk.pair][label].fillna(0).tail(num_candles)) dk.data["labels_mean"][label], dk.data["labels_std"][label] = f[0], f[1] return @@ -882,11 +883,7 @@ class IFreqaiModel(ABC): if index >= fit_live_predictions_candles: self.dd.historic_predictions[self.dk.pair] = ( dk.full_df.iloc[index - fit_live_predictions_candles:index]) - else: - self.dd.historic_predictions[self.dk.pair] = dk.full_df.iloc[:index] - - self.fit_live_predictions(self.dk, self.dk.pair) - if index >= 
fit_live_predictions_candles: + self.fit_live_predictions(self.dk, self.dk.pair) for label in label_columns: if dk.full_df[label].dtype == object: continue @@ -899,6 +896,7 @@ class IFreqaiModel(ABC): for extra_col in self.dk.data["extra_returns_per_train"]: dk.full_df.at[index, f"{extra_col}"] = ( self.dk.data["extra_returns_per_train"][extra_col]) + return # Following methods which are overridden by user made prediction models. diff --git a/tests/freqai/test_freqai_datakitchen.py b/tests/freqai/test_freqai_datakitchen.py index 9abe60edb..ca7c19c94 100644 --- a/tests/freqai/test_freqai_datakitchen.py +++ b/tests/freqai/test_freqai_datakitchen.py @@ -259,3 +259,47 @@ def test_get_full_model_path(mocker, freqai_conf, model): model_path = freqai.dk.get_full_models_path(freqai_conf) assert model_path.is_dir() is True + + +def test_save_backtesting_live_dataframe(mocker, freqai_conf): + freqai, dataframe = make_unfiltered_dataframe(mocker, freqai_conf) + dataframe_without_last_candle = dataframe.copy() + dataframe_without_last_candle.drop(dataframe.tail(1).index, inplace=True) + freqai_conf.update({"save_live_data_backtest": True}) + freqai.dk.save_backtesting_live_dataframe(dataframe_without_last_candle, "ADA/BTC") + saved_dataframe = freqai.dk.get_backtesting_live_dataframe() + assert len(saved_dataframe) == 1 + assert saved_dataframe.iloc[-1, 0] == dataframe_without_last_candle.iloc[-1, 0] + freqai.dk.save_backtesting_live_dataframe(dataframe, "ADA/BTC") + saved_dataframe = freqai.dk.get_backtesting_live_dataframe() + assert len(saved_dataframe) == 2 + assert saved_dataframe.iloc[-1, 0] == dataframe.iloc[-1, 0] + assert saved_dataframe.iloc[-2, 0] == dataframe.iloc[-2, 0] + + +def test_get_timerange_from_backtesting_live_dataframe(mocker, freqai_conf): + freqai, dataframe = make_unfiltered_dataframe(mocker, freqai_conf) + freqai_conf.update({"save_live_data_backtest": True}) + freqai.dk.set_backtesting_live_dataframe_path("ADA/BTC") + freqai.dk.save_backtesting_live_dataframe_to_feather(dataframe) + timerange = freqai.dk.get_timerange_from_backtesting_live_dataframe() + assert timerange.startts == 1516406400 + assert timerange.stopts == 1517356500 + + +def test_get_timerange_from_backtesting_live_dataframe_folder_not_found(mocker, freqai_conf): + freqai, _ = make_unfiltered_dataframe(mocker, freqai_conf) + with pytest.raises( + OperationalException, + match=r'Saved live data not found.*' + ): + freqai.dk.get_timerange_from_backtesting_live_dataframe() + + +def test_saved_live_bt_file_not_found(mocker, freqai_conf): + freqai, _ = make_unfiltered_dataframe(mocker, freqai_conf) + with pytest.raises( + OperationalException, + match=r'.*live backtesting dataframe file not found.*' + ): + freqai.dk.get_backtesting_live_dataframe() diff --git a/tests/freqai/test_freqai_interface.py b/tests/freqai/test_freqai_interface.py index 25bc99580..ed634de55 100644 --- a/tests/freqai/test_freqai_interface.py +++ b/tests/freqai/test_freqai_interface.py @@ -300,6 +300,61 @@ def test_start_backtesting_from_existing_folder(mocker, freqai_conf, caplog): shutil.rmtree(Path(freqai.dk.full_path)) +def test_start_backtesting_from_saved_live_dataframe(mocker, freqai_conf, caplog): + freqai_conf.update({"save_live_data_backtest": True}) + freqai_conf.update({"freqai_backtest_live_models": True}) + + strategy = get_patched_freqai_strategy(mocker, freqai_conf) + exchange = get_patched_exchange(mocker, freqai_conf) + strategy.dp = DataProvider(freqai_conf, exchange) + strategy.freqai_info = freqai_conf.get("freqai", {}) + 
freqai = strategy.freqai + freqai.live = False + freqai.dk = FreqaiDataKitchen(freqai_conf) + timerange = TimeRange.parse_timerange("20180110-20180130") + freqai.dd.load_all_pair_histories(timerange, freqai.dk) + sub_timerange = TimeRange.parse_timerange("20180110-20180130") + corr_df, base_df = freqai.dd.get_base_and_corr_dataframes(sub_timerange, "LTC/BTC", freqai.dk) + df = freqai.dk.use_strategy_to_populate_indicators(strategy, corr_df, base_df, "LTC/BTC") + metadata = {"pair": "ADA/BTC"} + + # create a dummy live dataframe file with 10 rows + dataframe_predictions = df.tail(10).copy() + dataframe_predictions["&s_close"] = dataframe_predictions["close"] * 1.1 + freqai.dk.set_backtesting_live_dataframe_path("ADA/BTC") + freqai.dk.save_backtesting_live_dataframe_to_feather(dataframe_predictions) + + freqai.start_backtesting_from_live_saved_files(df, metadata, freqai.dk) + assert len(freqai.dk.return_dataframe) == len(df) + assert len(freqai.dk.return_dataframe[freqai.dk.return_dataframe["&s_close"] > 0]) == ( + len(dataframe_predictions)) + shutil.rmtree(Path(freqai.dk.full_path)) + + +def test_backtesting_fit_live_predictions(mocker, freqai_conf, caplog): + freqai_conf.get("freqai", {}).update({"fit_live_predictions_candles": 10}) + strategy = get_patched_freqai_strategy(mocker, freqai_conf) + exchange = get_patched_exchange(mocker, freqai_conf) + strategy.dp = DataProvider(freqai_conf, exchange) + strategy.freqai_info = freqai_conf.get("freqai", {}) + freqai = strategy.freqai + freqai.live = False + freqai.dk = FreqaiDataKitchen(freqai_conf) + timerange = TimeRange.parse_timerange("20180128-20180130") + freqai.dd.load_all_pair_histories(timerange, freqai.dk) + sub_timerange = TimeRange.parse_timerange("20180129-20180130") + corr_df, base_df = freqai.dd.get_base_and_corr_dataframes(sub_timerange, "LTC/BTC", freqai.dk) + df = freqai.dk.use_strategy_to_populate_indicators(strategy, corr_df, base_df, "LTC/BTC") + freqai.dk.pair = "ADA/BTC" + freqai.dk.full_df = df + assert "&-s_close_mean" not in freqai.dk.full_df.columns + assert "&-s_close_std" not in freqai.dk.full_df.columns + freqai.backtesting_fit_live_predictions(freqai.dk) + assert "&-s_close_mean" in freqai.dk.full_df.columns + assert "&-s_close_std" in freqai.dk.full_df.columns + shutil.rmtree(Path(freqai.dk.full_path)) + + def test_follow_mode(mocker, freqai_conf): freqai_conf.update({"timerange": "20180110-20180130"}) From 80d070e9eed2a05980818af817594c6cae0b0f9a Mon Sep 17 00:00:00 2001 From: Wagner Costa Santos Date: Sat, 19 Nov 2022 14:15:58 -0300 Subject: [PATCH 12/71] update code to use historic_predictions for freqai_backtest_live_models --- docs/freqai-parameter-table.md | 2 +- docs/freqai-running.md | 4 +- freqtrade/freqai/data_drawer.py | 21 +++++ freqtrade/freqai/data_kitchen.py | 114 +++++------------------- freqtrade/freqai/freqai_interface.py | 53 +++++++---- freqtrade/freqai/utils.py | 2 +- tests/freqai/test_freqai_datakitchen.py | 33 +------ tests/freqai/test_freqai_interface.py | 31 ------- 8 files changed, 86 insertions(+), 174 deletions(-) diff --git a/docs/freqai-parameter-table.md b/docs/freqai-parameter-table.md index 2961b1b8d..059d56a1f 100644 --- a/docs/freqai-parameter-table.md +++ b/docs/freqai-parameter-table.md @@ -15,7 +15,7 @@ Mandatory parameters are marked as **Required** and have to be set in one of the | `expiration_hours` | Avoid making predictions if a model is more than `expiration_hours` old.
**Datatype:** Positive integer.
Default: `0` (models never expire). | `purge_old_models` | Delete obsolete models.
**Datatype:** Boolean.
Default: `False` (all historic models remain on disk). | `save_backtest_models` | Save models to disk when running backtesting. Backtesting operates most efficiently by saving the prediction data and reusing them directly for subsequent runs (when you wish to tune entry/exit parameters). Saving backtesting models to disk also allows to use the same model files for starting a dry/live instance with the same model `identifier`.
**Datatype:** Boolean.
Default: `False` (no models are saved). -| `save_live_data_backtest` | Save live dataframe during dry/live runs to reuse in backtesting with [Backtest live models](freqai-running.md#backtest_live_models)) option. +| `backtest_using_historic_predictions` | Reuse `historic_predictions` in backtesting with [Backtest live models](freqai-running.md#backtest_live_models)) option.
Default: `True` | `fit_live_predictions_candles` | Number of historical candles to use for computing target (label) statistics from prediction data, instead of from the training dataset (more information can be found [here](freqai-configuration.md#creating-a-dynamic-target-threshold)).
**Datatype:** Positive integer. | `follow_mode` | Use a `follower` that will look for models associated with a specific `identifier` and load those for inferencing. A `follower` will **not** train new models.
**Datatype:** Boolean.
Default: `False`. | `continual_learning` | Use the final state of the most recently trained model as starting point for the new model, allowing for incremental learning (more information can be found [here](freqai-running.md#continual-learning)).
**Datatype:** Boolean.
Default: `False`. diff --git a/docs/freqai-running.md b/docs/freqai-running.md index 4c90a4885..d777b180e 100644 --- a/docs/freqai-running.md +++ b/docs/freqai-running.md @@ -83,8 +83,8 @@ To save the models generated during a particular backtest so that you can start FreqAI allow you to reuse ready models through the backtest parameter `--freqai-backtest-live-models`. This can be useful when you want to reuse predictions generated in dry/run for comparison or other study. For that, you have 2 options: -1. Set `"save_live_data_backtest"` to `True` in the config. With this option, FreqAI will save the live dataframe for reuse in backtesting. This option requires less disk space and backtesting will run faster. -2. Set `"purge_old_models"` to `False` and `"save_live_data_backtest"` to `False` in the config. In this case, FreqAI will use the saved models to make the predictions in backtesting. This option requires more disk space and the backtest will have a longer execution time. +1. Set `"backtest_using_historic_predictions"` to `True` in the config. With this option, FreqAI will reuse `historic_predictions` in backtesting. This option requires less disk space and backtesting will run faster. +2. Set `"purge_old_models"` to `False` and `"backtest_using_historic_predictions"` to `False` in the config. In this case, FreqAI will use the saved models to make the predictions in backtesting. This option requires more disk space and the backtest will have a longer execution time. The `--timerange` parameter must not be informed, as it will be automatically calculated through the training end dates of the models. diff --git a/freqtrade/freqai/data_drawer.py b/freqtrade/freqai/data_drawer.py index 038ddaf2e..e83b05aaa 100644 --- a/freqtrade/freqai/data_drawer.py +++ b/freqtrade/freqai/data_drawer.py @@ -81,6 +81,7 @@ class FreqaiDataDrawer: self.historic_predictions_bkp_path = Path( self.full_path / "historic_predictions.backup.pkl") self.pair_dictionary_path = Path(self.full_path / "pair_dictionary.json") + self.global_metadata_path = Path(self.full_path / "global_metadata.json") self.metric_tracker_path = Path(self.full_path / "metric_tracker.json") self.follow_mode = follow_mode if follow_mode: @@ -125,6 +126,17 @@ class FreqaiDataDrawer: self.update_metric_tracker('cpu_load5min', load5 / cpus, pair) self.update_metric_tracker('cpu_load15min', load15 / cpus, pair) + def load_global_metadata_from_disk(self): + """ + Locate and load a previously saved global metadata in present model folder. 
+ """ + exists = self.global_metadata_path.is_file() + if exists: + with open(self.global_metadata_path, "r") as fp: + metatada_dict = rapidjson.load(fp, number_mode=rapidjson.NM_NATIVE) + return metatada_dict + return {} + def load_drawer_from_disk(self): """ Locate and load a previously saved data drawer full of all pair model metadata in @@ -225,6 +237,15 @@ class FreqaiDataDrawer: rapidjson.dump(self.follower_dict, fp, default=self.np_encoder, number_mode=rapidjson.NM_NATIVE) + def save_global_metadata_to_disk(self, metadata: Dict[str, Any]): + """ + Save global metadata json to disk + """ + with self.save_lock: + with open(self.global_metadata_path, 'w') as fp: + rapidjson.dump(metadata, fp, default=self.np_encoder, + number_mode=rapidjson.NM_NATIVE) + def create_follower_dict(self): """ Create or dictionary for each follower to maintain unique persistent prediction targets diff --git a/freqtrade/freqai/data_kitchen.py b/freqtrade/freqai/data_kitchen.py index be2fb68b1..641c95725 100644 --- a/freqtrade/freqai/data_kitchen.py +++ b/freqtrade/freqai/data_kitchen.py @@ -9,7 +9,7 @@ from typing import Any, Dict, List, Tuple import numpy as np import numpy.typing as npt import pandas as pd -from pandas import DataFrame, read_feather +from pandas import DataFrame from scipy import stats from sklearn import linear_model from sklearn.cluster import DBSCAN @@ -74,9 +74,6 @@ class FreqaiDataKitchen: self.training_features_list: List = [] self.model_filename: str = "" self.backtesting_results_path = Path() - self.backtesting_live_model_folder_path = Path() - self.backtesting_live_model_path = Path() - self.backtesting_live_model_bkp_path = Path() self.backtest_predictions_folder: str = "backtesting_predictions" self.live = live self.pair = pair @@ -90,7 +87,9 @@ class FreqaiDataKitchen: self.full_path = self.get_full_models_path(self.config) if self.backtest_live_models: - if self.pair: + if self.pair and not ( + self.freqai_config.get("backtest_using_historic_predictions", True) + ): self.set_timerange_from_ready_models() (self.training_timeranges, self.backtesting_timeranges) = self.split_timerange_live_models() @@ -1488,101 +1487,30 @@ class FreqaiDataKitchen: return dataframe - def set_backtesting_live_dataframe_folder_path( - self - ) -> None: - """ - Set live backtesting dataframe path - :param pair: current pair - """ - self.backtesting_live_model_folder_path = Path( - self.full_path / self.backtest_predictions_folder / "live_data") - - def set_backtesting_live_dataframe_path( - self, pair: str - ) -> None: - """ - Set live backtesting dataframe path - :param pair: current pair - """ - self.set_backtesting_live_dataframe_folder_path() - if not self.backtesting_live_model_folder_path.is_dir(): - self.backtesting_live_model_folder_path.mkdir(parents=True, exist_ok=True) - - pair_path = pair.split(":")[0].replace("/", "_").lower() - file_name = f"live_backtesting_{pair_path}.feather" - self.backtesting_live_model_path = Path( - self.full_path / - self.backtesting_live_model_folder_path / - file_name) - self.backtesting_live_model_bkp_path = Path( - self.full_path / - self.backtesting_live_model_folder_path / - file_name.replace(".feather", ".backup.feather")) - - def save_backtesting_live_dataframe( - self, dataframe: DataFrame, pair: str - ) -> None: - """ - Save live backtesting dataframe to feather file format - :param dataframe: current live dataframe - :param pair: current pair - """ - self.set_backtesting_live_dataframe_path(pair) - last_row_df = dataframe.tail(1) - if 
self.backtesting_live_model_path.is_file(): - saved_dataframe = self.get_backtesting_live_dataframe() - concat_dataframe = pd.concat([saved_dataframe, last_row_df]) - self.save_backtesting_live_dataframe_to_feather(concat_dataframe) - else: - self.save_backtesting_live_dataframe_to_feather(last_row_df) - - shutil.copy(self.backtesting_live_model_path, self.backtesting_live_model_bkp_path) - - def save_backtesting_live_dataframe_to_feather(self, dataframe: DataFrame): - dataframe.reset_index(drop=True).to_feather( - self.backtesting_live_model_path, compression_level=9, compression='lz4') - - def get_backtesting_live_dataframe( - self - ) -> DataFrame: - """ - Get live backtesting dataframe from feather file format - return: saved dataframe from previous dry/run or live - """ - if self.backtesting_live_model_path.is_file(): - saved_dataframe = DataFrame() - try: - saved_dataframe = read_feather(self.backtesting_live_model_path) - except Exception: - saved_dataframe = read_feather(self.backtesting_live_model_bkp_path) - return saved_dataframe - else: - raise OperationalException( - "Saved live backtesting dataframe file not found." - ) - def get_timerange_from_backtesting_live_dataframe(self) -> TimeRange: """ - Returns timerange information based on live backtesting dataframe file + Returns timerange information based on historic predictions file :return: timerange calculated from saved live data """ - all_assets_start_dates = [] - all_assets_end_dates = [] - self.set_backtesting_live_dataframe_folder_path() - if not self.backtesting_live_model_folder_path.is_dir(): + from freqtrade.freqai.data_drawer import FreqaiDataDrawer + dd = FreqaiDataDrawer(Path(self.full_path), self.config) + if not dd.historic_predictions_path.is_file(): raise OperationalException( - 'Saved live data not found. Saved lived data is required ' + 'Historic predictions not found. 
Historic predictions data is required ' 'to run backtest with the freqai-backtest-live-models option ' - 'and save_live_data_backtest config option as true' + 'and backtest_using_historic_predictions config option as true' ) - for file_in_dir in self.backtesting_live_model_folder_path.iterdir(): - if file_in_dir.is_file() and "backup" not in file_in_dir.name: - saved_dataframe = read_feather(file_in_dir) - all_assets_start_dates.append(saved_dataframe.date.min()) - all_assets_end_dates.append(saved_dataframe.date.max()) - start_date = min(all_assets_start_dates) - end_date = max(all_assets_end_dates) + + dd.load_historic_predictions_from_disk() + + all_pairs_end_dates = [] + for pair in dd.historic_predictions: + pair_historic_data = dd.historic_predictions[pair] + all_pairs_end_dates.append(pair_historic_data.date_pred.max()) + + global_metadata = dd.load_global_metadata_from_disk() + start_date = datetime.fromtimestamp(int(global_metadata["start_dry_live_date"])) + end_date = max(all_pairs_end_dates) # add 1 day to string timerange to ensure BT module will load all dataframe data end_date = end_date + timedelta(days=1) backtesting_timerange = TimeRange( diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index c48758df4..473fe939f 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -53,6 +53,7 @@ class IFreqaiModel(ABC): def __init__(self, config: Config) -> None: self.config = config + self.metadata: Dict[str, Any] = {} self.assert_config(self.config) self.freqai_info: Dict[str, Any] = config["freqai"] self.data_split_parameters: Dict[str, Any] = config.get("freqai", {}).get( @@ -67,10 +68,10 @@ class IFreqaiModel(ABC): self.save_backtest_models: bool = self.freqai_info.get("save_backtest_models", True) if self.save_backtest_models: logger.info('Backtesting module configured to save all models.') - self.save_live_data_backtest: bool = self.freqai_info.get( - "save_live_data_backtest", False) - if self.save_live_data_backtest: - logger.info('Live configured to save data for backtest.') + self.backtest_using_historic_predictions: bool = self.freqai_info.get( + "backtest_using_historic_predictions", True) + if self.backtest_using_historic_predictions: + logger.info('Backtesting live models configured to use historic predictions.') self.dd = FreqaiDataDrawer(Path(self.full_path), self.config, self.follow_mode) # set current candle to arbitrary historical date @@ -103,6 +104,7 @@ class IFreqaiModel(ABC): self.get_corr_dataframes: bool = True self._threads: List[threading.Thread] = [] self._stop_event = threading.Event() + self.metadata = self.dd.load_global_metadata_from_disk() record_params(config, self.full_path) @@ -136,6 +138,7 @@ class IFreqaiModel(ABC): self.inference_timer('start') self.dk = FreqaiDataKitchen(self.config, self.live, metadata["pair"]) dk = self.start_live(dataframe, metadata, strategy, self.dk) + dataframe = dk.remove_features_from_df(dk.return_dataframe) # For backtesting, each pair enters and then gets trained for each window along the # sliding window defined by "train_period_days" (training window) and "live_retrain_hours" @@ -145,14 +148,19 @@ class IFreqaiModel(ABC): elif not self.follow_mode: self.dk = FreqaiDataKitchen(self.config, self.live, metadata["pair"]) if self.dk.backtest_live_models: - logger.info( - f"Backtesting {len(self.dk.backtesting_timeranges)} timeranges (live models)") + if self.backtest_using_historic_predictions: + logger.info( + "Backtesting using historic 
predictions (live models)") + else: + logger.info( + f"Backtesting {len(self.dk.backtesting_timeranges)} " + "timeranges (live models)") else: logger.info(f"Training {len(self.dk.training_timeranges)} timeranges") dataframe = self.dk.use_strategy_to_populate_indicators( strategy, prediction_dataframe=dataframe, pair=metadata["pair"] ) - if not self.save_live_data_backtest: + if not self.backtest_using_historic_predictions: dk = self.start_backtesting(dataframe, metadata, self.dk) dataframe = dk.remove_features_from_df(dk.return_dataframe) else: @@ -163,8 +171,7 @@ class IFreqaiModel(ABC): self.clean_up() if self.live: self.inference_timer('stop', metadata["pair"]) - if self.save_live_data_backtest: - dk.save_backtesting_live_dataframe(dataframe, metadata["pair"]) + self.set_start_dry_live_date(dataframe) return dataframe @@ -335,14 +342,12 @@ class IFreqaiModel(ABC): """ pair = metadata["pair"] dk.return_dataframe = dataframe - self.dk.set_backtesting_live_dataframe_path(pair) - saved_dataframe = self.dk.get_backtesting_live_dataframe() - columns_to_drop = list(set(dk.return_dataframe.columns).difference( - ["date", "open", "high", "low", "close", "volume"])) - saved_dataframe = saved_dataframe.drop( - columns=["open", "high", "low", "close", "volume"]) + saved_dataframe = self.dd.historic_predictions[pair] + columns_to_drop = list(set(saved_dataframe.columns).intersection( + dk.return_dataframe.columns)) dk.return_dataframe = dk.return_dataframe.drop(columns=list(columns_to_drop)) - dk.return_dataframe = pd.merge(dk.return_dataframe, saved_dataframe, how='left', on='date') + dk.return_dataframe = pd.merge( + dk.return_dataframe, saved_dataframe, how='left', left_on='date', right_on="date_pred") # dk.return_dataframe = dk.return_dataframe[saved_dataframe.columns].fillna(0) return dk @@ -886,6 +891,22 @@ class IFreqaiModel(ABC): return + def update_metadata(self, metadata: Dict[str, Any]): + """ + Update global metadata and save the updated json file + :param metadata: new global metadata dict + """ + self.dd.save_global_metadata_to_disk(metadata) + self.metadata = metadata + + def set_start_dry_live_date(self, live_dataframe: DataFrame): + key_name = "start_dry_live_date" + if key_name not in self.metadata: + metadata = self.metadata + metadata[key_name] = int( + pd.to_datetime(live_dataframe.tail(1)["date"].values[0]).timestamp()) + self.update_metadata(metadata) + # Following methods which are overridden by user made prediction models. # See freqai/prediction_models/CatboostPredictionModel.py for an example. 
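
Illustrative sketch (not part of the patch series): start_backtesting_from_live_saved_files() above re-attaches the predictions accumulated in dd.historic_predictions during a dry/live run to a freshly populated strategy dataframe by left-joining the candle "date" against the stored "date_pred" column. The prediction column names below are invented for the example; only the date/date_pred keys and the left join mirror the patch.

    import pandas as pd

    # Candles rebuilt by the strategy at backtest time.
    strategy_df = pd.DataFrame({
        "date": pd.date_range("2022-11-01", periods=5, freq="5min", tz="UTC"),
        "close": [1.00, 1.01, 1.02, 1.03, 1.04],
    })

    # Rows saved to historic_predictions while running dry/live
    # (hypothetical label/do_predict values for illustration).
    historic_predictions = pd.DataFrame({
        "date_pred": pd.date_range("2022-11-01 00:10", periods=3,
                                   freq="5min", tz="UTC"),
        "&-s_close": [0.011, -0.004, 0.007],
        "do_predict": [1, 1, 1],
    })

    # Left join keeps every strategy candle; candles with no stored prediction
    # come back as NaN and can be treated as "no signal" downstream.
    merged = pd.merge(
        strategy_df, historic_predictions,
        how="left", left_on="date", right_on="date_pred",
    )
    print(merged[["date", "close", "&-s_close", "do_predict"]])
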
diff --git a/freqtrade/freqai/utils.py b/freqtrade/freqai/utils.py index c9efe6a3c..f42a87be7 100644 --- a/freqtrade/freqai/utils.py +++ b/freqtrade/freqai/utils.py @@ -230,7 +230,7 @@ def get_timerange_backtest_live_models(config: Config) -> str: dk = FreqaiDataKitchen(config) models_path = dk.get_full_models_path(config) timerange: TimeRange = TimeRange() - if not config.get("save_live_data_backtest", False): + if not config.get("freqai", {}).get("backtest_using_historic_predictions", True): timerange, _ = dk.get_timerange_and_assets_end_dates_from_ready_models(models_path) else: timerange = dk.get_timerange_from_backtesting_live_dataframe() diff --git a/tests/freqai/test_freqai_datakitchen.py b/tests/freqai/test_freqai_datakitchen.py index ca7c19c94..2dbbd7ef5 100644 --- a/tests/freqai/test_freqai_datakitchen.py +++ b/tests/freqai/test_freqai_datakitchen.py @@ -261,45 +261,18 @@ def test_get_full_model_path(mocker, freqai_conf, model): assert model_path.is_dir() is True -def test_save_backtesting_live_dataframe(mocker, freqai_conf): - freqai, dataframe = make_unfiltered_dataframe(mocker, freqai_conf) - dataframe_without_last_candle = dataframe.copy() - dataframe_without_last_candle.drop(dataframe.tail(1).index, inplace=True) - freqai_conf.update({"save_live_data_backtest": True}) - freqai.dk.save_backtesting_live_dataframe(dataframe_without_last_candle, "ADA/BTC") - saved_dataframe = freqai.dk.get_backtesting_live_dataframe() - assert len(saved_dataframe) == 1 - assert saved_dataframe.iloc[-1, 0] == dataframe_without_last_candle.iloc[-1, 0] - freqai.dk.save_backtesting_live_dataframe(dataframe, "ADA/BTC") - saved_dataframe = freqai.dk.get_backtesting_live_dataframe() - assert len(saved_dataframe) == 2 - assert saved_dataframe.iloc[-1, 0] == dataframe.iloc[-1, 0] - assert saved_dataframe.iloc[-2, 0] == dataframe.iloc[-2, 0] - - def test_get_timerange_from_backtesting_live_dataframe(mocker, freqai_conf): freqai, dataframe = make_unfiltered_dataframe(mocker, freqai_conf) - freqai_conf.update({"save_live_data_backtest": True}) - freqai.dk.set_backtesting_live_dataframe_path("ADA/BTC") - freqai.dk.save_backtesting_live_dataframe_to_feather(dataframe) + freqai_conf.update({"backtest_using_historic_predictions": True}) timerange = freqai.dk.get_timerange_from_backtesting_live_dataframe() assert timerange.startts == 1516406400 assert timerange.stopts == 1517356500 -def test_get_timerange_from_backtesting_live_dataframe_folder_not_found(mocker, freqai_conf): +def test_get_timerange_from_backtesting_live_df_pred_not_found(mocker, freqai_conf): freqai, _ = make_unfiltered_dataframe(mocker, freqai_conf) with pytest.raises( OperationalException, - match=r'Saved live data not found.*' + match=r'Historic predictions not found.*' ): freqai.dk.get_timerange_from_backtesting_live_dataframe() - - -def test_saved_live_bt_file_not_found(mocker, freqai_conf): - freqai, _ = make_unfiltered_dataframe(mocker, freqai_conf) - with pytest.raises( - OperationalException, - match=r'.*live backtesting dataframe file not found.*' - ): - freqai.dk.get_backtesting_live_dataframe() diff --git a/tests/freqai/test_freqai_interface.py b/tests/freqai/test_freqai_interface.py index ed634de55..66b3bac17 100644 --- a/tests/freqai/test_freqai_interface.py +++ b/tests/freqai/test_freqai_interface.py @@ -300,37 +300,6 @@ def test_start_backtesting_from_existing_folder(mocker, freqai_conf, caplog): shutil.rmtree(Path(freqai.dk.full_path)) -def test_start_backtesting_from_saved_live_dataframe(mocker, freqai_conf, caplog): - 
freqai_conf.update({"save_live_data_backtest": True}) - freqai_conf.update({"freqai_backtest_live_models": True}) - - strategy = get_patched_freqai_strategy(mocker, freqai_conf) - exchange = get_patched_exchange(mocker, freqai_conf) - strategy.dp = DataProvider(freqai_conf, exchange) - strategy.freqai_info = freqai_conf.get("freqai", {}) - freqai = strategy.freqai - freqai.live = False - freqai.dk = FreqaiDataKitchen(freqai_conf) - timerange = TimeRange.parse_timerange("20180110-20180130") - freqai.dd.load_all_pair_histories(timerange, freqai.dk) - sub_timerange = TimeRange.parse_timerange("20180110-20180130") - corr_df, base_df = freqai.dd.get_base_and_corr_dataframes(sub_timerange, "LTC/BTC", freqai.dk) - df = freqai.dk.use_strategy_to_populate_indicators(strategy, corr_df, base_df, "LTC/BTC") - metadata = {"pair": "ADA/BTC"} - - # create a dummy live dataframe file with 10 rows - dataframe_predictions = df.tail(10).copy() - dataframe_predictions["&s_close"] = dataframe_predictions["close"] * 1.1 - freqai.dk.set_backtesting_live_dataframe_path("ADA/BTC") - freqai.dk.save_backtesting_live_dataframe_to_feather(dataframe_predictions) - - freqai.start_backtesting_from_live_saved_files(df, metadata, freqai.dk) - assert len(freqai.dk.return_dataframe) == len(df) - assert len(freqai.dk.return_dataframe[freqai.dk.return_dataframe["&s_close"] > 0]) == ( - len(dataframe_predictions)) - shutil.rmtree(Path(freqai.dk.full_path)) - - def test_backtesting_fit_live_predictions(mocker, freqai_conf, caplog): freqai_conf.get("freqai", {}).update({"fit_live_predictions_candles": 10}) strategy = get_patched_freqai_strategy(mocker, freqai_conf) From fdc82af883d4e6601ab7468e73a748ecc2d11fd0 Mon Sep 17 00:00:00 2001 From: Wagner Costa Santos Date: Sat, 19 Nov 2022 22:27:58 -0300 Subject: [PATCH 13/71] fix tests - update code to backtest with historic_predictions --- freqtrade/freqai/data_drawer.py | 31 ++++++++++++++++++++- freqtrade/freqai/data_kitchen.py | 31 --------------------- freqtrade/freqai/utils.py | 4 ++- tests/freqai/test_freqai_backtesting.py | 2 ++ tests/freqai/test_freqai_datadrawer.py | 37 +++++++++++++++++++++++++ tests/freqai/test_freqai_datakitchen.py | 18 +----------- 6 files changed, 73 insertions(+), 50 deletions(-) diff --git a/freqtrade/freqai/data_drawer.py b/freqtrade/freqai/data_drawer.py index e83b05aaa..59b8e2684 100644 --- a/freqtrade/freqai/data_drawer.py +++ b/freqtrade/freqai/data_drawer.py @@ -3,7 +3,7 @@ import logging import re import shutil import threading -from datetime import datetime, timezone +from datetime import datetime, timedelta, timezone from pathlib import Path from typing import Any, Dict, Tuple, TypedDict @@ -714,3 +714,32 @@ class FreqaiDataDrawer: ).reset_index(drop=True) return corr_dataframes, base_dataframes + + def get_timerange_from_backtesting_live_dataframe(self) -> TimeRange: + """ + Returns timerange information based on historic predictions file + :return: timerange calculated from saved live data + """ + if not self.historic_predictions_path.is_file(): + raise OperationalException( + 'Historic predictions not found. 
Historic predictions data is required ' + 'to run backtest with the freqai-backtest-live-models option ' + 'and backtest_using_historic_predictions config option as true' + ) + + self.load_historic_predictions_from_disk() + + all_pairs_end_dates = [] + for pair in self.historic_predictions: + pair_historic_data = self.historic_predictions[pair] + all_pairs_end_dates.append(pair_historic_data.date_pred.max()) + + global_metadata = self.load_global_metadata_from_disk() + start_date = datetime.fromtimestamp(int(global_metadata["start_dry_live_date"])) + end_date = max(all_pairs_end_dates) + # add 1 day to string timerange to ensure BT module will load all dataframe data + end_date = end_date + timedelta(days=1) + backtesting_timerange = TimeRange( + 'date', 'date', int(start_date.timestamp()), int(end_date.timestamp()) + ) + return backtesting_timerange diff --git a/freqtrade/freqai/data_kitchen.py b/freqtrade/freqai/data_kitchen.py index 641c95725..b364f4e7e 100644 --- a/freqtrade/freqai/data_kitchen.py +++ b/freqtrade/freqai/data_kitchen.py @@ -1486,34 +1486,3 @@ class FreqaiDataKitchen: dataframe.columns = dataframe.columns.str.replace(c, "") return dataframe - - def get_timerange_from_backtesting_live_dataframe(self) -> TimeRange: - """ - Returns timerange information based on historic predictions file - :return: timerange calculated from saved live data - """ - from freqtrade.freqai.data_drawer import FreqaiDataDrawer - dd = FreqaiDataDrawer(Path(self.full_path), self.config) - if not dd.historic_predictions_path.is_file(): - raise OperationalException( - 'Historic predictions not found. Historic predictions data is required ' - 'to run backtest with the freqai-backtest-live-models option ' - 'and backtest_using_historic_predictions config option as true' - ) - - dd.load_historic_predictions_from_disk() - - all_pairs_end_dates = [] - for pair in dd.historic_predictions: - pair_historic_data = dd.historic_predictions[pair] - all_pairs_end_dates.append(pair_historic_data.date_pred.max()) - - global_metadata = dd.load_global_metadata_from_disk() - start_date = datetime.fromtimestamp(int(global_metadata["start_dry_live_date"])) - end_date = max(all_pairs_end_dates) - # add 1 day to string timerange to ensure BT module will load all dataframe data - end_date = end_date + timedelta(days=1) - backtesting_timerange = TimeRange( - 'date', 'date', int(start_date.timestamp()), int(end_date.timestamp()) - ) - return backtesting_timerange diff --git a/freqtrade/freqai/utils.py b/freqtrade/freqai/utils.py index f42a87be7..fd5d448bd 100644 --- a/freqtrade/freqai/utils.py +++ b/freqtrade/freqai/utils.py @@ -14,6 +14,7 @@ from freqtrade.data.history.history_utils import refresh_backtest_ohlcv_data from freqtrade.exceptions import OperationalException from freqtrade.exchange import timeframe_to_seconds from freqtrade.exchange.exchange import market_is_active +from freqtrade.freqai.data_drawer import FreqaiDataDrawer from freqtrade.freqai.data_kitchen import FreqaiDataKitchen from freqtrade.plugins.pairlist.pairlist_helpers import dynamic_expand_pairlist @@ -233,6 +234,7 @@ def get_timerange_backtest_live_models(config: Config) -> str: if not config.get("freqai", {}).get("backtest_using_historic_predictions", True): timerange, _ = dk.get_timerange_and_assets_end_dates_from_ready_models(models_path) else: - timerange = dk.get_timerange_from_backtesting_live_dataframe() + dd = FreqaiDataDrawer(models_path, config) + timerange = dd.get_timerange_from_backtesting_live_dataframe() return timerange.timerange_str 
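To make the timerange derivation above concrete, here is a rough, self-contained sketch of the same calculation: the start comes from a stored `start_dry_live_date` timestamp, the end from the newest `date_pred` across pairs plus one day. The pair names, timestamps and dataframes below are invented for illustration only and do not come from the patch:

```python
from datetime import datetime, timedelta, timezone

import pandas as pd

# Invented stand-ins for the historic predictions dict and the global metadata file.
historic_predictions = {
    "ADA/USDT": pd.DataFrame({"date_pred": pd.to_datetime(
        ["2022-11-20 12:00", "2022-11-22 09:00"], utc=True)}),
    "BTC/USDT": pd.DataFrame({"date_pred": pd.to_datetime(
        ["2022-11-21 18:00"], utc=True)}),
}
global_metadata = {"start_dry_live_date": 1668902400}  # example epoch timestamp

start_date = datetime.fromtimestamp(int(global_metadata["start_dry_live_date"]),
                                    tz=timezone.utc)
# Newest prediction date across all pairs, padded by one day so the backtesting
# module loads the full dataframe.
end_date = max(df["date_pred"].max()
               for df in historic_predictions.values()) + timedelta(days=1)

print(int(start_date.timestamp()), int(end_date.timestamp()))
```

The one-day padding mirrors the comment in the patch ("add 1 day to string timerange to ensure BT module will load all dataframe data").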
diff --git a/tests/freqai/test_freqai_backtesting.py b/tests/freqai/test_freqai_backtesting.py index b9e2d650a..49b27f724 100644 --- a/tests/freqai/test_freqai_backtesting.py +++ b/tests/freqai/test_freqai_backtesting.py @@ -65,6 +65,8 @@ def test_freqai_backtest_live_models_model_not_found(freqai_conf, mocker, testda mocker.patch('freqtrade.optimize.backtesting.history.load_data') mocker.patch('freqtrade.optimize.backtesting.history.get_timerange', return_value=(now, now)) freqai_conf["timerange"] = "" + freqai_conf.get("freqai", {}).update({"backtest_using_historic_predictions": False}) + patched_configuration_load_config_file(mocker, freqai_conf) args = [ diff --git a/tests/freqai/test_freqai_datadrawer.py b/tests/freqai/test_freqai_datadrawer.py index 7ab963507..3abf84586 100644 --- a/tests/freqai/test_freqai_datadrawer.py +++ b/tests/freqai/test_freqai_datadrawer.py @@ -2,8 +2,11 @@ import shutil from pathlib import Path +import pytest + from freqtrade.configuration import TimeRange from freqtrade.data.dataprovider import DataProvider +from freqtrade.exceptions import OperationalException from freqtrade.freqai.data_kitchen import FreqaiDataKitchen from tests.conftest import get_patched_exchange from tests.freqai.conftest import get_patched_freqai_strategy @@ -93,3 +96,37 @@ def test_use_strategy_to_populate_indicators(mocker, freqai_conf): assert len(df.columns) == 33 shutil.rmtree(Path(freqai.dk.full_path)) + + +def test_get_timerange_from_backtesting_live_dataframe(mocker, freqai_conf): + strategy = get_patched_freqai_strategy(mocker, freqai_conf) + exchange = get_patched_exchange(mocker, freqai_conf) + strategy.dp = DataProvider(freqai_conf, exchange) + freqai = strategy.freqai + freqai.live = True + freqai.dk = FreqaiDataKitchen(freqai_conf) + timerange = TimeRange.parse_timerange("20180126-20180130") + freqai.dd.load_all_pair_histories(timerange, freqai.dk) + sub_timerange = TimeRange.parse_timerange("20180128-20180130") + _, base_df = freqai.dd.get_base_and_corr_dataframes(sub_timerange, "ADA/BTC", freqai.dk) + base_df["5m"]["date_pred"] = base_df["5m"]["date"] + freqai.dd.historic_predictions = {} + freqai.dd.historic_predictions["ADA/USDT"] = base_df["5m"] + freqai.dd.save_historic_predictions_to_disk() + freqai.dd.save_global_metadata_to_disk({"start_dry_live_date": 1516406400}) + + timerange = freqai.dd.get_timerange_from_backtesting_live_dataframe() + assert timerange.startts == 1516406400 + assert timerange.stopts == 1517356500 + + +def test_get_timerange_from_backtesting_live_df_pred_not_found(mocker, freqai_conf): + strategy = get_patched_freqai_strategy(mocker, freqai_conf) + exchange = get_patched_exchange(mocker, freqai_conf) + strategy.dp = DataProvider(freqai_conf, exchange) + freqai = strategy.freqai + with pytest.raises( + OperationalException, + match=r'Historic predictions not found.*' + ): + freqai.dd.get_timerange_from_backtesting_live_dataframe() diff --git a/tests/freqai/test_freqai_datakitchen.py b/tests/freqai/test_freqai_datakitchen.py index 2dbbd7ef5..4dfc75d38 100644 --- a/tests/freqai/test_freqai_datakitchen.py +++ b/tests/freqai/test_freqai_datakitchen.py @@ -190,6 +190,7 @@ def test_get_timerange_from_ready_models(mocker, freqai_conf, model): freqai_conf.update({"freqaimodel": model}) freqai_conf.update({"timerange": "20180110-20180130"}) freqai_conf.update({"strategy": "freqai_test_strat"}) + freqai_conf.get("freqai", {}).update({"backtest_using_historic_predictions": False}) strategy = get_patched_freqai_strategy(mocker, freqai_conf) exchange = 
get_patched_exchange(mocker, freqai_conf) @@ -259,20 +260,3 @@ def test_get_full_model_path(mocker, freqai_conf, model): model_path = freqai.dk.get_full_models_path(freqai_conf) assert model_path.is_dir() is True - - -def test_get_timerange_from_backtesting_live_dataframe(mocker, freqai_conf): - freqai, dataframe = make_unfiltered_dataframe(mocker, freqai_conf) - freqai_conf.update({"backtest_using_historic_predictions": True}) - timerange = freqai.dk.get_timerange_from_backtesting_live_dataframe() - assert timerange.startts == 1516406400 - assert timerange.stopts == 1517356500 - - -def test_get_timerange_from_backtesting_live_df_pred_not_found(mocker, freqai_conf): - freqai, _ = make_unfiltered_dataframe(mocker, freqai_conf) - with pytest.raises( - OperationalException, - match=r'Historic predictions not found.*' - ): - freqai.dk.get_timerange_from_backtesting_live_dataframe() From c01f25ddc95f6dbdf91b3dddd52cda4bcbf57428 Mon Sep 17 00:00:00 2001 From: Wagner Costa Date: Tue, 22 Nov 2022 13:09:09 -0300 Subject: [PATCH 14/71] update code to freqai_backtest_live_models only from historic predictions --- docs/freqai-parameter-table.md | 1 - docs/freqai-running.md | 12 +- freqtrade/freqai/data_drawer.py | 3 +- freqtrade/freqai/data_kitchen.py | 144 ++++-------------------- freqtrade/freqai/freqai_interface.py | 19 +--- freqtrade/freqai/utils.py | 9 +- tests/freqai/test_freqai_backtesting.py | 2 +- tests/freqai/test_freqai_datadrawer.py | 6 +- tests/freqai/test_freqai_datakitchen.py | 67 ----------- 9 files changed, 36 insertions(+), 227 deletions(-) diff --git a/docs/freqai-parameter-table.md b/docs/freqai-parameter-table.md index 059d56a1f..c027a12b1 100644 --- a/docs/freqai-parameter-table.md +++ b/docs/freqai-parameter-table.md @@ -15,7 +15,6 @@ Mandatory parameters are marked as **Required** and have to be set in one of the | `expiration_hours` | Avoid making predictions if a model is more than `expiration_hours` old.
**Datatype:** Positive integer. <br> Default: `0` (models never expire).
| `purge_old_models` | Delete obsolete models. <br> **Datatype:** Boolean. <br> Default: `False` (all historic models remain on disk).
| `save_backtest_models` | Save models to disk when running backtesting. Backtesting operates most efficiently by saving the prediction data and reusing them directly for subsequent runs (when you wish to tune entry/exit parameters). Saving backtesting models to disk also allows to use the same model files for starting a dry/live instance with the same model `identifier`. <br> **Datatype:** Boolean. <br> Default: `False` (no models are saved).
-| `backtest_using_historic_predictions` | Reuse `historic_predictions` in backtesting with [Backtest live models](freqai-running.md#backtest_live_models)) option. <br> Default: `True`
| `fit_live_predictions_candles` | Number of historical candles to use for computing target (label) statistics from prediction data, instead of from the training dataset (more information can be found [here](freqai-configuration.md#creating-a-dynamic-target-threshold)). <br> **Datatype:** Positive integer.
| `follow_mode` | Use a `follower` that will look for models associated with a specific `identifier` and load those for inferencing. A `follower` will **not** train new models. <br> **Datatype:** Boolean. <br> Default: `False`.
| `continual_learning` | Use the final state of the most recently trained model as starting point for the new model, allowing for incremental learning (more information can be found [here](freqai-running.md#continual-learning)). <br> **Datatype:** Boolean. <br> Default: `False`.
diff --git a/docs/freqai-running.md b/docs/freqai-running.md index d777b180e..23873547f 100644 --- a/docs/freqai-running.md +++ b/docs/freqai-running.md
@@ -81,17 +81,9 @@ To save the models generated during a particular backtest so that you can start ### Backtest live models
-FreqAI allow you to reuse ready models through the backtest parameter `--freqai-backtest-live-models`. This can be useful when you want to reuse predictions generated in dry/run for comparison or other study. For that, you have 2 options:
+FreqAI allows you to reuse live historic predictions through the backtest parameter `--freqai-backtest-live-models`. This can be useful when you want to reuse predictions generated in a dry/live run for comparison or other study.
-1. Set `"backtest_using_historic_predictions"` to `True` in the config. With this option, FreqAI will reuse `historic_predictions` in backtesting. This option requires less disk space and backtesting will run faster.
-2. Set `"purge_old_models"` to `False` and `"backtest_using_historic_predictions"` to `False` in the config. In this case, FreqAI will use the saved models to make the predictions in backtesting. This option requires more disk space and the backtest will have a longer execution time.
-The `--timerange` parameter must not be informed, as it will be automatically calculated through the training end dates of the models.
-Each model has an identifier derived from the training end date. If you have only 1 model trained, FreqAI will backtest from the training end date until the current date. If you have more than 1 model, each model will perform the backtesting according to the training end date until the training end date of the next model and so on. For the last model, the period of the previous model will be used for the execution.
-!!! Note - Currently, there is no checking for expired models, even if the `expired_hours` parameter is set.
+The `--timerange` parameter must not be provided, as it will be automatically calculated from the data in the historic predictions file.
### Downloading data to cover the full backtest period
diff --git a/freqtrade/freqai/data_drawer.py b/freqtrade/freqai/data_drawer.py index 59b8e2684..9f1e27796 100644 --- a/freqtrade/freqai/data_drawer.py +++ b/freqtrade/freqai/data_drawer.py
@@ -715,7 +715,7 @@ class FreqaiDataDrawer: return corr_dataframes, base_dataframes
- def get_timerange_from_backtesting_live_dataframe(self) -> TimeRange:
+ def get_timerange_from_live_historic_predictions(self) -> TimeRange:
 """ Returns timerange information based on historic predictions file :return: timerange calculated from saved live data """ if not self.historic_predictions_path.is_file(): raise OperationalException( 'Historic predictions not found.
Historic predictions data is required ' 'to run backtest with the freqai-backtest-live-models option ' - 'and backtest_using_historic_predictions config option as true' ) self.load_historic_predictions_from_disk() diff --git a/freqtrade/freqai/data_kitchen.py b/freqtrade/freqai/data_kitchen.py index b364f4e7e..f75fd3dd8 100644 --- a/freqtrade/freqai/data_kitchen.py +++ b/freqtrade/freqai/data_kitchen.py @@ -1,7 +1,7 @@ import copy import logging import shutil -from datetime import datetime, timedelta, timezone +from datetime import datetime, timezone from math import cos, sin from pathlib import Path from typing import Any, Dict, List, Tuple @@ -86,14 +86,7 @@ class FreqaiDataKitchen: if not self.live: self.full_path = self.get_full_models_path(self.config) - if self.backtest_live_models: - if self.pair and not ( - self.freqai_config.get("backtest_using_historic_predictions", True) - ): - self.set_timerange_from_ready_models() - (self.training_timeranges, - self.backtesting_timeranges) = self.split_timerange_live_models() - else: + if not self.backtest_live_models: self.full_timerange = self.create_fulltimerange( self.config["timerange"], self.freqai_config.get("train_period_days", 0) ) @@ -458,28 +451,28 @@ class FreqaiDataKitchen: # print(tr_training_list, tr_backtesting_list) return tr_training_list_timerange, tr_backtesting_list_timerange - def split_timerange_live_models( - self - ) -> Tuple[list, list]: + # def split_timerange_live_models( + # self + # ) -> Tuple[list, list]: - tr_backtesting_list_timerange = [] - asset = self.pair.split("/")[0] - if asset not in self.backtest_live_models_data["assets_end_dates"]: - raise OperationalException( - f"Model not available for pair {self.pair}. " - "Please, try again after removing this pair from the configuration file." - ) - asset_data = self.backtest_live_models_data["assets_end_dates"][asset] - backtesting_timerange = self.backtest_live_models_data["backtesting_timerange"] - model_end_dates = [x for x in asset_data] - model_end_dates.append(backtesting_timerange.stopts) - model_end_dates.sort() - for index, item in enumerate(model_end_dates): - if len(model_end_dates) > (index + 1): - tr_to_add = TimeRange("date", "date", item, model_end_dates[index + 1]) - tr_backtesting_list_timerange.append(tr_to_add) + # tr_backtesting_list_timerange = [] + # asset = self.pair.split("/")[0] + # if asset not in self.backtest_live_models_data["assets_end_dates"]: + # raise OperationalException( + # f"Model not available for pair {self.pair}. " + # "Please, try again after removing this pair from the configuration file." 
+ # ) + # asset_data = self.backtest_live_models_data["assets_end_dates"][asset] + # backtesting_timerange = self.backtest_live_models_data["backtesting_timerange"] + # model_end_dates = [x for x in asset_data] + # model_end_dates.append(backtesting_timerange.stopts) + # model_end_dates.sort() + # for index, item in enumerate(model_end_dates): + # if len(model_end_dates) > (index + 1): + # tr_to_add = TimeRange("date", "date", item, model_end_dates[index + 1]) + # tr_backtesting_list_timerange.append(tr_to_add) - return tr_backtesting_list_timerange, tr_backtesting_list_timerange + # return tr_backtesting_list_timerange, tr_backtesting_list_timerange def slice_dataframe(self, timerange: TimeRange, df: DataFrame) -> DataFrame: """ @@ -1371,17 +1364,6 @@ class FreqaiDataKitchen: ) return False - def set_timerange_from_ready_models(self): - backtesting_timerange, \ - assets_end_dates = ( - self.get_timerange_and_assets_end_dates_from_ready_models(self.full_path)) - - self.backtest_live_models_data = { - "backtesting_timerange": backtesting_timerange, - "assets_end_dates": assets_end_dates - } - return - def get_full_models_path(self, config: Config) -> Path: """ Returns default FreqAI model path @@ -1392,88 +1374,6 @@ class FreqaiDataKitchen: config["user_data_dir"] / "models" / str(freqai_config.get("identifier")) ) - def get_timerange_and_assets_end_dates_from_ready_models( - self, models_path: Path) -> Tuple[TimeRange, Dict[str, Any]]: - """ - Returns timerange information based on a FreqAI model directory - :param models_path: FreqAI model path - - :return: a Tuple with (Timerange calculated from directory and - a Dict with pair and model end training dates info) - """ - all_models_end_dates = [] - assets_end_dates: Dict[str, Any] = self.get_assets_timestamps_training_from_ready_models( - models_path) - for key in assets_end_dates: - for model_end_date in assets_end_dates[key]: - if model_end_date not in all_models_end_dates: - all_models_end_dates.append(model_end_date) - - if len(all_models_end_dates) == 0: - raise OperationalException( - 'At least 1 saved model is required to ' - 'run backtest with the freqai-backtest-live-models option' - ) - - if len(all_models_end_dates) == 1: - logger.warning( - "Only 1 model was found. 
Backtesting will run with the " - "timerange from the end of the training date to the current date" - ) - - finish_timestamp = int(datetime.now(tz=timezone.utc).timestamp()) - if len(all_models_end_dates) > 1: - # After last model end date, use the same period from previous model - # to finish the backtest - all_models_end_dates.sort(reverse=True) - finish_timestamp = all_models_end_dates[0] + \ - (all_models_end_dates[0] - all_models_end_dates[1]) - - all_models_end_dates.append(finish_timestamp) - all_models_end_dates.sort() - start_date = (datetime(*datetime.fromtimestamp(min(all_models_end_dates), - timezone.utc).timetuple()[:3], tzinfo=timezone.utc)) - end_date = (datetime(*datetime.fromtimestamp(max(all_models_end_dates), - timezone.utc).timetuple()[:3], tzinfo=timezone.utc)) - - # add 1 day to string timerange to ensure BT module will load all dataframe data - end_date = end_date + timedelta(days=1) - backtesting_timerange = TimeRange( - 'date', 'date', int(start_date.timestamp()), int(end_date.timestamp()) - ) - return backtesting_timerange, assets_end_dates - - def get_assets_timestamps_training_from_ready_models( - self, models_path: Path) -> Dict[str, Any]: - """ - Scan the models path and returns all assets end training dates (timestamp) - :param models_path: FreqAI model path - - :return: a Dict with asset and model end training dates info - """ - assets_end_dates: Dict[str, Any] = {} - if not models_path.is_dir(): - raise OperationalException( - 'Model folders not found. Saved models are required ' - 'to run backtest with the freqai-backtest-live-models option' - ) - for model_dir in models_path.iterdir(): - if str(model_dir.name).startswith("sub-train"): - model_end_date = int(model_dir.name.split("_")[1]) - asset = model_dir.name.split("_")[0].replace("sub-train-", "") - model_file_name = ( - f"cb_{str(model_dir.name).replace('sub-train-', '').lower()}" - "_model.joblib" - ) - - model_path_file = Path(model_dir / model_file_name) - if model_path_file.is_file(): - if asset not in assets_end_dates: - assets_end_dates[asset] = [] - assets_end_dates[asset].append(model_end_date) - - return assets_end_dates - def remove_special_chars_from_feature_names(self, dataframe: pd.DataFrame) -> pd.DataFrame: """ Remove all special characters from feature strings (:) diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index 473fe939f..80348fda8 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -68,10 +68,6 @@ class IFreqaiModel(ABC): self.save_backtest_models: bool = self.freqai_info.get("save_backtest_models", True) if self.save_backtest_models: logger.info('Backtesting module configured to save all models.') - self.backtest_using_historic_predictions: bool = self.freqai_info.get( - "backtest_using_historic_predictions", True) - if self.backtest_using_historic_predictions: - logger.info('Backtesting live models configured to use historic predictions.') self.dd = FreqaiDataDrawer(Path(self.full_path), self.config, self.follow_mode) # set current candle to arbitrary historical date @@ -148,23 +144,18 @@ class IFreqaiModel(ABC): elif not self.follow_mode: self.dk = FreqaiDataKitchen(self.config, self.live, metadata["pair"]) if self.dk.backtest_live_models: - if self.backtest_using_historic_predictions: - logger.info( - "Backtesting using historic predictions (live models)") - else: - logger.info( - f"Backtesting {len(self.dk.backtesting_timeranges)} " - "timeranges (live models)") + logger.info( + 
"Backtesting using historic predictions (live models)") else: logger.info(f"Training {len(self.dk.training_timeranges)} timeranges") dataframe = self.dk.use_strategy_to_populate_indicators( strategy, prediction_dataframe=dataframe, pair=metadata["pair"] ) - if not self.backtest_using_historic_predictions: + if not self.config.get("freqai_backtest_live_models", False): dk = self.start_backtesting(dataframe, metadata, self.dk) dataframe = dk.remove_features_from_df(dk.return_dataframe) else: - dk = self.start_backtesting_from_live_saved_files( + dk = self.start_backtesting_from_historic_predictions( dataframe, metadata, self.dk) dataframe = dk.return_dataframe @@ -330,7 +321,7 @@ class IFreqaiModel(ABC): return dk - def start_backtesting_from_live_saved_files( + def start_backtesting_from_historic_predictions( self, dataframe: DataFrame, metadata: dict, dk: FreqaiDataKitchen ) -> FreqaiDataKitchen: """ diff --git a/freqtrade/freqai/utils.py b/freqtrade/freqai/utils.py index fd5d448bd..806e3ca15 100644 --- a/freqtrade/freqai/utils.py +++ b/freqtrade/freqai/utils.py @@ -230,11 +230,6 @@ def get_timerange_backtest_live_models(config: Config) -> str: """ dk = FreqaiDataKitchen(config) models_path = dk.get_full_models_path(config) - timerange: TimeRange = TimeRange() - if not config.get("freqai", {}).get("backtest_using_historic_predictions", True): - timerange, _ = dk.get_timerange_and_assets_end_dates_from_ready_models(models_path) - else: - dd = FreqaiDataDrawer(models_path, config) - timerange = dd.get_timerange_from_backtesting_live_dataframe() - + dd = FreqaiDataDrawer(models_path, config) + timerange = dd.get_timerange_from_live_historic_predictions() return timerange.timerange_str diff --git a/tests/freqai/test_freqai_backtesting.py b/tests/freqai/test_freqai_backtesting.py index 49b27f724..60963e762 100644 --- a/tests/freqai/test_freqai_backtesting.py +++ b/tests/freqai/test_freqai_backtesting.py @@ -81,7 +81,7 @@ def test_freqai_backtest_live_models_model_not_found(freqai_conf, mocker, testda bt_config = setup_optimize_configuration(args, RunMode.BACKTEST) with pytest.raises(OperationalException, - match=r".* Saved models are required to run backtest .*"): + match=r".* Historic predictions data is required to run backtest .*"): Backtesting(bt_config) Backtesting.cleanup() diff --git a/tests/freqai/test_freqai_datadrawer.py b/tests/freqai/test_freqai_datadrawer.py index 3abf84586..da3b8f9c1 100644 --- a/tests/freqai/test_freqai_datadrawer.py +++ b/tests/freqai/test_freqai_datadrawer.py @@ -98,7 +98,7 @@ def test_use_strategy_to_populate_indicators(mocker, freqai_conf): shutil.rmtree(Path(freqai.dk.full_path)) -def test_get_timerange_from_backtesting_live_dataframe(mocker, freqai_conf): +def test_get_timerange_from_live_historic_predictions(mocker, freqai_conf): strategy = get_patched_freqai_strategy(mocker, freqai_conf) exchange = get_patched_exchange(mocker, freqai_conf) strategy.dp = DataProvider(freqai_conf, exchange) @@ -115,7 +115,7 @@ def test_get_timerange_from_backtesting_live_dataframe(mocker, freqai_conf): freqai.dd.save_historic_predictions_to_disk() freqai.dd.save_global_metadata_to_disk({"start_dry_live_date": 1516406400}) - timerange = freqai.dd.get_timerange_from_backtesting_live_dataframe() + timerange = freqai.dd.get_timerange_from_live_historic_predictions() assert timerange.startts == 1516406400 assert timerange.stopts == 1517356500 @@ -129,4 +129,4 @@ def test_get_timerange_from_backtesting_live_df_pred_not_found(mocker, freqai_co OperationalException, 
match=r'Historic predictions not found.*' ): - freqai.dd.get_timerange_from_backtesting_live_dataframe() + freqai.dd.get_timerange_from_live_historic_predictions() diff --git a/tests/freqai/test_freqai_datakitchen.py b/tests/freqai/test_freqai_datakitchen.py index 4dfc75d38..0dc897916 100644 --- a/tests/freqai/test_freqai_datakitchen.py +++ b/tests/freqai/test_freqai_datakitchen.py @@ -9,7 +9,6 @@ from freqtrade.configuration import TimeRange from freqtrade.data.dataprovider import DataProvider from freqtrade.exceptions import OperationalException from freqtrade.freqai.data_kitchen import FreqaiDataKitchen -from freqtrade.freqai.utils import get_timerange_backtest_live_models from tests.conftest import get_patched_exchange, log_has_re from tests.freqai.conftest import (get_patched_data_kitchen, get_patched_freqai_strategy, make_data_dictionary, make_unfiltered_dataframe) @@ -166,72 +165,6 @@ def test_make_train_test_datasets(mocker, freqai_conf): assert len(data_dictionary['train_features'].index) == 1916 -def test_get_pairs_timestamp_validation(mocker, freqai_conf): - exchange = get_patched_exchange(mocker, freqai_conf) - strategy = get_patched_freqai_strategy(mocker, freqai_conf) - strategy.dp = DataProvider(freqai_conf, exchange) - strategy.freqai_info = freqai_conf.get("freqai", {}) - freqai = strategy.freqai - freqai.live = True - freqai.dk = FreqaiDataKitchen(freqai_conf) - freqai_conf['freqai'].update({"identifier": "invalid_id"}) - model_path = freqai.dk.get_full_models_path(freqai_conf) - with pytest.raises( - OperationalException, - match=r'.*required to run backtest with the freqai-backtest-live-models.*' - ): - freqai.dk.get_assets_timestamps_training_from_ready_models(model_path) - - -@pytest.mark.parametrize('model', [ - 'LightGBMRegressor' - ]) -def test_get_timerange_from_ready_models(mocker, freqai_conf, model): - freqai_conf.update({"freqaimodel": model}) - freqai_conf.update({"timerange": "20180110-20180130"}) - freqai_conf.update({"strategy": "freqai_test_strat"}) - freqai_conf.get("freqai", {}).update({"backtest_using_historic_predictions": False}) - - strategy = get_patched_freqai_strategy(mocker, freqai_conf) - exchange = get_patched_exchange(mocker, freqai_conf) - strategy.dp = DataProvider(freqai_conf, exchange) - strategy.freqai_info = freqai_conf.get("freqai", {}) - freqai = strategy.freqai - freqai.live = True - freqai.dk = FreqaiDataKitchen(freqai_conf) - timerange = TimeRange.parse_timerange("20180101-20180130") - freqai.dd.load_all_pair_histories(timerange, freqai.dk) - - freqai.dd.pair_dict = MagicMock() - - data_load_timerange = TimeRange.parse_timerange("20180101-20180130") - - # 1516233600 (2018-01-18 00:00) - Start Training 1 - # 1516406400 (2018-01-20 00:00) - End Training 1 (Backtest slice 1) - # 1516579200 (2018-01-22 00:00) - End Training 2 (Backtest slice 2) - # 1516838400 (2018-01-25 00:00) - End Timerange - - new_timerange = TimeRange("date", "date", 1516233600, 1516406400) - freqai.extract_data_and_train_model( - new_timerange, "ADA/BTC", strategy, freqai.dk, data_load_timerange) - - new_timerange = TimeRange("date", "date", 1516406400, 1516579200) - freqai.extract_data_and_train_model( - new_timerange, "ADA/BTC", strategy, freqai.dk, data_load_timerange) - - model_path = freqai.dk.get_full_models_path(freqai_conf) - (backtesting_timerange, - pairs_end_dates) = freqai.dk.get_timerange_and_assets_end_dates_from_ready_models( - models_path=model_path) - - assert len(pairs_end_dates["ADA"]) == 2 - assert backtesting_timerange.startts == 1516406400 - 
assert backtesting_timerange.stopts == 1516838400 - - backtesting_string_timerange = get_timerange_backtest_live_models(freqai_conf) - assert backtesting_string_timerange == '20180120-20180125' - - @pytest.mark.parametrize('model', [ 'LightGBMRegressor' ]) From d09157efb89a947e24451babd5b1ff11f3fa58e0 Mon Sep 17 00:00:00 2001 From: Wagner Costa Date: Tue, 22 Nov 2022 15:15:42 -0300 Subject: [PATCH 15/71] update code to use one prediction file / pair --- freqtrade/freqai/data_kitchen.py | 39 ++++++++++++++++++--------- freqtrade/freqai/freqai_interface.py | 1 + tests/freqai/test_freqai_interface.py | 14 ++++++++-- 3 files changed, 39 insertions(+), 15 deletions(-) diff --git a/freqtrade/freqai/data_kitchen.py b/freqtrade/freqai/data_kitchen.py index f75fd3dd8..65f3483af 100644 --- a/freqtrade/freqai/data_kitchen.py +++ b/freqtrade/freqai/data_kitchen.py @@ -9,7 +9,7 @@ from typing import Any, Dict, List, Tuple import numpy as np import numpy.typing as npt import pandas as pd -from pandas import DataFrame +from pandas import DataFrame, HDFStore from scipy import stats from sklearn import linear_model from sklearn.cluster import DBSCAN @@ -74,6 +74,7 @@ class FreqaiDataKitchen: self.training_features_list: List = [] self.model_filename: str = "" self.backtesting_results_path = Path() + self.backtesting_h5_data: HDFStore = {} self.backtest_predictions_folder: str = "backtesting_predictions" self.live = live self.pair = pair @@ -1319,7 +1320,7 @@ class FreqaiDataKitchen: if not full_predictions_folder.is_dir(): full_predictions_folder.mkdir(parents=True, exist_ok=True) - append_df.to_hdf(self.backtesting_results_path, key='append_df', mode='w') + append_df.to_hdf(self.backtesting_results_path, key=self.model_filename) def get_backtesting_prediction( self @@ -1327,9 +1328,26 @@ class FreqaiDataKitchen: """ Get prediction dataframe from h5 file format """ - append_df = pd.read_hdf(self.backtesting_results_path) + append_df = self.backtesting_h5_data[self.model_filename] return append_df + def load_prediction_pair_file( + self + ) -> None: + """ + Load prediction file if it exists + """ + pair_file_name = self.pair.split(':')[0].replace('/', '_').lower() + path_to_predictionfile = Path(self.full_path / + self.backtest_predictions_folder / + f"{pair_file_name}_prediction.h5") + self.backtesting_results_path = path_to_predictionfile + file_exists = path_to_predictionfile.is_file() + if file_exists: + self.backtesting_h5_data = pd.HDFStore(path_to_predictionfile) + else: + self.backtesting_h5_data = {} + def check_if_backtest_prediction_is_valid( self, len_backtest_df: int @@ -1341,17 +1359,11 @@ class FreqaiDataKitchen: :return: :boolean: whether the prediction file is valid. """ - path_to_predictionfile = Path(self.full_path / - self.backtest_predictions_folder / - f"{self.model_filename}_prediction.h5") - self.backtesting_results_path = path_to_predictionfile - - file_exists = path_to_predictionfile.is_file() - - if file_exists: + if self.model_filename in self.backtesting_h5_data: append_df = self.get_backtesting_prediction() if len(append_df) == len_backtest_df and 'date' in append_df: - logger.info(f"Found backtesting prediction file at {path_to_predictionfile}") + logger.info("Found backtesting prediction file " + f"at {self.backtesting_results_path.name}") return True else: logger.info("A new backtesting prediction file is required. 
" @@ -1360,7 +1372,8 @@ class FreqaiDataKitchen: return False else: logger.info( - f"Could not find backtesting prediction file at {path_to_predictionfile}" + "Could not find backtesting prediction file " + f"at {self.backtesting_results_path.name}" ) return False diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index 80348fda8..21851b3b6 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -260,6 +260,7 @@ class IFreqaiModel(ABC): self.pair_it += 1 train_it = 0 + dk.load_prediction_pair_file() # Loop enforcing the sliding window training/backtesting paradigm # tr_train is the training time range e.g. 1 historical month # tr_backtest is the backtesting time range e.g. the week directly diff --git a/tests/freqai/test_freqai_interface.py b/tests/freqai/test_freqai_interface.py index 66b3bac17..6e2e774fe 100644 --- a/tests/freqai/test_freqai_interface.py +++ b/tests/freqai/test_freqai_interface.py @@ -263,7 +263,9 @@ def test_start_backtesting_from_existing_folder(mocker, freqai_conf, caplog): df = freqai.dk.use_strategy_to_populate_indicators(strategy, corr_df, base_df, "LTC/BTC") - metadata = {"pair": "ADA/BTC"} + pair = "ADA/BTC" + metadata = {"pair": pair} + freqai.dk.pair = pair freqai.start_backtesting(df, metadata, freqai.dk) model_folders = [x for x in freqai.dd.full_path.iterdir() if x.is_dir()] @@ -286,6 +288,9 @@ def test_start_backtesting_from_existing_folder(mocker, freqai_conf, caplog): df = freqai.dk.use_strategy_to_populate_indicators(strategy, corr_df, base_df, "LTC/BTC") + pair = "ADA/BTC" + metadata = {"pair": pair} + freqai.dk.pair = pair freqai.start_backtesting(df, metadata, freqai.dk) assert log_has_re( @@ -293,9 +298,14 @@ def test_start_backtesting_from_existing_folder(mocker, freqai_conf, caplog): caplog, ) + pair = "ETH/BTC" + metadata = {"pair": pair} + freqai.dk.pair = pair + freqai.start_backtesting(df, metadata, freqai.dk) + path = (freqai.dd.full_path / freqai.dk.backtest_predictions_folder) prediction_files = [x for x in path.iterdir() if x.is_file()] - assert len(prediction_files) == 5 + assert len(prediction_files) == 2 shutil.rmtree(Path(freqai.dk.full_path)) From 391817243cab8ba944933e19c26280caebf1baf0 Mon Sep 17 00:00:00 2001 From: froggleston Date: Fri, 25 Nov 2022 16:12:15 +0000 Subject: [PATCH 16/71] Tidy up complex functions --- freqtrade/data/entryexitanalysis.py | 31 +++++++++++++++++++++++------ 1 file changed, 25 insertions(+), 6 deletions(-) diff --git a/freqtrade/data/entryexitanalysis.py b/freqtrade/data/entryexitanalysis.py index b22c3f87e..10969431d 100755 --- a/freqtrade/data/entryexitanalysis.py +++ b/freqtrade/data/entryexitanalysis.py @@ -152,9 +152,30 @@ def _do_group_table_output(bigdf, glist): logger.warning("Invalid group mask specified.") +def _select_rows_within_dates(df, date_start=None, date_end=None): + if (date_start is not None): + df = df.loc[(df['date'] >= date_start)] + + if (date_end is not None): + df = df.loc[(df['date'] < date_end)] + + return df + + +def _select_rows_by_entry_exit_tags(df, enter_reason_list, exit_reason_list): + if enter_reason_list and "all" not in enter_reason_list: + df = df.loc[(df['enter_reason'].isin(enter_reason_list))] + + if exit_reason_list and "all" not in exit_reason_list: + df = df.loc[(df['exit_reason'].isin(exit_reason_list))] + + return df + + def _print_results(analysed_trades, stratname, analysis_groups, enter_reason_list, exit_reason_list, - indicator_list, columns=None): + indicator_list, 
columns=None, + date_start=None, date_end=None): if columns is None: columns = ['pair', 'open_date', 'close_date', 'profit_abs', 'enter_reason', 'exit_reason'] @@ -162,15 +183,13 @@ def _print_results(analysed_trades, stratname, analysis_groups, for pair, trades in analysed_trades[stratname].items(): bigdf = pd.concat([bigdf, trades], ignore_index=True) + bigdf = _select_rows_within_dates(bigdf, date_start, date_end) + if bigdf.shape[0] > 0 and ('enter_reason' in bigdf.columns): if analysis_groups: _do_group_table_output(bigdf, analysis_groups) - if enter_reason_list and "all" not in enter_reason_list: - bigdf = bigdf.loc[(bigdf['enter_reason'].isin(enter_reason_list))] - - if exit_reason_list and "all" not in exit_reason_list: - bigdf = bigdf.loc[(bigdf['exit_reason'].isin(exit_reason_list))] + bigdf = _select_rows_by_entry_exit_tags(bigdf, enter_reason_list, exit_reason_list) if "all" in indicator_list: print(bigdf) From 4790aaaae1eaa85657674d91b48621539af77711 Mon Sep 17 00:00:00 2001 From: froggleston Date: Sat, 26 Nov 2022 16:58:56 +0000 Subject: [PATCH 17/71] Implement cli options for backtesting-analysis date filtering --- docs/advanced-backtesting.md | 15 ++++ docs/utils.md | 8 ++ freqtrade/commands/analyze_commands.py | 8 +- freqtrade/commands/arguments.py | 3 +- freqtrade/commands/cli_options.py | 10 +++ freqtrade/configuration/configuration.py | 6 ++ freqtrade/data/entryexitanalysis.py | 95 ++++++++++++++---------- tests/data/test_entryexitanalysis.py | 9 +++ 8 files changed, 107 insertions(+), 47 deletions(-) diff --git a/docs/advanced-backtesting.md b/docs/advanced-backtesting.md index 5c2500f18..78e692f84 100644 --- a/docs/advanced-backtesting.md +++ b/docs/advanced-backtesting.md @@ -100,3 +100,18 @@ freqtrade backtesting-analysis -c --analysis-groups 0 2 --enter-re The indicators have to be present in your strategy's main DataFrame (either for your main timeframe or for informative timeframes) otherwise they will simply be ignored in the script output. + +### Filtering the trade output by date + +To show only trades between dates within your backtested timerange, supply the following option(s) in YYYYMMDD format: + +``` +--analysis-date-start : Start date to filter output trades, inclusive. e.g. 20220101 +--analysis-date-end : End date to filter output trades, exclusive. e.g. 20220131 +``` + +For example, if your backtest timerange was `20220101-20221231` but you only want to output trades in January: + +```bash +freqtrade backtesting-analysis -c --analysis-date-start 20220101 --analysis-date-end 20220201 +``` diff --git a/docs/utils.md b/docs/utils.md index 3d8a3bd03..e88a13a9a 100644 --- a/docs/utils.md +++ b/docs/utils.md @@ -722,6 +722,8 @@ usage: freqtrade backtesting-analysis [-h] [-v] [--logfile FILE] [-V] [--enter-reason-list ENTER_REASON_LIST [ENTER_REASON_LIST ...]] [--exit-reason-list EXIT_REASON_LIST [EXIT_REASON_LIST ...]] [--indicator-list INDICATOR_LIST [INDICATOR_LIST ...]] + [--analysis-date-start YYYYMMDD] + [--analysis-date-end YYYYMMDD] optional arguments: -h, --help show this help message and exit @@ -744,6 +746,12 @@ optional arguments: --indicator-list INDICATOR_LIST [INDICATOR_LIST ...] Comma separated list of indicators to analyse. e.g. 'close,rsi,bb_lowerband,profit_abs' + --analysis-date-start YYYYMMDD + Start date to filter trades for analysis (inclusive). e.g. + 20220101 + --analysis-date-end YYYYMMDD + End date to filter trades for analysis (exclusive). e.g. 
+ 20220131 Common arguments: -v, --verbose Verbose mode (-vv for more, -vvv to get all messages). diff --git a/freqtrade/commands/analyze_commands.py b/freqtrade/commands/analyze_commands.py index b6b790788..20afa7ffd 100755 --- a/freqtrade/commands/analyze_commands.py +++ b/freqtrade/commands/analyze_commands.py @@ -60,10 +60,4 @@ def start_analysis_entries_exits(args: Dict[str, Any]) -> None: logger.info('Starting freqtrade in analysis mode') - process_entry_exit_reasons(config['exportfilename'], - config['exchange']['pair_whitelist'], - config['analysis_groups'], - config['enter_reason_list'], - config['exit_reason_list'], - config['indicator_list'] - ) + process_entry_exit_reasons(config) diff --git a/freqtrade/commands/arguments.py b/freqtrade/commands/arguments.py index 79ab9dafa..159b18439 100644 --- a/freqtrade/commands/arguments.py +++ b/freqtrade/commands/arguments.py @@ -106,7 +106,8 @@ ARGS_HYPEROPT_SHOW = ["hyperopt_list_best", "hyperopt_list_profitable", "hyperop "disableparamexport", "backtest_breakdown"] ARGS_ANALYZE_ENTRIES_EXITS = ["exportfilename", "analysis_groups", "enter_reason_list", - "exit_reason_list", "indicator_list"] + "exit_reason_list", "indicator_list", + "analysis_date_start", "analysis_date_end"] NO_CONF_REQURIED = ["convert-data", "convert-trade-data", "download-data", "list-timeframes", "list-markets", "list-pairs", "list-strategies", "list-freqaimodels", diff --git a/freqtrade/commands/cli_options.py b/freqtrade/commands/cli_options.py index 91ac16365..0592b0e53 100644 --- a/freqtrade/commands/cli_options.py +++ b/freqtrade/commands/cli_options.py @@ -658,6 +658,16 @@ AVAILABLE_CLI_OPTIONS = { nargs='+', default=[], ), + "analysis_date_start": Arg( + "--analysis-date-start", + help=("Start date to filter trades for analysis (inclusive). " + "e.g. '20220101'"), + ), + "analysis_date_end": Arg( + "--analysis-date-end", + help=("End date to filter trades for analysis (exclusive). " + "e.g. 
'20220131'"), + ), "freqaimodel": Arg( '--freqaimodel', help='Specify a custom freqaimodels.', diff --git a/freqtrade/configuration/configuration.py b/freqtrade/configuration/configuration.py index 4929c023d..4e8abf48e 100644 --- a/freqtrade/configuration/configuration.py +++ b/freqtrade/configuration/configuration.py @@ -462,6 +462,12 @@ class Configuration: self._args_to_config(config, argname='indicator_list', logstring='Analysis indicator list: {}') + self._args_to_config(config, argname='analysis_date_start', + logstring='Analysis filter start date: {}') + + self._args_to_config(config, argname='analysis_date_end', + logstring='Analysis filter end date: {}') + def _process_runmode(self, config: Config) -> None: self._args_to_config(config, argname='dry_run', diff --git a/freqtrade/data/entryexitanalysis.py b/freqtrade/data/entryexitanalysis.py index 10969431d..77f14d0c6 100755 --- a/freqtrade/data/entryexitanalysis.py +++ b/freqtrade/data/entryexitanalysis.py @@ -1,11 +1,12 @@ import logging +from datetime import datetime from pathlib import Path -from typing import List, Optional import joblib import pandas as pd from tabulate import tabulate +from freqtrade.constants import Config from freqtrade.data.btanalysis import (get_latest_backtest_filename, load_backtest_data, load_backtest_stats) from freqtrade.exceptions import OperationalException @@ -153,55 +154,64 @@ def _do_group_table_output(bigdf, glist): def _select_rows_within_dates(df, date_start=None, date_end=None): + dtfmt = "%Y%m%d" + try: + bool(datetime.strptime(date_start, dtfmt)) + bool(datetime.strptime(date_end, dtfmt)) + except ValueError: + logger.error("Invalid start and/or end date provided. Use YYYYMMDD.") + return None + except TypeError: + return df + if (date_start is not None): df = df.loc[(df['date'] >= date_start)] if (date_end is not None): df = df.loc[(df['date'] < date_end)] - return df -def _select_rows_by_entry_exit_tags(df, enter_reason_list, exit_reason_list): +def _select_rows_by_tags(df, enter_reason_list, exit_reason_list): if enter_reason_list and "all" not in enter_reason_list: df = df.loc[(df['enter_reason'].isin(enter_reason_list))] if exit_reason_list and "all" not in exit_reason_list: df = df.loc[(df['exit_reason'].isin(exit_reason_list))] - return df -def _print_results(analysed_trades, stratname, analysis_groups, - enter_reason_list, exit_reason_list, - indicator_list, columns=None, - date_start=None, date_end=None): - if columns is None: - columns = ['pair', 'open_date', 'close_date', 'profit_abs', 'enter_reason', 'exit_reason'] - - bigdf = pd.DataFrame() +def prepare_results(analysed_trades, stratname, + enter_reason_list, exit_reason_list, + date_start=None, date_end=None): + res_df = pd.DataFrame() for pair, trades in analysed_trades[stratname].items(): - bigdf = pd.concat([bigdf, trades], ignore_index=True) + res_df = pd.concat([res_df, trades], ignore_index=True) - bigdf = _select_rows_within_dates(bigdf, date_start, date_end) + res_df = _select_rows_within_dates(res_df, date_start, date_end) - if bigdf.shape[0] > 0 and ('enter_reason' in bigdf.columns): + if res_df is not None and res_df.shape[0] > 0 and ('enter_reason' in res_df.columns): + res_df = _select_rows_by_tags(res_df, enter_reason_list, exit_reason_list) + + return res_df + + +def print_results(res_df, analysis_groups, indicator_list): + if res_df.shape[0] > 0: if analysis_groups: - _do_group_table_output(bigdf, analysis_groups) - - bigdf = _select_rows_by_entry_exit_tags(bigdf, enter_reason_list, exit_reason_list) + 
_do_group_table_output(res_df, analysis_groups) if "all" in indicator_list: - print(bigdf) + print(res_df) elif indicator_list is not None: available_inds = [] for ind in indicator_list: - if ind in bigdf: + if ind in res_df: available_inds.append(ind) ilist = ["pair", "enter_reason", "exit_reason"] + available_inds - _print_table(bigdf[ilist], sortcols=['exit_reason'], show_index=False) + _print_table(res_df[ilist], sortcols=['exit_reason'], show_index=False) else: - print("\\_ No trades to show") + print("\\No trades to show") def _print_table(df, sortcols=None, show_index=False): @@ -220,27 +230,34 @@ def _print_table(df, sortcols=None, show_index=False): ) -def process_entry_exit_reasons(backtest_dir: Path, - pairlist: List[str], - analysis_groups: Optional[List[str]] = ["0", "1", "2"], - enter_reason_list: Optional[List[str]] = ["all"], - exit_reason_list: Optional[List[str]] = ["all"], - indicator_list: Optional[List[str]] = []): +def process_entry_exit_reasons(config: Config): try: - backtest_stats = load_backtest_stats(backtest_dir) + analysis_groups = config.get('analysis_groups', []) + enter_reason_list = config.get('enter_reason_list', ["all"]) + exit_reason_list = config.get('exit_reason_list', ["all"]) + indicator_list = config.get('indicator_list', []) + analysis_date_start = config.get('analysis_date_start', None) + analysis_date_end = config.get('analysis_date_end', None) + + backtest_stats = load_backtest_stats(config['exportfilename']) + for strategy_name, results in backtest_stats['strategy'].items(): - trades = load_backtest_data(backtest_dir, strategy_name) + trades = load_backtest_data(config['exportfilename'], strategy_name) if not trades.empty: - signal_candles = _load_signal_candles(backtest_dir) - analysed_trades_dict = _process_candles_and_indicators(pairlist, strategy_name, - trades, signal_candles) - _print_results(analysed_trades_dict, - strategy_name, - analysis_groups, - enter_reason_list, - exit_reason_list, - indicator_list) + signal_candles = _load_signal_candles(config['exportfilename']) + analysed_trades_dict = _process_candles_and_indicators( + config['exchange']['pair_whitelist'], strategy_name, + trades, signal_candles) + + res_df = prepare_results(analysed_trades_dict, strategy_name, + enter_reason_list, exit_reason_list, + date_start=analysis_date_start, + date_end=analysis_date_end) + + print_results(res_df, + analysis_groups, + indicator_list) except ValueError as e: raise OperationalException(e) from e diff --git a/tests/data/test_entryexitanalysis.py b/tests/data/test_entryexitanalysis.py index 588220465..8daca1a67 100755 --- a/tests/data/test_entryexitanalysis.py +++ b/tests/data/test_entryexitanalysis.py @@ -189,3 +189,12 @@ def test_backtest_analysis_nomock(default_conf, mocker, caplog, testdatadir, tmp assert '0.5' in captured.out assert '1' in captured.out assert '2.5' in captured.out + + # test date filtering + args = get_args(base_args + + ['--analysis-date-start', "20180129", + '--analysis-date-end', "20180130"] + ) + start_analysis_entries_exits(args) + captured = capsys.readouterr() + assert 'enter_tag_long_b' not in captured.out From 1a3f88c7b93e10b65869cef1adb6e023f650d916 Mon Sep 17 00:00:00 2001 From: froggleston Date: Sun, 27 Nov 2022 11:30:13 +0000 Subject: [PATCH 18/71] Replace separate start/end date option with usual timerange option --- docs/advanced-backtesting.md | 7 ++--- freqtrade/commands/arguments.py | 3 +- freqtrade/commands/cli_options.py | 10 ------- freqtrade/configuration/configuration.py | 7 ++--- 
freqtrade/data/entryexitanalysis.py | 36 +++++++++--------------- tests/data/test_entryexitanalysis.py | 6 ++-- 6 files changed, 21 insertions(+), 48 deletions(-) diff --git a/docs/advanced-backtesting.md b/docs/advanced-backtesting.md index 78e692f84..ae3eb2e4e 100644 --- a/docs/advanced-backtesting.md +++ b/docs/advanced-backtesting.md @@ -103,15 +103,14 @@ output. ### Filtering the trade output by date -To show only trades between dates within your backtested timerange, supply the following option(s) in YYYYMMDD format: +To show only trades between dates within your backtested timerange, supply the usual `timerange` option in `YYYYMMDD-[YYYYMMDD]` format: ``` ---analysis-date-start : Start date to filter output trades, inclusive. e.g. 20220101 ---analysis-date-end : End date to filter output trades, exclusive. e.g. 20220131 +--timerange : Timerange to filter output trades, start date inclusive, end date exclusive. e.g. 20220101-20221231 ``` For example, if your backtest timerange was `20220101-20221231` but you only want to output trades in January: ```bash -freqtrade backtesting-analysis -c --analysis-date-start 20220101 --analysis-date-end 20220201 +freqtrade backtesting-analysis -c --timerange 20220101-20220201 ``` diff --git a/freqtrade/commands/arguments.py b/freqtrade/commands/arguments.py index 159b18439..b53a1022d 100644 --- a/freqtrade/commands/arguments.py +++ b/freqtrade/commands/arguments.py @@ -106,8 +106,7 @@ ARGS_HYPEROPT_SHOW = ["hyperopt_list_best", "hyperopt_list_profitable", "hyperop "disableparamexport", "backtest_breakdown"] ARGS_ANALYZE_ENTRIES_EXITS = ["exportfilename", "analysis_groups", "enter_reason_list", - "exit_reason_list", "indicator_list", - "analysis_date_start", "analysis_date_end"] + "exit_reason_list", "indicator_list", "timerange"] NO_CONF_REQURIED = ["convert-data", "convert-trade-data", "download-data", "list-timeframes", "list-markets", "list-pairs", "list-strategies", "list-freqaimodels", diff --git a/freqtrade/commands/cli_options.py b/freqtrade/commands/cli_options.py index 0592b0e53..91ac16365 100644 --- a/freqtrade/commands/cli_options.py +++ b/freqtrade/commands/cli_options.py @@ -658,16 +658,6 @@ AVAILABLE_CLI_OPTIONS = { nargs='+', default=[], ), - "analysis_date_start": Arg( - "--analysis-date-start", - help=("Start date to filter trades for analysis (inclusive). " - "e.g. '20220101'"), - ), - "analysis_date_end": Arg( - "--analysis-date-end", - help=("End date to filter trades for analysis (exclusive). " - "e.g. 
'20220131'"), - ), "freqaimodel": Arg( '--freqaimodel', help='Specify a custom freqaimodels.', diff --git a/freqtrade/configuration/configuration.py b/freqtrade/configuration/configuration.py index 4e8abf48e..664610f33 100644 --- a/freqtrade/configuration/configuration.py +++ b/freqtrade/configuration/configuration.py @@ -462,11 +462,8 @@ class Configuration: self._args_to_config(config, argname='indicator_list', logstring='Analysis indicator list: {}') - self._args_to_config(config, argname='analysis_date_start', - logstring='Analysis filter start date: {}') - - self._args_to_config(config, argname='analysis_date_end', - logstring='Analysis filter end date: {}') + self._args_to_config(config, argname='timerange', + logstring='Filter trades by timerange: {}') def _process_runmode(self, config: Config) -> None: diff --git a/freqtrade/data/entryexitanalysis.py b/freqtrade/data/entryexitanalysis.py index 77f14d0c6..565a279b1 100755 --- a/freqtrade/data/entryexitanalysis.py +++ b/freqtrade/data/entryexitanalysis.py @@ -1,11 +1,11 @@ import logging -from datetime import datetime from pathlib import Path import joblib import pandas as pd from tabulate import tabulate +from freqtrade.configuration import TimeRange from freqtrade.constants import Config from freqtrade.data.btanalysis import (get_latest_backtest_filename, load_backtest_data, load_backtest_stats) @@ -153,22 +153,12 @@ def _do_group_table_output(bigdf, glist): logger.warning("Invalid group mask specified.") -def _select_rows_within_dates(df, date_start=None, date_end=None): - dtfmt = "%Y%m%d" - try: - bool(datetime.strptime(date_start, dtfmt)) - bool(datetime.strptime(date_end, dtfmt)) - except ValueError: - logger.error("Invalid start and/or end date provided. Use YYYYMMDD.") - return None - except TypeError: - return df - - if (date_start is not None): - df = df.loc[(df['date'] >= date_start)] - - if (date_end is not None): - df = df.loc[(df['date'] < date_end)] +def _select_rows_within_dates(df, timerange=None, df_date_col: str = 'date'): + if timerange: + if timerange.starttype == 'date': + df = df.loc[(df[df_date_col] >= timerange.startdt)] + if timerange.stoptype == 'date': + df = df.loc[(df[df_date_col] < timerange.stopdt)] return df @@ -183,12 +173,12 @@ def _select_rows_by_tags(df, enter_reason_list, exit_reason_list): def prepare_results(analysed_trades, stratname, enter_reason_list, exit_reason_list, - date_start=None, date_end=None): + timerange=None): res_df = pd.DataFrame() for pair, trades in analysed_trades[stratname].items(): res_df = pd.concat([res_df, trades], ignore_index=True) - res_df = _select_rows_within_dates(res_df, date_start, date_end) + res_df = _select_rows_within_dates(res_df, timerange) if res_df is not None and res_df.shape[0] > 0 and ('enter_reason' in res_df.columns): res_df = _select_rows_by_tags(res_df, enter_reason_list, exit_reason_list) @@ -236,8 +226,9 @@ def process_entry_exit_reasons(config: Config): enter_reason_list = config.get('enter_reason_list', ["all"]) exit_reason_list = config.get('exit_reason_list', ["all"]) indicator_list = config.get('indicator_list', []) - analysis_date_start = config.get('analysis_date_start', None) - analysis_date_end = config.get('analysis_date_end', None) + + timerange = TimeRange.parse_timerange(None if config.get( + 'timerange') is None else str(config.get('timerange'))) backtest_stats = load_backtest_stats(config['exportfilename']) @@ -252,8 +243,7 @@ def process_entry_exit_reasons(config: Config): res_df = prepare_results(analysed_trades_dict, 
strategy_name, enter_reason_list, exit_reason_list, - date_start=analysis_date_start, - date_end=analysis_date_end) + timerange=timerange) print_results(res_df, analysis_groups, diff --git a/tests/data/test_entryexitanalysis.py b/tests/data/test_entryexitanalysis.py index 8daca1a67..e33ed4955 100755 --- a/tests/data/test_entryexitanalysis.py +++ b/tests/data/test_entryexitanalysis.py @@ -191,10 +191,8 @@ def test_backtest_analysis_nomock(default_conf, mocker, caplog, testdatadir, tmp assert '2.5' in captured.out # test date filtering - args = get_args(base_args + - ['--analysis-date-start', "20180129", - '--analysis-date-end', "20180130"] - ) + args = get_args(base_args + ['--timerange', "20180129-20180130"]) start_analysis_entries_exits(args) captured = capsys.readouterr() + assert 'enter_tag_long_a' in captured.out assert 'enter_tag_long_b' not in captured.out From fe00a651632e040860b70a80140c62487588199c Mon Sep 17 00:00:00 2001 From: Emre Date: Sun, 27 Nov 2022 21:34:07 +0300 Subject: [PATCH 19/71] FIx custom reward link --- docs/freqai-reinforcement-learning.md | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/docs/freqai-reinforcement-learning.md b/docs/freqai-reinforcement-learning.md index 241ccc3e2..741a9bbb4 100644 --- a/docs/freqai-reinforcement-learning.md +++ b/docs/freqai-reinforcement-learning.md @@ -1,14 +1,14 @@ # Reinforcement Learning !!! Note "Installation size" - Reinforcement learning dependencies include large packages such as `torch`, which should be explicitly requested during `./setup.sh -i` by answering "y" to the question "Do you also want dependencies for freqai-rl (~700mb additional space required) [y/N]?". + Reinforcement learning dependencies include large packages such as `torch`, which should be explicitly requested during `./setup.sh -i` by answering "y" to the question "Do you also want dependencies for freqai-rl (~700mb additional space required) [y/N]?". Users who prefer docker should ensure they use the docker image appended with `_freqairl`. ## Background and terminology ### What is RL and why does FreqAI need it? -Reinforcement learning involves two important components, the *agent* and the training *environment*. During agent training, the agent moves through historical data candle by candle, always making 1 of a set of actions: Long entry, long exit, short entry, short exit, neutral). During this training process, the environment tracks the performance of these actions and rewards the agent according to a custom user made `calculate_reward()` (here we offer a default reward for users to build on if they wish [details here](#creating-the-reward)). The reward is used to train weights in a neural network. +Reinforcement learning involves two important components, the *agent* and the training *environment*. During agent training, the agent moves through historical data candle by candle, always making 1 of a set of actions: Long entry, long exit, short entry, short exit, neutral). During this training process, the environment tracks the performance of these actions and rewards the agent according to a custom user made `calculate_reward()` (here we offer a default reward for users to build on if they wish [details here](#creating-a-custom-reward-function)). The reward is used to train weights in a neural network. A second important component of the FreqAI RL implementation is the use of *state* information. 
State information is fed into the network at each step, including current profit, current position, and current trade duration. These are used to train the agent in the training environment, and to reinforce the agent in dry/live (this functionality is not available in backtesting). *FreqAI + Freqtrade is a perfect match for this reinforcing mechanism since this information is readily available in live deployments.* @@ -16,9 +16,9 @@ Reinforcement learning is a natural progression for FreqAI, since it adds a new ### The RL interface -With the current framework, we aim to expose the training environment via the common "prediction model" file, which is a user inherited `BaseReinforcementLearner` object (e.g. `freqai/prediction_models/ReinforcementLearner`). Inside this user class, the RL environment is available and customized via `MyRLEnv` as [shown below](#creating-the-reward). +With the current framework, we aim to expose the training environment via the common "prediction model" file, which is a user inherited `BaseReinforcementLearner` object (e.g. `freqai/prediction_models/ReinforcementLearner`). Inside this user class, the RL environment is available and customized via `MyRLEnv` as [shown below](#creating-a-custom-reward-function). -We envision the majority of users focusing their effort on creative design of the `calculate_reward()` function [details here](#creating-the-reward), while leaving the rest of the environment untouched. Other users may not touch the environment at all, and they will only play with the configuration settings and the powerful feature engineering that already exists in FreqAI. Meanwhile, we enable advanced users to create their own model classes entirely. +We envision the majority of users focusing their effort on creative design of the `calculate_reward()` function [details here](#creating-a-custom-reward-function), while leaving the rest of the environment untouched. Other users may not touch the environment at all, and they will only play with the configuration settings and the powerful feature engineering that already exists in FreqAI. Meanwhile, we enable advanced users to create their own model classes entirely. The framework is built on stable_baselines3 (torch) and OpenAI gym for the base environment class. But generally speaking, the model class is well isolated. Thus, the addition of competing libraries can be easily integrated into the existing framework. For the environment, it is inheriting from `gym.env` which means that it is necessary to write an entirely new environment in order to switch to a different library. @@ -130,7 +130,7 @@ After users realize there are no labels to set, they will soon understand that t return df ``` -It is important to consider that `&-action` depends on which environment they choose to use. The example above shows 5 actions, where 0 is neutral, 1 is enter long, 2 is exit long, 3 is enter short and 4 is exit short. +It is important to consider that `&-action` depends on which environment they choose to use. The example above shows 5 actions, where 0 is neutral, 1 is enter long, 2 is exit long, 3 is enter short and 4 is exit short. ## Configuring the Reinforcement Learner @@ -170,21 +170,21 @@ As you begin to modify the strategy and the prediction model, you will quickly r class MyCoolRLModel(ReinforcementLearner): """ - User created RL prediction model. + User created RL prediction model. 
Save this file to `freqtrade/user_data/freqaimodels` then use it with: freqtrade trade --freqaimodel MyCoolRLModel --config config.json --strategy SomeCoolStrat - - Here the users can override any of the functions - available in the `IFreqaiModel` inheritance tree. Most importantly for RL, this + + Here the users can override any of the functions + available in the `IFreqaiModel` inheritance tree. Most importantly for RL, this is where the user overrides `MyRLEnv` (see below), to define custom `calculate_reward()` function, or to override any other parts of the environment. - + This class also allows users to override any other part of the IFreqaiModel tree. - For example, the user can override `def fit()` or `def train()` or `def predict()` + For example, the user can override `def fit()` or `def train()` or `def predict()` to take fine-tuned control over these processes. Another common override may be `def data_cleaning_predict()` where the user can @@ -253,7 +253,7 @@ FreqAI provides two base environments, `Base4ActionEnvironment` and `Base5Action * the actions available in the `calculate_reward` * the actions consumed by the user strategy -Both of the FreqAI provided environments inherit from an action/position agnostic environment object called the `BaseEnvironment`, which contains all shared logic. The architecture is designed to be easily customized. The simplest customization is the `calculate_reward()` (see details [here](#creating-the-reward)). However, the customizations can be further extended into any of the functions inside the environment. You can do this by simply overriding those functions inside your `MyRLEnv` in the prediction model file. Or for more advanced customizations, it is encouraged to create an entirely new environment inherited from `BaseEnvironment`. +Both of the FreqAI provided environments inherit from an action/position agnostic environment object called the `BaseEnvironment`, which contains all shared logic. The architecture is designed to be easily customized. The simplest customization is the `calculate_reward()` (see details [here](#creating-a-custom-reward-function)). However, the customizations can be further extended into any of the functions inside the environment. You can do this by simply overriding those functions inside your `MyRLEnv` in the prediction model file. Or for more advanced customizations, it is encouraged to create an entirely new environment inherited from `BaseEnvironment`. !!! Note FreqAI does not provide by default, a long-only training environment. However, creating one should be as simple as copy-pasting one of the built in environments and removing the `short` actions (and all associated references to those). From 5b5859238b45795852d06ef6bc7d82681b9dee6a Mon Sep 17 00:00:00 2001 From: Emre Date: Sun, 27 Nov 2022 22:06:14 +0300 Subject: [PATCH 20/71] Fix typo --- docs/freqai-reinforcement-learning.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/freqai-reinforcement-learning.md b/docs/freqai-reinforcement-learning.md index 741a9bbb4..ae3f67ed1 100644 --- a/docs/freqai-reinforcement-learning.md +++ b/docs/freqai-reinforcement-learning.md @@ -24,7 +24,7 @@ The framework is built on stable_baselines3 (torch) and OpenAI gym for the base ### Important considerations -As explained above, the agent is "trained" in an artificial trading "environment". In our case, that environment may seem quite similar to a real Freqtrade backtesting environment, but it is *NOT*. 
In fact, the RL trading environment is much more simplified. It does not incorporate any of the complicated strategy logic, such as callbacks such as `custom_exit`, `custom_stoploss`, leverage controls, etc. The RL environment is instead a very "raw" representation of the true market, where the agent has free-will to learn the policy (read: stoploss, take profit, ect) which is enforced by the `calculate_reward()`. Thus, it is important to consider that the agent training environment is not identical to the real world. +As explained above, the agent is "trained" in an artificial trading "environment". In our case, that environment may seem quite similar to a real Freqtrade backtesting environment, but it is *NOT*. In fact, the RL trading environment is much more simplified. It does not incorporate any of the complicated strategy logic, such as callbacks such as `custom_exit`, `custom_stoploss`, leverage controls, etc. The RL environment is instead a very "raw" representation of the true market, where the agent has free-will to learn the policy (read: stoploss, take profit, etc.) which is enforced by the `calculate_reward()`. Thus, it is important to consider that the agent training environment is not identical to the real world. ## Running Reinforcement Learning From a85602eb9c68f5bb36cbb2e2aae2c61ad2518bc1 Mon Sep 17 00:00:00 2001 From: Joe Schr <8218910+TheJoeSchr@users.noreply.github.com> Date: Tue, 22 Nov 2022 11:41:28 +0100 Subject: [PATCH 21/71] add "how to run tests" --- docs/developer.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/docs/developer.md b/docs/developer.md index b4961ac77..94923b035 100644 --- a/docs/developer.md +++ b/docs/developer.md @@ -49,6 +49,13 @@ For more information about the [Remote container extension](https://code.visuals New code should be covered by basic unittests. Depending on the complexity of the feature, Reviewers may request more in-depth unittests. If necessary, the Freqtrade team can assist and give guidance with writing good tests (however please don't expect anyone to write the tests for you). +#### How to run tests + +Use `py.test` in root folder to run all available testcases and confirm your local environment is setup correctly + +!!! Note "develop branch" + This assumes that you have `stable` branch checked out. Other branches may be work in progress with tests not working yet. + #### Checking log content in tests Freqtrade uses 2 main methods to check log content in tests, `log_has()` and `log_has_re()` (to check using regex, in case of dynamic log-messages). From 320535a227357366c3211175bfdae5d46c541680 Mon Sep 17 00:00:00 2001 From: Matthias Date: Sun, 27 Nov 2022 20:06:10 +0100 Subject: [PATCH 22/71] improve tests doc wording --- docs/developer.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/developer.md b/docs/developer.md index 94923b035..ea2e36ce1 100644 --- a/docs/developer.md +++ b/docs/developer.md @@ -51,10 +51,10 @@ If necessary, the Freqtrade team can assist and give guidance with writing good #### How to run tests -Use `py.test` in root folder to run all available testcases and confirm your local environment is setup correctly +Use `pytest` in root folder to run all available testcases and confirm your local environment is setup correctly -!!! Note "develop branch" - This assumes that you have `stable` branch checked out. Other branches may be work in progress with tests not working yet. +!!! Note "feature branches" + Tests are expected to pass on the `develop` and `stable` branches. 
Other branches may be work in progress with tests not working yet. #### Checking log content in tests From a02da08065a305ed822c5795befff39afdbc97c3 Mon Sep 17 00:00:00 2001 From: Emre Date: Sun, 27 Nov 2022 22:23:00 +0300 Subject: [PATCH 23/71] Fix typo --- docs/freqai-reinforcement-learning.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/freqai-reinforcement-learning.md b/docs/freqai-reinforcement-learning.md index ae3f67ed1..226c02919 100644 --- a/docs/freqai-reinforcement-learning.md +++ b/docs/freqai-reinforcement-learning.md @@ -95,7 +95,7 @@ Most of the function remains the same as for typical Regressors, however, the fu informative[f"%-{pair}raw_low"] = informative["low"] ``` -Finally, there is no explicit "label" to make - instead the you need to assign the `&-action` column which will contain the agent's actions when accessed in `populate_entry/exit_trends()`. In the present example, the neutral action to 0. This value should align with the environment used. FreqAI provides two environments, both use 0 as the neutral action. +Finally, there is no explicit "label" to make - instead the user need to assign the `&-action` column which will contain the agent's actions when accessed in `populate_entry/exit_trends()`. In the present example, the neutral action to 0. This value should align with the environment used. FreqAI provides two environments, both use 0 as the neutral action. After users realize there are no labels to set, they will soon understand that the agent is making its "own" entry and exit decisions. This makes strategy construction rather simple. The entry and exit signals come from the agent in the form of an integer - which are used directly to decide entries and exits in the strategy: From 67d94692774eade7cb25c9ddb22ba81a5ce65ee0 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sun, 27 Nov 2022 20:42:04 +0100 Subject: [PATCH 24/71] small wording fix --- docs/freqai-reinforcement-learning.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/freqai-reinforcement-learning.md b/docs/freqai-reinforcement-learning.md index 226c02919..d690c7645 100644 --- a/docs/freqai-reinforcement-learning.md +++ b/docs/freqai-reinforcement-learning.md @@ -24,7 +24,7 @@ The framework is built on stable_baselines3 (torch) and OpenAI gym for the base ### Important considerations -As explained above, the agent is "trained" in an artificial trading "environment". In our case, that environment may seem quite similar to a real Freqtrade backtesting environment, but it is *NOT*. In fact, the RL trading environment is much more simplified. It does not incorporate any of the complicated strategy logic, such as callbacks such as `custom_exit`, `custom_stoploss`, leverage controls, etc. The RL environment is instead a very "raw" representation of the true market, where the agent has free-will to learn the policy (read: stoploss, take profit, etc.) which is enforced by the `calculate_reward()`. Thus, it is important to consider that the agent training environment is not identical to the real world. +As explained above, the agent is "trained" in an artificial trading "environment". In our case, that environment may seem quite similar to a real Freqtrade backtesting environment, but it is *NOT*. In fact, the RL training environment is much more simplified. It does not incorporate any of the complicated strategy logic, such as callbacks like `custom_exit`, `custom_stoploss`, leverage controls, etc. 
The RL environment is instead a very "raw" representation of the true market, where the agent has free-will to learn the policy (read: stoploss, take profit, etc.) which is enforced by the `calculate_reward()`. Thus, it is important to consider that the agent training environment is not identical to the real world. ## Running Reinforcement Learning @@ -95,7 +95,7 @@ Most of the function remains the same as for typical Regressors, however, the fu informative[f"%-{pair}raw_low"] = informative["low"] ``` -Finally, there is no explicit "label" to make - instead the user need to assign the `&-action` column which will contain the agent's actions when accessed in `populate_entry/exit_trends()`. In the present example, the neutral action to 0. This value should align with the environment used. FreqAI provides two environments, both use 0 as the neutral action. +Finally, there is no explicit "label" to make - instead it is necessary to assign the `&-action` column which will contain the agent's actions when accessed in `populate_entry/exit_trends()`. In the present example, the neutral action to 0. This value should align with the environment used. FreqAI provides two environments, both use 0 as the neutral action. After users realize there are no labels to set, they will soon understand that the agent is making its "own" entry and exit decisions. This makes strategy construction rather simple. The entry and exit signals come from the agent in the form of an integer - which are used directly to decide entries and exits in the strategy: From 64d4a52a5615ff9d5ddc2be693d8a79c002d0c9f Mon Sep 17 00:00:00 2001 From: richardjozsa Date: Sun, 27 Nov 2022 20:43:50 +0100 Subject: [PATCH 25/71] Improve the RL learning process Improve the RL learning process by selecting random start point for the agent, it can help to block the agent to only learn on the selected period of time, while improving the quality of the model. 
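In plain terms, the change below draws each training episode's starting tick at random from the early part of the data window instead of always starting right after the observation window. A minimal, self-contained sketch of that idea is shown here; `window_size` and `end_tick` mirror attribute names used in `BaseEnvironment`, while the function itself is an illustrative stand-in rather than FreqAI code:

```python
import random


def pick_episode_start(window_size: int, end_tick: int) -> int:
    """Draw a random start tick for a training episode.

    The start lands in the first quarter of the available data, but never
    earlier than one candle past the observation window, so each episode
    still has a full window of history behind it.
    """
    upper_bound = int(end_tick / 4)  # assumes end_tick / 4 > window_size + 1
    return random.randint(window_size + 1, upper_bound)


# With a 10-candle window over 10_000 candles, episodes now start anywhere
# between tick 11 and tick 2500 instead of always at tick 10.
print(pick_episode_start(window_size=10, end_tick=10_000))
```

Capping the start at a quarter of the data keeps the bulk of the candles available after the chosen start, so the agent never begins an episode with almost no future left to act on.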
--- freqtrade/freqai/RL/BaseEnvironment.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/freqtrade/freqai/RL/BaseEnvironment.py b/freqtrade/freqai/RL/BaseEnvironment.py index 3332e5a18..5d881ba32 100644 --- a/freqtrade/freqai/RL/BaseEnvironment.py +++ b/freqtrade/freqai/RL/BaseEnvironment.py @@ -9,6 +9,7 @@ import pandas as pd from gym import spaces from gym.utils import seeding from pandas import DataFrame +import random from freqtrade.data.dataprovider import DataProvider @@ -121,6 +122,9 @@ class BaseEnvironment(gym.Env): self._done = False if self.starting_point is True: + length_of_data = int(self._end_tick/4) + start_tick = random.randint(self.window_size+1, length_of_data) + self._start_tick = start_tick self._position_history = (self._start_tick * [None]) + [self._position] else: self._position_history = (self.window_size * [None]) + [self._position] From 25e041b98eabd62513d4c4494ed9e2b12100dd6e Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sun, 27 Nov 2022 20:50:03 +0100 Subject: [PATCH 26/71] sneak in small change to FreqaiExampleHybridStrategy docstring and startup count --- freqtrade/templates/FreqaiExampleHybridStrategy.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/freqtrade/templates/FreqaiExampleHybridStrategy.py b/freqtrade/templates/FreqaiExampleHybridStrategy.py index 26335956f..9d1842cd7 100644 --- a/freqtrade/templates/FreqaiExampleHybridStrategy.py +++ b/freqtrade/templates/FreqaiExampleHybridStrategy.py @@ -19,7 +19,7 @@ class FreqaiExampleHybridStrategy(IStrategy): Launching this strategy would be: - freqtrade trade --strategy FreqaiExampleHyridStrategy --strategy-path freqtrade/templates + freqtrade trade --strategy FreqaiExampleHybridStrategy --strategy-path freqtrade/templates --freqaimodel CatboostClassifier --config config_examples/config_freqai.example.json or the user simply adds this to their config: @@ -86,7 +86,7 @@ class FreqaiExampleHybridStrategy(IStrategy): process_only_new_candles = True stoploss = -0.05 use_exit_signal = True - startup_candle_count: int = 300 + startup_candle_count: int = 30 can_short = True # Hyperoptable parameters From 7fd6bc526e38537a8595abcbe562af6ac6f53729 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sun, 27 Nov 2022 21:03:13 +0100 Subject: [PATCH 27/71] add randomize_starting_position to the rl_config --- docs/freqai-parameter-table.md | 1 + freqtrade/constants.py | 1 + freqtrade/freqai/RL/BaseEnvironment.py | 7 ++++--- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/docs/freqai-parameter-table.md b/docs/freqai-parameter-table.md index 02426ec13..f2a52a9b8 100644 --- a/docs/freqai-parameter-table.md +++ b/docs/freqai-parameter-table.md @@ -82,6 +82,7 @@ Mandatory parameters are marked as **Required** and have to be set in one of the | `model_reward_parameters` | Parameters used inside the customizable `calculate_reward()` function in `ReinforcementLearner.py`
<br> **Datatype:** int.
| `add_state_info` | Tell FreqAI to include state information in the feature set for training and inferencing. The current state variables include trade duration, current profit, trade position. This is only available in dry/live runs, and is automatically switched to false for backtesting. <br> **Datatype:** bool. <br> Default: `False`.
| `net_arch` | Network architecture which is well described in [`stable_baselines3` doc](https://stable-baselines3.readthedocs.io/en/master/guide/custom_policy.html#examples). In summary: `[<shared layers>, dict(vf=[<non-shared value network layers>], pi=[<non-shared policy network layers>])]`. By default this is set to `[128, 128]`, which defines 2 shared hidden layers with 128 units each.
+| `randomize_starting_position` | Randomize the starting point of each episode to avoid overfitting. <br> **Datatype:** bool. <br>
Default: `False`. ### Additional parameters diff --git a/freqtrade/constants.py b/freqtrade/constants.py index 878c38929..d869b89f6 100644 --- a/freqtrade/constants.py +++ b/freqtrade/constants.py @@ -591,6 +591,7 @@ CONF_SCHEMA = { "model_type": {"type": "string", "default": "PPO"}, "policy_type": {"type": "string", "default": "MlpPolicy"}, "net_arch": {"type": "array", "default": [128, 128]}, + "randomize_startinng_position": {"type": "boolean", "default": False}, "model_reward_parameters": { "type": "object", "properties": { diff --git a/freqtrade/freqai/RL/BaseEnvironment.py b/freqtrade/freqai/RL/BaseEnvironment.py index 5d881ba32..8f940dd1b 100644 --- a/freqtrade/freqai/RL/BaseEnvironment.py +++ b/freqtrade/freqai/RL/BaseEnvironment.py @@ -122,9 +122,10 @@ class BaseEnvironment(gym.Env): self._done = False if self.starting_point is True: - length_of_data = int(self._end_tick/4) - start_tick = random.randint(self.window_size+1, length_of_data) - self._start_tick = start_tick + if self.rl_config.get('randomize_starting_position', False): + length_of_data = int(self._end_tick / 4) + start_tick = random.randint(self.window_size + 1, length_of_data) + self._start_tick = start_tick self._position_history = (self._start_tick * [None]) + [self._position] else: self._position_history = (self.window_size * [None]) + [self._position] From 56518def42fab1fd3d89f12bcda281a1eff11ef7 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sun, 27 Nov 2022 21:06:01 +0100 Subject: [PATCH 28/71] isort --- freqtrade/freqai/RL/BaseEnvironment.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/freqtrade/freqai/RL/BaseEnvironment.py b/freqtrade/freqai/RL/BaseEnvironment.py index 8f940dd1b..66bdb8435 100644 --- a/freqtrade/freqai/RL/BaseEnvironment.py +++ b/freqtrade/freqai/RL/BaseEnvironment.py @@ -1,4 +1,5 @@ import logging +import random from abc import abstractmethod from enum import Enum from typing import Optional @@ -9,7 +10,6 @@ import pandas as pd from gym import spaces from gym.utils import seeding from pandas import DataFrame -import random from freqtrade.data.dataprovider import DataProvider From f21dbbd8bb54a42203db28d28b017036e5e62d65 Mon Sep 17 00:00:00 2001 From: Emre Date: Mon, 28 Nov 2022 00:06:02 +0300 Subject: [PATCH 29/71] Update imports of custom model --- docs/freqai-reinforcement-learning.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/freqai-reinforcement-learning.md b/docs/freqai-reinforcement-learning.md index d690c7645..353d7a2cc 100644 --- a/docs/freqai-reinforcement-learning.md +++ b/docs/freqai-reinforcement-learning.md @@ -166,7 +166,8 @@ As you begin to modify the strategy and the prediction model, you will quickly r ```python from freqtrade.freqai.prediction_models.ReinforcementLearner import ReinforcementLearner - from freqtrade.freqai.RL.Base5ActionRLEnv import Base5ActionRLEnv + from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Base5ActionRLEnv, Positions + class MyCoolRLModel(ReinforcementLearner): """ From 49e41925b01bfd4f66de4893afaa399f4347e829 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 28 Nov 2022 03:00:42 +0000 Subject: [PATCH 30/71] Bump flake8 from 5.0.4 to 6.0.0 Bumps [flake8](https://github.com/pycqa/flake8) from 5.0.4 to 6.0.0. 
- [Release notes](https://github.com/pycqa/flake8/releases) - [Commits](https://github.com/pycqa/flake8/compare/5.0.4...6.0.0) --- updated-dependencies: - dependency-name: flake8 dependency-type: direct:development update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] --- requirements-dev.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements-dev.txt b/requirements-dev.txt index b46c244b5..ffce3d696 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -7,7 +7,7 @@ -r docs/requirements-docs.txt coveralls==3.3.1 -flake8==5.0.4 +flake8==6.0.0 flake8-tidy-imports==4.8.0 mypy==0.991 pre-commit==2.20.0 From 7e75bc8fcf40e8f250e6c0cd082b87051c081d3d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 28 Nov 2022 03:00:48 +0000 Subject: [PATCH 31/71] Bump sb3-contrib from 1.6.1 to 1.6.2 Bumps [sb3-contrib](https://github.com/Stable-Baselines-Team/stable-baselines3-contrib) from 1.6.1 to 1.6.2. - [Release notes](https://github.com/Stable-Baselines-Team/stable-baselines3-contrib/releases) - [Commits](https://github.com/Stable-Baselines-Team/stable-baselines3-contrib/compare/v1.6.1...v1.6.2) --- updated-dependencies: - dependency-name: sb3-contrib dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- requirements-freqai-rl.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements-freqai-rl.txt b/requirements-freqai-rl.txt index b6bd7ef15..2a0a04455 100644 --- a/requirements-freqai-rl.txt +++ b/requirements-freqai-rl.txt @@ -5,4 +5,4 @@ torch==1.12.1 stable-baselines3==1.6.1 gym==0.21 -sb3-contrib==1.6.1 +sb3-contrib==1.6.2 From 5aec51a16c37d44993e36d5cacfd8c01d464a93b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 28 Nov 2022 03:00:55 +0000 Subject: [PATCH 32/71] Bump urllib3 from 1.26.12 to 1.26.13 Bumps [urllib3](https://github.com/urllib3/urllib3) from 1.26.12 to 1.26.13. - [Release notes](https://github.com/urllib3/urllib3/releases) - [Changelog](https://github.com/urllib3/urllib3/blob/1.26.13/CHANGES.rst) - [Commits](https://github.com/urllib3/urllib3/compare/1.26.12...1.26.13) --- updated-dependencies: - dependency-name: urllib3 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index a9555b90c..881ae04ae 100644 --- a/requirements.txt +++ b/requirements.txt @@ -12,7 +12,7 @@ python-telegram-bot==13.14 arrow==1.2.3 cachetools==4.2.2 requests==2.28.1 -urllib3==1.26.12 +urllib3==1.26.13 jsonschema==4.17.0 TA-Lib==0.4.25 technical==1.3.0 From 924bbad199a0d65147d6208ae9e3d20136bbab9e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 28 Nov 2022 03:00:58 +0000 Subject: [PATCH 33/71] Bump pyarrow from 10.0.0 to 10.0.1 Bumps [pyarrow](https://github.com/apache/arrow) from 10.0.0 to 10.0.1. - [Release notes](https://github.com/apache/arrow/releases) - [Commits](https://github.com/apache/arrow/compare/go/v10.0.0...go/v10.0.1) --- updated-dependencies: - dependency-name: pyarrow dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index a9555b90c..cde6b0344 100644 --- a/requirements.txt +++ b/requirements.txt @@ -22,7 +22,7 @@ jinja2==3.1.2 tables==3.7.0 blosc==1.10.6 joblib==1.2.0 -pyarrow==10.0.0; platform_machine != 'armv7l' +pyarrow==10.0.1; platform_machine != 'armv7l' # find first, C search in arrays py_find_1st==1.1.5 From a46b09d400ec78d0b278c14212728df6b6c46345 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 28 Nov 2022 03:01:01 +0000 Subject: [PATCH 34/71] Bump prompt-toolkit from 3.0.32 to 3.0.33 Bumps [prompt-toolkit](https://github.com/prompt-toolkit/python-prompt-toolkit) from 3.0.32 to 3.0.33. - [Release notes](https://github.com/prompt-toolkit/python-prompt-toolkit/releases) - [Changelog](https://github.com/prompt-toolkit/python-prompt-toolkit/blob/master/CHANGELOG) - [Commits](https://github.com/prompt-toolkit/python-prompt-toolkit/compare/3.0.32...3.0.33) --- updated-dependencies: - dependency-name: prompt-toolkit dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index a9555b90c..f598d2377 100644 --- a/requirements.txt +++ b/requirements.txt @@ -47,7 +47,7 @@ psutil==5.9.4 colorama==0.4.6 # Building config files interactively questionary==1.10.0 -prompt-toolkit==3.0.32 +prompt-toolkit==3.0.33 # Extensions to datetime library python-dateutil==2.8.2 From 348731598e633d23576f0b89f69423021c396c5c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 28 Nov 2022 03:01:25 +0000 Subject: [PATCH 35/71] Bump ccxt from 2.1.96 to 2.2.36 Bumps [ccxt](https://github.com/ccxt/ccxt) from 2.1.96 to 2.2.36. - [Release notes](https://github.com/ccxt/ccxt/releases) - [Changelog](https://github.com/ccxt/ccxt/blob/master/exchanges.cfg) - [Commits](https://github.com/ccxt/ccxt/compare/2.1.96...2.2.36) --- updated-dependencies: - dependency-name: ccxt dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index a9555b90c..2cb829c3d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,7 +2,7 @@ numpy==1.23.5 pandas==1.5.1 pandas-ta==0.3.14b -ccxt==2.1.96 +ccxt==2.2.36 # Pin cryptography for now due to rust build errors with piwheels cryptography==38.0.1; platform_machine == 'armv7l' cryptography==38.0.3; platform_machine != 'armv7l' From 9c28cc810d4ee384773d02481226686b2cbc9715 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 28 Nov 2022 05:33:45 +0000 Subject: [PATCH 36/71] Bump cryptography from 38.0.1 to 38.0.4 Bumps [cryptography](https://github.com/pyca/cryptography) from 38.0.1 to 38.0.4. - [Release notes](https://github.com/pyca/cryptography/releases) - [Changelog](https://github.com/pyca/cryptography/blob/main/CHANGELOG.rst) - [Commits](https://github.com/pyca/cryptography/compare/38.0.1...38.0.4) --- updated-dependencies: - dependency-name: cryptography dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 2cb829c3d..2e5293cf6 100644 --- a/requirements.txt +++ b/requirements.txt @@ -5,7 +5,7 @@ pandas-ta==0.3.14b ccxt==2.2.36 # Pin cryptography for now due to rust build errors with piwheels cryptography==38.0.1; platform_machine == 'armv7l' -cryptography==38.0.3; platform_machine != 'armv7l' +cryptography==38.0.4; platform_machine != 'armv7l' aiohttp==3.8.3 SQLAlchemy==1.4.44 python-telegram-bot==13.14 From d73fd42769298721c3a2306540263d74c9172ed9 Mon Sep 17 00:00:00 2001 From: Matthias Date: Mon, 28 Nov 2022 06:38:35 +0100 Subject: [PATCH 37/71] Fix flake8 error introduced with 6.0 update --- freqtrade/persistence/pairlock_middleware.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/freqtrade/persistence/pairlock_middleware.py b/freqtrade/persistence/pairlock_middleware.py index ec57e91fc..69d8b098b 100644 --- a/freqtrade/persistence/pairlock_middleware.py +++ b/freqtrade/persistence/pairlock_middleware.py @@ -87,7 +87,7 @@ class PairLocks(): Get the lock that expires the latest for the pair given. """ locks = PairLocks.get_pair_locks(pair, now, side=side) - locks = sorted(locks, key=lambda l: l.lock_end_time, reverse=True) + locks = sorted(locks, key=lambda lock: lock.lock_end_time, reverse=True) return locks[0] if locks else None @staticmethod From dc03317cc89c4a75359c866bf9673e9305bde0f3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 28 Nov 2022 07:02:54 +0000 Subject: [PATCH 38/71] Bump jsonschema from 4.17.0 to 4.17.1 Bumps [jsonschema](https://github.com/python-jsonschema/jsonschema) from 4.17.0 to 4.17.1. - [Release notes](https://github.com/python-jsonschema/jsonschema/releases) - [Changelog](https://github.com/python-jsonschema/jsonschema/blob/main/CHANGELOG.rst) - [Commits](https://github.com/python-jsonschema/jsonschema/compare/v4.17.0...v4.17.1) --- updated-dependencies: - dependency-name: jsonschema dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index cc38bfc96..9ae85ac89 100644 --- a/requirements.txt +++ b/requirements.txt @@ -13,7 +13,7 @@ arrow==1.2.3 cachetools==4.2.2 requests==2.28.1 urllib3==1.26.13 -jsonschema==4.17.0 +jsonschema==4.17.1 TA-Lib==0.4.25 technical==1.3.0 tabulate==0.9.0 From 9880e9ab600832f6479bedeafbbce267ba92c6e3 Mon Sep 17 00:00:00 2001 From: Ikko Ashimine Date: Mon, 28 Nov 2022 17:10:17 +0900 Subject: [PATCH 39/71] Fix typo in strategy_analysis_example.md seperate -> separate --- docs/strategy_analysis_example.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/strategy_analysis_example.md b/docs/strategy_analysis_example.md index 1526ea038..bae4a9108 100644 --- a/docs/strategy_analysis_example.md +++ b/docs/strategy_analysis_example.md @@ -232,7 +232,7 @@ graph = generate_candlestick_graph(pair=pair, # Show graph inline # graph.show() -# Render graph in a seperate window +# Render graph in a separate window graph.show(renderer="browser") ``` From 05a7fca2424c2c10b85f4d5e44f8ba5fa26fdb4c Mon Sep 17 00:00:00 2001 From: Robert Davey Date: Mon, 28 Nov 2022 12:12:45 +0000 Subject: [PATCH 40/71] Fix utils docs for backtesting-analysis --- docs/utils.md | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/docs/utils.md b/docs/utils.md index e88a13a9a..e717a0f9c 100644 --- a/docs/utils.md +++ b/docs/utils.md @@ -722,8 +722,7 @@ usage: freqtrade backtesting-analysis [-h] [-v] [--logfile FILE] [-V] [--enter-reason-list ENTER_REASON_LIST [ENTER_REASON_LIST ...]] [--exit-reason-list EXIT_REASON_LIST [EXIT_REASON_LIST ...]] [--indicator-list INDICATOR_LIST [INDICATOR_LIST ...]] - [--analysis-date-start YYYYMMDD] - [--analysis-date-end YYYYMMDD] + [--timerange YYYYMMDD-[YYYYMMDD]] optional arguments: -h, --help show this help message and exit @@ -746,12 +745,10 @@ optional arguments: --indicator-list INDICATOR_LIST [INDICATOR_LIST ...] Comma separated list of indicators to analyse. e.g. 'close,rsi,bb_lowerband,profit_abs' - --analysis-date-start YYYYMMDD - Start date to filter trades for analysis (inclusive). e.g. - 20220101 - --analysis-date-end YYYYMMDD - End date to filter trades for analysis (exclusive). e.g. - 20220131 + --timerange YYYYMMDD-[YYYYMMDD] + Timerange to filter trades for analysis, + start inclusive, end exclusive. e.g. + 20220101-20220201 Common arguments: -v, --verbose Verbose mode (-vv for more, -vvv to get all messages). 
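To make the help text above concrete, a typical invocation that restricts the analysis to January 2022 could look like the following; the config file path is only a placeholder:

```bash
# Keep trades entered from 2022-01-01 (inclusive) up to 2022-02-01 (exclusive)
freqtrade backtesting-analysis -c user_data/config.json --timerange 20220101-20220201
```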
From 9cbfa1201113afeb143fb22b3b9ee4be125c5263 Mon Sep 17 00:00:00 2001 From: Emre Date: Mon, 28 Nov 2022 16:02:17 +0300 Subject: [PATCH 41/71] Directly set model_type in base RL model --- freqtrade/freqai/RL/BaseReinforcementLearningModel.py | 1 + freqtrade/freqai/data_drawer.py | 7 +------ 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 709ded048..e1381ab62 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -64,6 +64,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): self.policy_type = self.freqai_info['rl_config']['policy_type'] self.unset_outlier_removal() self.net_arch = self.rl_config.get('net_arch', [128, 128]) + self.dd.model_type = "stable_baselines" def unset_outlier_removal(self): """ diff --git a/freqtrade/freqai/data_drawer.py b/freqtrade/freqai/data_drawer.py index 3b9352efe..ab41240e9 100644 --- a/freqtrade/freqai/data_drawer.py +++ b/freqtrade/freqai/data_drawer.py @@ -99,12 +99,7 @@ class FreqaiDataDrawer: self.empty_pair_dict: pair_info = { "model_filename": "", "trained_timestamp": 0, "data_path": "", "extras": {}} - if 'Reinforcement' in self.config['freqaimodel']: - self.model_type = 'stable_baselines' - logger.warning('User passed a ReinforcementLearner model, FreqAI will ' - 'now use stable_baselines3 to save models.') - else: - self.model_type = self.freqai_info.get('model_save_type', 'joblib') + self.model_type = self.freqai_info.get('model_save_type', 'joblib') def update_metric_tracker(self, metric: str, value: float, pair: str) -> None: """ From e891c41760e38b41d57639933c1f986b6d8abcc3 Mon Sep 17 00:00:00 2001 From: Matthias Date: Mon, 28 Nov 2022 18:20:30 +0100 Subject: [PATCH 42/71] Fix typo in ipynb, too. --- freqtrade/templates/strategy_analysis_example.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/freqtrade/templates/strategy_analysis_example.ipynb b/freqtrade/templates/strategy_analysis_example.ipynb index 77444a023..5fb14ab2f 100644 --- a/freqtrade/templates/strategy_analysis_example.ipynb +++ b/freqtrade/templates/strategy_analysis_example.ipynb @@ -328,7 +328,7 @@ "# Show graph inline\n", "# graph.show()\n", "\n", - "# Render graph in a seperate window\n", + "# Render graph in a separate window\n", "graph.show(renderer=\"browser\")\n" ] }, From 8efa8bc78a445067637f51cbd952b2e55552831a Mon Sep 17 00:00:00 2001 From: Matthias Date: Mon, 28 Nov 2022 19:35:17 +0100 Subject: [PATCH 43/71] Update stable-baselines3 to 1.6.2 --- requirements-freqai-rl.txt | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/requirements-freqai-rl.txt b/requirements-freqai-rl.txt index 2a0a04455..df541c701 100644 --- a/requirements-freqai-rl.txt +++ b/requirements-freqai-rl.txt @@ -3,6 +3,7 @@ # Required for freqai-rl torch==1.12.1 -stable-baselines3==1.6.1 -gym==0.21 +stable-baselines3==1.6.2 sb3-contrib==1.6.2 +# Gym is forced to this version by stable-baselines3. 
+gym==0.21 From 2c75b5e027d137b55904d78aeadc0063291b876a Mon Sep 17 00:00:00 2001 From: Matthias Date: Mon, 28 Nov 2022 13:26:27 +0000 Subject: [PATCH 44/71] Extract "live" test from regular tests --- .github/workflows/ci.yml | 55 +++++++++++++++++++++++++++++++++++----- 1 file changed, 48 insertions(+), 7 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 0d5a7540d..334f7bec3 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -66,12 +66,6 @@ jobs: - name: Tests run: | pytest --random-order --cov=freqtrade --cov-config=.coveragerc - if: matrix.python-version != '3.9' || matrix.os != 'ubuntu-22.04' - - - name: Tests incl. ccxt compatibility tests - run: | - pytest --random-order --cov=freqtrade --cov-config=.coveragerc --longrun - if: matrix.python-version == '3.9' && matrix.os == 'ubuntu-22.04' - name: Coveralls if: (runner.os == 'Linux' && matrix.python-version == '3.10' && matrix.os == 'ubuntu-22.04') @@ -310,9 +304,56 @@ jobs: details: Freqtrade doc test failed! webhookUrl: ${{ secrets.DISCORD_WEBHOOK }} + + build_linux_online: + # Run pytest with "live" checks + runs-on: ubuntu-22.04 + # permissions: + steps: + - uses: actions/checkout@v3 + + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: "3.9" + + - name: Cache_dependencies + uses: actions/cache@v3 + id: cache + with: + path: ~/dependencies/ + key: ${{ runner.os }}-dependencies + + - name: pip cache (linux) + uses: actions/cache@v3 + if: runner.os == 'Linux' + with: + path: ~/.cache/pip + key: test-${{ matrix.os }}-${{ matrix.python-version }}-pip + + - name: TA binary *nix + if: steps.cache.outputs.cache-hit != 'true' + run: | + cd build_helpers && ./install_ta-lib.sh ${HOME}/dependencies/; cd .. + + - name: Installation - *nix + if: runner.os == 'Linux' + run: | + python -m pip install --upgrade pip wheel + export LD_LIBRARY_PATH=${HOME}/dependencies/lib:$LD_LIBRARY_PATH + export TA_LIBRARY_PATH=${HOME}/dependencies/lib + export TA_INCLUDE_PATH=${HOME}/dependencies/include + pip install -r requirements-dev.txt + pip install -e . + + - name: Tests incl. 
ccxt compatibility tests + run: | + pytest --random-order --cov=freqtrade --cov-config=.coveragerc --longrun + + # Notify only once - when CI completes (and after deploy) in case it's successfull notify-complete: - needs: [ build_linux, build_macos, build_windows, docs_check, mypy_version_check, pre-commit ] + needs: [ build_linux, build_macos, build_windows, docs_check, mypy_version_check, pre-commit, build_linux_online ] runs-on: ubuntu-22.04 # Discord notification can't handle schedule events if: (github.event_name != 'schedule') From 5500c10f7853eeb09c08c59490807f2fb9df217f Mon Sep 17 00:00:00 2001 From: Matthias Date: Mon, 28 Nov 2022 19:40:43 +0100 Subject: [PATCH 45/71] Improve CI file layout --- .github/workflows/ci.yml | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 334f7bec3..e730d1489 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -353,7 +353,15 @@ jobs: # Notify only once - when CI completes (and after deploy) in case it's successfull notify-complete: - needs: [ build_linux, build_macos, build_windows, docs_check, mypy_version_check, pre-commit, build_linux_online ] + needs: [ + build_linux, + build_macos, + build_windows, + docs_check, + mypy_version_check, + pre-commit, + build_linux_online + ] runs-on: ubuntu-22.04 # Discord notification can't handle schedule events if: (github.event_name != 'schedule') From b87545cd1256faf439313c47dad0bed89267fb5b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 29 Nov 2022 07:46:38 +0000 Subject: [PATCH 46/71] Bump torch from 1.12.1 to 1.13.0 Bumps [torch](https://github.com/pytorch/pytorch) from 1.12.1 to 1.13.0. - [Release notes](https://github.com/pytorch/pytorch/releases) - [Changelog](https://github.com/pytorch/pytorch/blob/master/RELEASE.md) - [Commits](https://github.com/pytorch/pytorch/compare/v1.12.1...v1.13.0) --- updated-dependencies: - dependency-name: torch dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- requirements-freqai-rl.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements-freqai-rl.txt b/requirements-freqai-rl.txt index df541c701..67bd66102 100644 --- a/requirements-freqai-rl.txt +++ b/requirements-freqai-rl.txt @@ -2,7 +2,7 @@ -r requirements-freqai.txt # Required for freqai-rl -torch==1.12.1 +torch==1.13.0 stable-baselines3==1.6.2 sb3-contrib==1.6.2 # Gym is forced to this version by stable-baselines3. 
From 8ea58ab35243cd238a989faabc429160b180cb52 Mon Sep 17 00:00:00 2001 From: Wagner Costa Date: Tue, 29 Nov 2022 10:38:35 -0300 Subject: [PATCH 47/71] change BT prediction files to feather format --- freqtrade/freqai/data_kitchen.py | 40 ++++++++++------------------ freqtrade/freqai/freqai_interface.py | 1 - 2 files changed, 14 insertions(+), 27 deletions(-) diff --git a/freqtrade/freqai/data_kitchen.py b/freqtrade/freqai/data_kitchen.py index d438aaede..1c4177381 100644 --- a/freqtrade/freqai/data_kitchen.py +++ b/freqtrade/freqai/data_kitchen.py @@ -1317,41 +1317,24 @@ class FreqaiDataKitchen: self, append_df: DataFrame ) -> None: """ - Save prediction dataframe from backtesting to h5 file format + Save prediction dataframe from backtesting to feather file format :param append_df: dataframe for backtesting period """ full_predictions_folder = Path(self.full_path / self.backtest_predictions_folder) if not full_predictions_folder.is_dir(): full_predictions_folder.mkdir(parents=True, exist_ok=True) - append_df.to_hdf(self.backtesting_results_path, key=self.model_filename) + append_df.to_feather(self.backtesting_results_path) def get_backtesting_prediction( self ) -> DataFrame: """ - Get prediction dataframe from h5 file format + Get prediction dataframe from feather file format """ - append_df = self.backtesting_h5_data[self.model_filename] + append_df = pd.read_feather(self.backtesting_results_path) return append_df - def load_prediction_pair_file( - self - ) -> None: - """ - Load prediction file if it exists - """ - pair_file_name = self.pair.split(':')[0].replace('/', '_').lower() - path_to_predictionfile = Path(self.full_path / - self.backtest_predictions_folder / - f"{pair_file_name}_prediction.h5") - self.backtesting_results_path = path_to_predictionfile - file_exists = path_to_predictionfile.is_file() - if file_exists: - self.backtesting_h5_data = pd.HDFStore(path_to_predictionfile) - else: - self.backtesting_h5_data = {} - def check_if_backtest_prediction_is_valid( self, len_backtest_df: int @@ -1363,11 +1346,17 @@ class FreqaiDataKitchen: :return: :boolean: whether the prediction file is valid. """ - if self.model_filename in self.backtesting_h5_data: + path_to_predictionfile = Path(self.full_path / + self.backtest_predictions_folder / + f"{self.model_filename}_prediction.feather") + self.backtesting_results_path = path_to_predictionfile + + file_exists = path_to_predictionfile.is_file() + + if file_exists: append_df = self.get_backtesting_prediction() if len(append_df) == len_backtest_df and 'date' in append_df: - logger.info("Found backtesting prediction file " - f"at {self.backtesting_results_path.name}") + logger.info(f"Found backtesting prediction file at {path_to_predictionfile}") return True else: logger.info("A new backtesting prediction file is required. " @@ -1376,8 +1365,7 @@ class FreqaiDataKitchen: return False else: logger.info( - "Could not find backtesting prediction file " - f"at {self.backtesting_results_path.name}" + f"Could not find backtesting prediction file at {path_to_predictionfile}" ) return False diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index b2f931760..129571d4a 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -275,7 +275,6 @@ class IFreqaiModel(ABC): self.pair_it += 1 train_it = 0 - dk.load_prediction_pair_file() # Loop enforcing the sliding window training/backtesting paradigm # tr_train is the training time range e.g. 
1 historical month # tr_backtest is the backtesting time range e.g. the week directly From 3c322bf7dfab2910d223a35d9f04f50799d1e651 Mon Sep 17 00:00:00 2001 From: Matthias Date: Tue, 29 Nov 2022 18:27:08 +0100 Subject: [PATCH 48/71] Improve forceenter validation messages --- freqtrade/rpc/rpc.py | 33 +++++++++++++++++++-------------- tests/rpc/test_rpc.py | 4 ++++ 2 files changed, 23 insertions(+), 14 deletions(-) diff --git a/freqtrade/rpc/rpc.py b/freqtrade/rpc/rpc.py index 011543a09..334e18dc7 100644 --- a/freqtrade/rpc/rpc.py +++ b/freqtrade/rpc/rpc.py @@ -740,6 +740,24 @@ class RPC: self._freqtrade.wallets.update() return {'result': f'Created sell order for trade {trade_id}.'} + def _force_entry_validations(self, pair: str, order_side: SignalDirection): + if not self._freqtrade.config.get('force_entry_enable', False): + raise RPCException('Force_entry not enabled.') + + if self._freqtrade.state != State.RUNNING: + raise RPCException('trader is not running') + + if order_side == SignalDirection.SHORT and self._freqtrade.trading_mode == TradingMode.SPOT: + raise RPCException("Can't go short on Spot markets.") + + if pair not in self._freqtrade.exchange.get_markets(tradable_only=True): + raise RPCException('Symbol does not exist or market is not active.') + # Check if pair quote currency equals to the stake currency. + stake_currency = self._freqtrade.config.get('stake_currency') + if not self._freqtrade.exchange.get_pair_quote_currency(pair) == stake_currency: + raise RPCException( + f'Wrong pair selected. Only pairs with stake-currency {stake_currency} allowed.') + def _rpc_force_entry(self, pair: str, price: Optional[float], *, order_type: Optional[str] = None, order_side: SignalDirection = SignalDirection.LONG, @@ -750,21 +768,8 @@ class RPC: Handler for forcebuy Buys a pair trade at the given or current price """ + self._force_entry_validations(pair, order_side) - if not self._freqtrade.config.get('force_entry_enable', False): - raise RPCException('Force_entry not enabled.') - - if self._freqtrade.state != State.RUNNING: - raise RPCException('trader is not running') - - if order_side == SignalDirection.SHORT and self._freqtrade.trading_mode == TradingMode.SPOT: - raise RPCException("Can't go short on Spot markets.") - - # Check if pair quote currency equals to the stake currency. - stake_currency = self._freqtrade.config.get('stake_currency') - if not self._freqtrade.exchange.get_pair_quote_currency(pair) == stake_currency: - raise RPCException( - f'Wrong pair selected. Only pairs with stake-currency {stake_currency} allowed.') # check if valid pair # check if pair already has an open pair diff --git a/tests/rpc/test_rpc.py b/tests/rpc/test_rpc.py index 8828b6f33..24b5f1cbe 100644 --- a/tests/rpc/test_rpc.py +++ b/tests/rpc/test_rpc.py @@ -1056,6 +1056,10 @@ def test_rpc_force_entry(mocker, default_conf, ticker, fee, limit_buy_order_open assert trade.pair == pair assert trade.open_rate == 0.0001 + with pytest.raises(RPCException, + match=r'Symbol does not exist or market is not active.'): + rpc._rpc_force_entry('LTC/NOTHING', 0.0001) + # Test buy pair not with stakes with pytest.raises(RPCException, match=r'Wrong pair selected. 
Only pairs with stake-currency.*'): From 4571aedb33bac90dcb7f669bfd4c707f1c760173 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Wed, 30 Nov 2022 00:53:35 +0100 Subject: [PATCH 49/71] consolidate and clean code --- docs/freqai-running.md | 4 +- freqtrade/freqai/data_kitchen.py | 23 ----------- freqtrade/freqai/freqai_interface.py | 59 +++++++++++++--------------- 3 files changed, 29 insertions(+), 57 deletions(-) diff --git a/docs/freqai-running.md b/docs/freqai-running.md index 23873547f..b046e7bb8 100644 --- a/docs/freqai-running.md +++ b/docs/freqai-running.md @@ -79,11 +79,11 @@ To change your **features**, you **must** set a new `identifier` in the config t To save the models generated during a particular backtest so that you can start a live deployment from one of them instead of training a new model, you must set `save_backtest_models` to `True` in the config. -### Backtest live models +### Backtest live collected predictions FreqAI allow you to reuse live historic predictions through the backtest parameter `--freqai-backtest-live-models`. This can be useful when you want to reuse predictions generated in dry/run for comparison or other study. -The `--timerange` parameter must not be informed, as it will be automatically calculated through the data in historic predictions file. +The `--timerange` parameter must not be informed, as it will be automatically calculated through the data in the historic predictions file. ### Downloading data to cover the full backtest period diff --git a/freqtrade/freqai/data_kitchen.py b/freqtrade/freqai/data_kitchen.py index 1c4177381..3201fc451 100644 --- a/freqtrade/freqai/data_kitchen.py +++ b/freqtrade/freqai/data_kitchen.py @@ -75,7 +75,6 @@ class FreqaiDataKitchen: self.training_features_list: List = [] self.model_filename: str = "" self.backtesting_results_path = Path() - self.backtesting_h5_data: HDFStore = {} self.backtest_predictions_folder: str = "backtesting_predictions" self.live = live self.pair = pair @@ -456,28 +455,6 @@ class FreqaiDataKitchen: # print(tr_training_list, tr_backtesting_list) return tr_training_list_timerange, tr_backtesting_list_timerange - # def split_timerange_live_models( - # self - # ) -> Tuple[list, list]: - - # tr_backtesting_list_timerange = [] - # asset = self.pair.split("/")[0] - # if asset not in self.backtest_live_models_data["assets_end_dates"]: - # raise OperationalException( - # f"Model not available for pair {self.pair}. " - # "Please, try again after removing this pair from the configuration file." 
- # ) - # asset_data = self.backtest_live_models_data["assets_end_dates"][asset] - # backtesting_timerange = self.backtest_live_models_data["backtesting_timerange"] - # model_end_dates = [x for x in asset_data] - # model_end_dates.append(backtesting_timerange.stopts) - # model_end_dates.sort() - # for index, item in enumerate(model_end_dates): - # if len(model_end_dates) > (index + 1): - # tr_to_add = TimeRange("date", "date", item, model_end_dates[index + 1]) - # tr_backtesting_list_timerange.append(tr_to_add) - - # return tr_backtesting_list_timerange, tr_backtesting_list_timerange def slice_dataframe(self, timerange: TimeRange, df: DataFrame) -> DataFrame: """ diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index 129571d4a..cf7c4151b 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -55,7 +55,6 @@ class IFreqaiModel(ABC): def __init__(self, config: Config) -> None: self.config = config - self.metadata: Dict[str, Any] = {} self.assert_config(self.config) self.freqai_info: Dict[str, Any] = config["freqai"] self.data_split_parameters: Dict[str, Any] = config.get("freqai", {}).get( @@ -102,7 +101,7 @@ class IFreqaiModel(ABC): self.get_corr_dataframes: bool = True self._threads: List[threading.Thread] = [] self._stop_event = threading.Event() - self.metadata = self.dd.load_global_metadata_from_disk() + self.metadata: Dict[str, Any] = self.dd.load_global_metadata_from_disk() self.data_provider: Optional[DataProvider] = None self.max_system_threads = max(int(psutil.cpu_count() * 2 - 2), 1) @@ -148,18 +147,13 @@ class IFreqaiModel(ABC): # the concatenated results for the full backtesting period back to the strategy. elif not self.follow_mode: self.dk = FreqaiDataKitchen(self.config, self.live, metadata["pair"]) - if self.dk.backtest_live_models: - logger.info( - "Backtesting using historic predictions (live models)") - else: - logger.info(f"Training {len(self.dk.training_timeranges)} timeranges") - dataframe = self.dk.use_strategy_to_populate_indicators( - strategy, prediction_dataframe=dataframe, pair=metadata["pair"] - ) if not self.config.get("freqai_backtest_live_models", False): + logger.info(f"Training {len(self.dk.training_timeranges)} timeranges") dk = self.start_backtesting(dataframe, metadata, self.dk) dataframe = dk.remove_features_from_df(dk.return_dataframe) else: + logger.info( + "Backtesting using historic predictions (live models)") dk = self.start_backtesting_from_historic_predictions( dataframe, metadata, self.dk) dataframe = dk.return_dataframe @@ -167,7 +161,6 @@ class IFreqaiModel(ABC): self.clean_up() if self.live: self.inference_timer('stop', metadata["pair"]) - self.set_start_dry_live_date(dataframe) return dataframe @@ -336,27 +329,6 @@ class IFreqaiModel(ABC): return dk - def start_backtesting_from_historic_predictions( - self, dataframe: DataFrame, metadata: dict, dk: FreqaiDataKitchen - ) -> FreqaiDataKitchen: - """ - :param dataframe: DataFrame = strategy passed dataframe - :param metadata: Dict = pair metadata - :param dk: FreqaiDataKitchen = Data management/analysis tool associated to present pair only - :return: - FreqaiDataKitchen = Data management/analysis tool associated to present pair only - """ - pair = metadata["pair"] - dk.return_dataframe = dataframe - saved_dataframe = self.dd.historic_predictions[pair] - columns_to_drop = list(set(saved_dataframe.columns).intersection( - dk.return_dataframe.columns)) - dk.return_dataframe = 
dk.return_dataframe.drop(columns=list(columns_to_drop)) - dk.return_dataframe = pd.merge( - dk.return_dataframe, saved_dataframe, how='left', left_on='date', right_on="date_pred") - # dk.return_dataframe = dk.return_dataframe[saved_dataframe.columns].fillna(0) - return dk - def start_live( self, dataframe: DataFrame, metadata: dict, strategy: IStrategy, dk: FreqaiDataKitchen ) -> FreqaiDataKitchen: @@ -665,6 +637,8 @@ class IFreqaiModel(ABC): self.dd.historic_predictions[pair] = pred_df hist_preds_df = self.dd.historic_predictions[pair] + self.set_start_dry_live_date(pred_df) + for label in hist_preds_df.columns: if hist_preds_df[label].dtype == object: continue @@ -913,6 +887,27 @@ class IFreqaiModel(ABC): pd.to_datetime(live_dataframe.tail(1)["date"].values[0]).timestamp()) self.update_metadata(metadata) + def start_backtesting_from_historic_predictions( + self, dataframe: DataFrame, metadata: dict, dk: FreqaiDataKitchen + ) -> FreqaiDataKitchen: + """ + :param dataframe: DataFrame = strategy passed dataframe + :param metadata: Dict = pair metadata + :param dk: FreqaiDataKitchen = Data management/analysis tool associated to present pair only + :return: + FreqaiDataKitchen = Data management/analysis tool associated to present pair only + """ + pair = metadata["pair"] + dk.return_dataframe = dataframe + saved_dataframe = self.dd.historic_predictions[pair] + columns_to_drop = list(set(saved_dataframe.columns).intersection( + dk.return_dataframe.columns)) + dk.return_dataframe = dk.return_dataframe.drop(columns=list(columns_to_drop)) + dk.return_dataframe = pd.merge( + dk.return_dataframe, saved_dataframe, how='left', left_on='date', right_on="date_pred") + # dk.return_dataframe = dk.return_dataframe[saved_dataframe.columns].fillna(0) + return dk + # Following methods which are overridden by user made prediction models. # See freqai/prediction_models/CatboostPredictionModel.py for an example. From 10a45474e87ac9943593bee1b6b6afe0fd434616 Mon Sep 17 00:00:00 2001 From: rzhb <3757123+rzhb@users.noreply.github.com> Date: Wed, 30 Nov 2022 12:28:21 +0800 Subject: [PATCH 50/71] Update data-analysis.md fix typo in code --- docs/data-analysis.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/data-analysis.md b/docs/data-analysis.md index 926ed3eae..7a6c6bb96 100644 --- a/docs/data-analysis.md +++ b/docs/data-analysis.md @@ -83,7 +83,7 @@ from pathlib import Path project_root = "somedir/freqtrade" i=0 try: - os.chdirdir(project_root) + os.chdir(project_root) assert Path('LICENSE').is_file() except: while i<4 and (not Path('LICENSE').is_file()): From 17cf3c7e837123620988908a085d190d9afa9b54 Mon Sep 17 00:00:00 2001 From: Wagner Costa Date: Wed, 30 Nov 2022 08:28:45 -0300 Subject: [PATCH 51/71] bug fixes and removed fillna from fit_live_predictions --- freqtrade/freqai/freqai_interface.py | 8 ++++++-- tests/freqai/test_freqai_interface.py | 3 ++- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/freqtrade/freqai/freqai_interface.py b/freqtrade/freqai/freqai_interface.py index cf7c4151b..3386d2881 100644 --- a/freqtrade/freqai/freqai_interface.py +++ b/freqtrade/freqai/freqai_interface.py @@ -147,6 +147,9 @@ class IFreqaiModel(ABC): # the concatenated results for the full backtesting period back to the strategy. 
elif not self.follow_mode: self.dk = FreqaiDataKitchen(self.config, self.live, metadata["pair"]) + dataframe = self.dk.use_strategy_to_populate_indicators( + strategy, prediction_dataframe=dataframe, pair=metadata["pair"] + ) if not self.config.get("freqai_backtest_live_models", False): logger.info(f"Training {len(self.dk.training_timeranges)} timeranges") dk = self.start_backtesting(dataframe, metadata, self.dk) @@ -637,7 +640,7 @@ class IFreqaiModel(ABC): self.dd.historic_predictions[pair] = pred_df hist_preds_df = self.dd.historic_predictions[pair] - self.set_start_dry_live_date(pred_df) + self.set_start_dry_live_date(strat_df) for label in hist_preds_df.columns: if hist_preds_df[label].dtype == object: @@ -680,7 +683,7 @@ class IFreqaiModel(ABC): if self.dd.historic_predictions[dk.pair][label].dtype == object: continue f = spy.stats.norm.fit( - self.dd.historic_predictions[dk.pair][label].fillna(0).tail(num_candles)) + self.dd.historic_predictions[dk.pair][label].tail(num_candles)) dk.data["labels_mean"][label], dk.data["labels_std"][label] = f[0], f[1] return @@ -844,6 +847,7 @@ class IFreqaiModel(ABC): """ fit_live_predictions_candles = self.freqai_info.get("fit_live_predictions_candles", 0) if fit_live_predictions_candles: + logger.info("Applying fit_live_predictions in backtesting") label_columns = [col for col in dk.full_df.columns if ( col.startswith("&") and not (col.startswith("&") and col.endswith("_mean")) and diff --git a/tests/freqai/test_freqai_interface.py b/tests/freqai/test_freqai_interface.py index 605485e12..c53137093 100644 --- a/tests/freqai/test_freqai_interface.py +++ b/tests/freqai/test_freqai_interface.py @@ -363,7 +363,8 @@ def test_backtesting_fit_live_predictions(mocker, freqai_conf, caplog): corr_df, base_df = freqai.dd.get_base_and_corr_dataframes(sub_timerange, "LTC/BTC", freqai.dk) df = freqai.dk.use_strategy_to_populate_indicators(strategy, corr_df, base_df, "LTC/BTC") freqai.dk.pair = "ADA/BTC" - freqai.dk.full_df = df + freqai.dk.full_df = df.fillna(0) + freqai.dk.full_df assert "&-s_close_mean" not in freqai.dk.full_df.columns assert "&-s_close_std" not in freqai.dk.full_df.columns freqai.backtesting_fit_live_predictions(freqai.dk) From e7f72d52b8faddfc35ed27b1840aa6a2c3d69ea7 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Wed, 30 Nov 2022 12:36:26 +0100 Subject: [PATCH 52/71] bring back market side setting in get_state_info --- freqtrade/freqai/RL/BaseReinforcementLearningModel.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index e1381ab62..9d2fae583 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -193,6 +193,10 @@ class BaseReinforcementLearningModel(IFreqaiModel): now = datetime.now(timezone.utc).timestamp() trade_duration = int((now - trade.open_date_utc.timestamp()) / self.base_tf_seconds) current_profit = trade.calc_profit_ratio(current_rate) + if trade.is_short: + market_side = 0 + else: + market_side = 1 return market_side, current_profit, int(trade_duration) From 79821ebb33bf6eea901e51ae4e24ba8e16837ac4 Mon Sep 17 00:00:00 2001 From: Wagner Costa Date: Wed, 30 Nov 2022 08:41:44 -0300 Subject: [PATCH 53/71] fix flake8 errors --- freqtrade/freqai/data_kitchen.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/freqtrade/freqai/data_kitchen.py b/freqtrade/freqai/data_kitchen.py index 3201fc451..c6f22e468 100644 --- 
a/freqtrade/freqai/data_kitchen.py +++ b/freqtrade/freqai/data_kitchen.py @@ -10,7 +10,7 @@ import numpy as np import numpy.typing as npt import pandas as pd import psutil -from pandas import DataFrame, HDFStore +from pandas import DataFrame from scipy import stats from sklearn import linear_model from sklearn.cluster import DBSCAN @@ -455,7 +455,6 @@ class FreqaiDataKitchen: # print(tr_training_list, tr_backtesting_list) return tr_training_list_timerange, tr_backtesting_list_timerange - def slice_dataframe(self, timerange: TimeRange, df: DataFrame) -> DataFrame: """ Given a full dataframe, extract the user desired window From 59c7ce02f5c91461c3ee501023dbb2a6e92cd0c2 Mon Sep 17 00:00:00 2001 From: gautier pialat Date: Wed, 30 Nov 2022 21:29:34 +0100 Subject: [PATCH 54/71] binance restricted locations and server location Inform end user before he creates server in a binance restricted location https://github.com/ccxt/ccxt/issues/15872 --- docs/exchanges.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs/exchanges.md b/docs/exchanges.md index b4eb7e023..7070fc690 100644 --- a/docs/exchanges.md +++ b/docs/exchanges.md @@ -54,6 +54,9 @@ This configuration enables kraken, as well as rate-limiting to avoid bans from t ## Binance +!!! Warning "Server location and geo-ip restrictions" + Please be aware that binance restrict api access regarding the server country. The currents and non exhaustive countries blocked are United States, Malaysia (Singapour), Ontario (Canada). Please go to [binance terms > b. Eligibility](https://www.binance.com/en/terms) to find up to date list. + Binance supports [time_in_force](configuration.md#understand-order_time_in_force). !!! Tip "Stoploss on Exchange" From 95651fcd5a421e2d0d8eb0211844e4cff187159b Mon Sep 17 00:00:00 2001 From: Matthias Date: Thu, 1 Dec 2022 06:27:19 +0100 Subject: [PATCH 55/71] Improve/simplify telegram exception handling Move exceptionhandling to the decorator. --- freqtrade/rpc/telegram.py | 691 ++++++++++++++++++-------------------- 1 file changed, 322 insertions(+), 369 deletions(-) diff --git a/freqtrade/rpc/telegram.py b/freqtrade/rpc/telegram.py index 708a1ce53..38fe0cd13 100644 --- a/freqtrade/rpc/telegram.py +++ b/freqtrade/rpc/telegram.py @@ -79,6 +79,8 @@ def authorized_only(command_handler: Callable[..., None]) -> Callable[..., Any]: ) try: return command_handler(self, *args, **kwargs) + except RPCException as e: + self._send_msg(str(e)) except BaseException: logger.exception('Exception occurred within Telegram module') @@ -538,72 +540,67 @@ class Telegram(RPCHandler): handler for `/status` and `/status `. """ - try: + # Check if there's at least one numerical ID provided. + # If so, try to get only these trades. + trade_ids = [] + if context.args and len(context.args) > 0: + trade_ids = [int(i) for i in context.args if i.isnumeric()] - # Check if there's at least one numerical ID provided. - # If so, try to get only these trades. 
- trade_ids = [] - if context.args and len(context.args) > 0: - trade_ids = [int(i) for i in context.args if i.isnumeric()] + results = self._rpc._rpc_trade_status(trade_ids=trade_ids) + position_adjust = self._config.get('position_adjustment_enable', False) + max_entries = self._config.get('max_entry_position_adjustment', -1) + for r in results: + r['open_date_hum'] = arrow.get(r['open_date']).humanize() + r['num_entries'] = len([o for o in r['orders'] if o['ft_is_entry']]) + r['exit_reason'] = r.get('exit_reason', "") + lines = [ + "*Trade ID:* `{trade_id}`" + + (" `(since {open_date_hum})`" if r['is_open'] else ""), + "*Current Pair:* {pair}", + "*Direction:* " + ("`Short`" if r.get('is_short') else "`Long`"), + "*Leverage:* `{leverage}`" if r.get('leverage') else "", + "*Amount:* `{amount} ({stake_amount} {quote_currency})`", + "*Enter Tag:* `{enter_tag}`" if r['enter_tag'] else "", + "*Exit Reason:* `{exit_reason}`" if r['exit_reason'] else "", + ] - results = self._rpc._rpc_trade_status(trade_ids=trade_ids) - position_adjust = self._config.get('position_adjustment_enable', False) - max_entries = self._config.get('max_entry_position_adjustment', -1) - for r in results: - r['open_date_hum'] = arrow.get(r['open_date']).humanize() - r['num_entries'] = len([o for o in r['orders'] if o['ft_is_entry']]) - r['exit_reason'] = r.get('exit_reason', "") - lines = [ - "*Trade ID:* `{trade_id}`" + - (" `(since {open_date_hum})`" if r['is_open'] else ""), - "*Current Pair:* {pair}", - "*Direction:* " + ("`Short`" if r.get('is_short') else "`Long`"), - "*Leverage:* `{leverage}`" if r.get('leverage') else "", - "*Amount:* `{amount} ({stake_amount} {quote_currency})`", - "*Enter Tag:* `{enter_tag}`" if r['enter_tag'] else "", - "*Exit Reason:* `{exit_reason}`" if r['exit_reason'] else "", - ] + if position_adjust: + max_buy_str = (f"/{max_entries + 1}" if (max_entries > 0) else "") + lines.append("*Number of Entries:* `{num_entries}`" + max_buy_str) - if position_adjust: - max_buy_str = (f"/{max_entries + 1}" if (max_entries > 0) else "") - lines.append("*Number of Entries:* `{num_entries}`" + max_buy_str) + lines.extend([ + "*Open Rate:* `{open_rate:.8f}`", + "*Close Rate:* `{close_rate:.8f}`" if r['close_rate'] else "", + "*Open Date:* `{open_date}`", + "*Close Date:* `{close_date}`" if r['close_date'] else "", + "*Current Rate:* `{current_rate:.8f}`" if r['is_open'] else "", + ("*Current Profit:* " if r['is_open'] else "*Close Profit: *") + + "`{profit_ratio:.2%}`", + ]) - lines.extend([ - "*Open Rate:* `{open_rate:.8f}`", - "*Close Rate:* `{close_rate:.8f}`" if r['close_rate'] else "", - "*Open Date:* `{open_date}`", - "*Close Date:* `{close_date}`" if r['close_date'] else "", - "*Current Rate:* `{current_rate:.8f}`" if r['is_open'] else "", - ("*Current Profit:* " if r['is_open'] else "*Close Profit: *") - + "`{profit_ratio:.2%}`", - ]) + if r['is_open']: + if r.get('realized_profit'): + lines.append("*Realized Profit:* `{realized_profit:.8f}`") + if (r['stop_loss_abs'] != r['initial_stop_loss_abs'] + and r['initial_stop_loss_ratio'] is not None): + # Adding initial stoploss only if it is different from stoploss + lines.append("*Initial Stoploss:* `{initial_stop_loss_abs:.8f}` " + "`({initial_stop_loss_ratio:.2%})`") - if r['is_open']: - if r.get('realized_profit'): - lines.append("*Realized Profit:* `{realized_profit:.8f}`") - if (r['stop_loss_abs'] != r['initial_stop_loss_abs'] - and r['initial_stop_loss_ratio'] is not None): - # Adding initial stoploss only if it is different from stoploss - 
lines.append("*Initial Stoploss:* `{initial_stop_loss_abs:.8f}` " - "`({initial_stop_loss_ratio:.2%})`") + # Adding stoploss and stoploss percentage only if it is not None + lines.append("*Stoploss:* `{stop_loss_abs:.8f}` " + + ("`({stop_loss_ratio:.2%})`" if r['stop_loss_ratio'] else "")) + lines.append("*Stoploss distance:* `{stoploss_current_dist:.8f}` " + "`({stoploss_current_dist_ratio:.2%})`") + if r['open_order']: + lines.append( + "*Open Order:* `{open_order}`" + + "- `{exit_order_status}`" if r['exit_order_status'] else "") - # Adding stoploss and stoploss percentage only if it is not None - lines.append("*Stoploss:* `{stop_loss_abs:.8f}` " + - ("`({stop_loss_ratio:.2%})`" if r['stop_loss_ratio'] else "")) - lines.append("*Stoploss distance:* `{stoploss_current_dist:.8f}` " - "`({stoploss_current_dist_ratio:.2%})`") - if r['open_order']: - lines.append( - "*Open Order:* `{open_order}`" - + "- `{exit_order_status}`" if r['exit_order_status'] else "") - - lines_detail = self._prepare_order_details( - r['orders'], r['quote_currency'], r['is_open']) - lines.extend(lines_detail if lines_detail else "") - self.__send_status_msg(lines, r) - - except RPCException as e: - self._send_msg(str(e)) + lines_detail = self._prepare_order_details( + r['orders'], r['quote_currency'], r['is_open']) + lines.extend(lines_detail if lines_detail else "") + self.__send_status_msg(lines, r) def __send_status_msg(self, lines: List[str], r: Dict[str, Any]) -> None: """ @@ -630,37 +627,34 @@ class Telegram(RPCHandler): :param update: message update :return: None """ - try: - fiat_currency = self._config.get('fiat_display_currency', '') - statlist, head, fiat_profit_sum = self._rpc._rpc_status_table( - self._config['stake_currency'], fiat_currency) + fiat_currency = self._config.get('fiat_display_currency', '') + statlist, head, fiat_profit_sum = self._rpc._rpc_status_table( + self._config['stake_currency'], fiat_currency) - show_total = not isnan(fiat_profit_sum) and len(statlist) > 1 - max_trades_per_msg = 50 - """ - Calculate the number of messages of 50 trades per message - 0.99 is used to make sure that there are no extra (empty) messages - As an example with 50 trades, there will be int(50/50 + 0.99) = 1 message - """ - messages_count = max(int(len(statlist) / max_trades_per_msg + 0.99), 1) - for i in range(0, messages_count): - trades = statlist[i * max_trades_per_msg:(i + 1) * max_trades_per_msg] - if show_total and i == messages_count - 1: - # append total line - trades.append(["Total", "", "", f"{fiat_profit_sum:.2f} {fiat_currency}"]) + show_total = not isnan(fiat_profit_sum) and len(statlist) > 1 + max_trades_per_msg = 50 + """ + Calculate the number of messages of 50 trades per message + 0.99 is used to make sure that there are no extra (empty) messages + As an example with 50 trades, there will be int(50/50 + 0.99) = 1 message + """ + messages_count = max(int(len(statlist) / max_trades_per_msg + 0.99), 1) + for i in range(0, messages_count): + trades = statlist[i * max_trades_per_msg:(i + 1) * max_trades_per_msg] + if show_total and i == messages_count - 1: + # append total line + trades.append(["Total", "", "", f"{fiat_profit_sum:.2f} {fiat_currency}"]) - message = tabulate(trades, - headers=head, - tablefmt='simple') - if show_total and i == messages_count - 1: - # insert separators line between Total - lines = message.split("\n") - message = "\n".join(lines[:-1] + [lines[1]] + [lines[-1]]) - self._send_msg(f"
<pre>{message}</pre>
", parse_mode=ParseMode.HTML, - reload_able=True, callback_path="update_status_table", - query=update.callback_query) - except RPCException as e: - self._send_msg(str(e)) + message = tabulate(trades, + headers=head, + tablefmt='simple') + if show_total and i == messages_count - 1: + # insert separators line between Total + lines = message.split("\n") + message = "\n".join(lines[:-1] + [lines[1]] + [lines[-1]]) + self._send_msg(f"
<pre>{message}</pre>
", parse_mode=ParseMode.HTML, + reload_able=True, callback_path="update_status_table", + query=update.callback_query) @authorized_only def _timeunit_stats(self, update: Update, context: CallbackContext, unit: str) -> None: @@ -686,35 +680,32 @@ class Telegram(RPCHandler): timescale = int(context.args[0]) if context.args else val.default except (TypeError, ValueError, IndexError): timescale = val.default - try: - stats = self._rpc._rpc_timeunit_profit( - timescale, - stake_cur, - fiat_disp_cur, - unit - ) - stats_tab = tabulate( - [[f"{period['date']} ({period['trade_count']})", - f"{round_coin_value(period['abs_profit'], stats['stake_currency'])}", - f"{period['fiat_value']:.2f} {stats['fiat_display_currency']}", - f"{period['rel_profit']:.2%}", - ] for period in stats['data']], - headers=[ - f"{val.header} (count)", - f'{stake_cur}', - f'{fiat_disp_cur}', - 'Profit %', - 'Trades', - ], - tablefmt='simple') - message = ( - f'{val.message} Profit over the last {timescale} {val.message2}:\n' - f'
<pre>{stats_tab}</pre>
' - ) - self._send_msg(message, parse_mode=ParseMode.HTML, reload_able=True, - callback_path=val.callback, query=update.callback_query) - except RPCException as e: - self._send_msg(str(e)) + stats = self._rpc._rpc_timeunit_profit( + timescale, + stake_cur, + fiat_disp_cur, + unit + ) + stats_tab = tabulate( + [[f"{period['date']} ({period['trade_count']})", + f"{round_coin_value(period['abs_profit'], stats['stake_currency'])}", + f"{period['fiat_value']:.2f} {stats['fiat_display_currency']}", + f"{period['rel_profit']:.2%}", + ] for period in stats['data']], + headers=[ + f"{val.header} (count)", + f'{stake_cur}', + f'{fiat_disp_cur}', + 'Profit %', + 'Trades', + ], + tablefmt='simple') + message = ( + f'{val.message} Profit over the last {timescale} {val.message2}:\n' + f'
<pre>{stats_tab}</pre>
' + ) + self._send_msg(message, parse_mode=ParseMode.HTML, reload_able=True, + callback_path=val.callback, query=update.callback_query) @authorized_only def _daily(self, update: Update, context: CallbackContext) -> None: @@ -878,79 +869,76 @@ class Telegram(RPCHandler): @authorized_only def _balance(self, update: Update, context: CallbackContext) -> None: """ Handler for /balance """ - try: - result = self._rpc._rpc_balance(self._config['stake_currency'], - self._config.get('fiat_display_currency', '')) + result = self._rpc._rpc_balance(self._config['stake_currency'], + self._config.get('fiat_display_currency', '')) - balance_dust_level = self._config['telegram'].get('balance_dust_level', 0.0) - if not balance_dust_level: - balance_dust_level = DUST_PER_COIN.get(self._config['stake_currency'], 1.0) + balance_dust_level = self._config['telegram'].get('balance_dust_level', 0.0) + if not balance_dust_level: + balance_dust_level = DUST_PER_COIN.get(self._config['stake_currency'], 1.0) - output = '' - if self._config['dry_run']: - output += "*Warning:* Simulated balances in Dry Mode.\n" - starting_cap = round_coin_value( - result['starting_capital'], self._config['stake_currency']) - output += f"Starting capital: `{starting_cap}`" - starting_cap_fiat = round_coin_value( - result['starting_capital_fiat'], self._config['fiat_display_currency'] - ) if result['starting_capital_fiat'] > 0 else '' - output += (f" `, {starting_cap_fiat}`.\n" - ) if result['starting_capital_fiat'] > 0 else '.\n' + output = '' + if self._config['dry_run']: + output += "*Warning:* Simulated balances in Dry Mode.\n" + starting_cap = round_coin_value( + result['starting_capital'], self._config['stake_currency']) + output += f"Starting capital: `{starting_cap}`" + starting_cap_fiat = round_coin_value( + result['starting_capital_fiat'], self._config['fiat_display_currency'] + ) if result['starting_capital_fiat'] > 0 else '' + output += (f" `, {starting_cap_fiat}`.\n" + ) if result['starting_capital_fiat'] > 0 else '.\n' - total_dust_balance = 0 - total_dust_currencies = 0 - for curr in result['currencies']: - curr_output = '' - if curr['est_stake'] > balance_dust_level: - if curr['is_position']: - curr_output = ( - f"*{curr['currency']}:*\n" - f"\t`{curr['side']}: {curr['position']:.8f}`\n" - f"\t`Leverage: {curr['leverage']:.1f}`\n" - f"\t`Est. {curr['stake']}: " - f"{round_coin_value(curr['est_stake'], curr['stake'], False)}`\n") - else: - curr_output = ( - f"*{curr['currency']}:*\n" - f"\t`Available: {curr['free']:.8f}`\n" - f"\t`Balance: {curr['balance']:.8f}`\n" - f"\t`Pending: {curr['used']:.8f}`\n" - f"\t`Est. {curr['stake']}: " - f"{round_coin_value(curr['est_stake'], curr['stake'], False)}`\n") - elif curr['est_stake'] <= balance_dust_level: - total_dust_balance += curr['est_stake'] - total_dust_currencies += 1 - - # Handle overflowing message length - if len(output + curr_output) >= MAX_MESSAGE_LENGTH: - self._send_msg(output) - output = curr_output + total_dust_balance = 0 + total_dust_currencies = 0 + for curr in result['currencies']: + curr_output = '' + if curr['est_stake'] > balance_dust_level: + if curr['is_position']: + curr_output = ( + f"*{curr['currency']}:*\n" + f"\t`{curr['side']}: {curr['position']:.8f}`\n" + f"\t`Leverage: {curr['leverage']:.1f}`\n" + f"\t`Est. 
{curr['stake']}: " + f"{round_coin_value(curr['est_stake'], curr['stake'], False)}`\n") else: - output += curr_output + curr_output = ( + f"*{curr['currency']}:*\n" + f"\t`Available: {curr['free']:.8f}`\n" + f"\t`Balance: {curr['balance']:.8f}`\n" + f"\t`Pending: {curr['used']:.8f}`\n" + f"\t`Est. {curr['stake']}: " + f"{round_coin_value(curr['est_stake'], curr['stake'], False)}`\n") + elif curr['est_stake'] <= balance_dust_level: + total_dust_balance += curr['est_stake'] + total_dust_currencies += 1 - if total_dust_balance > 0: - output += ( - f"*{total_dust_currencies} Other " - f"{plural(total_dust_currencies, 'Currency', 'Currencies')} " - f"(< {balance_dust_level} {result['stake']}):*\n" - f"\t`Est. {result['stake']}: " - f"{round_coin_value(total_dust_balance, result['stake'], False)}`\n") - tc = result['trade_count'] > 0 - stake_improve = f" `({result['starting_capital_ratio']:.2%})`" if tc else '' - fiat_val = f" `({result['starting_capital_fiat_ratio']:.2%})`" if tc else '' + # Handle overflowing message length + if len(output + curr_output) >= MAX_MESSAGE_LENGTH: + self._send_msg(output) + output = curr_output + else: + output += curr_output - output += ("\n*Estimated Value*:\n" - f"\t`{result['stake']}: " - f"{round_coin_value(result['total'], result['stake'], False)}`" - f"{stake_improve}\n" - f"\t`{result['symbol']}: " - f"{round_coin_value(result['value'], result['symbol'], False)}`" - f"{fiat_val}\n") - self._send_msg(output, reload_able=True, callback_path="update_balance", - query=update.callback_query) - except RPCException as e: - self._send_msg(str(e)) + if total_dust_balance > 0: + output += ( + f"*{total_dust_currencies} Other " + f"{plural(total_dust_currencies, 'Currency', 'Currencies')} " + f"(< {balance_dust_level} {result['stake']}):*\n" + f"\t`Est. {result['stake']}: " + f"{round_coin_value(total_dust_balance, result['stake'], False)}`\n") + tc = result['trade_count'] > 0 + stake_improve = f" `({result['starting_capital_ratio']:.2%})`" if tc else '' + fiat_val = f" `({result['starting_capital_fiat_ratio']:.2%})`" if tc else '' + + output += ("\n*Estimated Value*:\n" + f"\t`{result['stake']}: " + f"{round_coin_value(result['total'], result['stake'], False)}`" + f"{stake_improve}\n" + f"\t`{result['symbol']}: " + f"{round_coin_value(result['value'], result['symbol'], False)}`" + f"{fiat_val}\n") + self._send_msg(output, reload_able=True, callback_path="update_balance", + query=update.callback_query) @authorized_only def _start(self, update: Update, context: CallbackContext) -> None: @@ -1125,26 +1113,23 @@ class Telegram(RPCHandler): nrecent = int(context.args[0]) if context.args else 10 except (TypeError, ValueError, IndexError): nrecent = 10 - try: - trades = self._rpc._rpc_trade_history( - nrecent - ) - trades_tab = tabulate( - [[arrow.get(trade['close_date']).humanize(), - trade['pair'] + " (#" + str(trade['trade_id']) + ")", - f"{(trade['close_profit']):.2%} ({trade['close_profit_abs']})"] - for trade in trades['trades']], - headers=[ - 'Close Date', - 'Pair (ID)', - f'Profit ({stake_cur})', - ], - tablefmt='simple') - message = (f"{min(trades['trades_count'], nrecent)} recent trades:\n" - + (f"
<pre>{trades_tab}</pre>
" if trades['trades_count'] > 0 else '')) - self._send_msg(message, parse_mode=ParseMode.HTML) - except RPCException as e: - self._send_msg(str(e)) + trades = self._rpc._rpc_trade_history( + nrecent + ) + trades_tab = tabulate( + [[arrow.get(trade['close_date']).humanize(), + trade['pair'] + " (#" + str(trade['trade_id']) + ")", + f"{(trade['close_profit']):.2%} ({trade['close_profit_abs']})"] + for trade in trades['trades']], + headers=[ + 'Close Date', + 'Pair (ID)', + f'Profit ({stake_cur})', + ], + tablefmt='simple') + message = (f"{min(trades['trades_count'], nrecent)} recent trades:\n" + + (f"
<pre>{trades_tab}</pre>
" if trades['trades_count'] > 0 else '')) + self._send_msg(message, parse_mode=ParseMode.HTML) @authorized_only def _delete_trade(self, update: Update, context: CallbackContext) -> None: @@ -1155,18 +1140,14 @@ class Telegram(RPCHandler): :param update: message update :return: None """ - try: - if not context.args or len(context.args) == 0: - raise RPCException("Trade-id not set.") - trade_id = int(context.args[0]) - msg = self._rpc._rpc_delete(trade_id) - self._send_msg(( - f"`{msg['result_msg']}`\n" - 'Please make sure to take care of this asset on the exchange manually.' - )) - - except RPCException as e: - self._send_msg(str(e)) + if not context.args or len(context.args) == 0: + raise RPCException("Trade-id not set.") + trade_id = int(context.args[0]) + msg = self._rpc._rpc_delete(trade_id) + self._send_msg(( + f"`{msg['result_msg']}`\n" + 'Please make sure to take care of this asset on the exchange manually.' + )) @authorized_only def _performance(self, update: Update, context: CallbackContext) -> None: @@ -1177,27 +1158,24 @@ class Telegram(RPCHandler): :param update: message update :return: None """ - try: - trades = self._rpc._rpc_performance() - output = "Performance:\n" - for i, trade in enumerate(trades): - stat_line = ( - f"{i+1}.\t {trade['pair']}\t" - f"{round_coin_value(trade['profit_abs'], self._config['stake_currency'])} " - f"({trade['profit_ratio']:.2%}) " - f"({trade['count']})\n") + trades = self._rpc._rpc_performance() + output = "Performance:\n" + for i, trade in enumerate(trades): + stat_line = ( + f"{i+1}.\t {trade['pair']}\t" + f"{round_coin_value(trade['profit_abs'], self._config['stake_currency'])} " + f"({trade['profit_ratio']:.2%}) " + f"({trade['count']})\n") - if len(output + stat_line) >= MAX_MESSAGE_LENGTH: - self._send_msg(output, parse_mode=ParseMode.HTML) - output = stat_line - else: - output += stat_line + if len(output + stat_line) >= MAX_MESSAGE_LENGTH: + self._send_msg(output, parse_mode=ParseMode.HTML) + output = stat_line + else: + output += stat_line - self._send_msg(output, parse_mode=ParseMode.HTML, - reload_able=True, callback_path="update_performance", - query=update.callback_query) - except RPCException as e: - self._send_msg(str(e)) + self._send_msg(output, parse_mode=ParseMode.HTML, + reload_able=True, callback_path="update_performance", + query=update.callback_query) @authorized_only def _enter_tag_performance(self, update: Update, context: CallbackContext) -> None: @@ -1208,31 +1186,28 @@ class Telegram(RPCHandler): :param update: message update :return: None """ - try: - pair = None - if context.args and isinstance(context.args[0], str): - pair = context.args[0] + pair = None + if context.args and isinstance(context.args[0], str): + pair = context.args[0] - trades = self._rpc._rpc_enter_tag_performance(pair) - output = "Entry Tag Performance:\n" - for i, trade in enumerate(trades): - stat_line = ( - f"{i+1}.\t {trade['enter_tag']}\t" - f"{round_coin_value(trade['profit_abs'], self._config['stake_currency'])} " - f"({trade['profit_ratio']:.2%}) " - f"({trade['count']})\n") + trades = self._rpc._rpc_enter_tag_performance(pair) + output = "Entry Tag Performance:\n" + for i, trade in enumerate(trades): + stat_line = ( + f"{i+1}.\t {trade['enter_tag']}\t" + f"{round_coin_value(trade['profit_abs'], self._config['stake_currency'])} " + f"({trade['profit_ratio']:.2%}) " + f"({trade['count']})\n") - if len(output + stat_line) >= MAX_MESSAGE_LENGTH: - self._send_msg(output, parse_mode=ParseMode.HTML) - output = stat_line - else: - output += 
stat_line + if len(output + stat_line) >= MAX_MESSAGE_LENGTH: + self._send_msg(output, parse_mode=ParseMode.HTML) + output = stat_line + else: + output += stat_line - self._send_msg(output, parse_mode=ParseMode.HTML, - reload_able=True, callback_path="update_enter_tag_performance", - query=update.callback_query) - except RPCException as e: - self._send_msg(str(e)) + self._send_msg(output, parse_mode=ParseMode.HTML, + reload_able=True, callback_path="update_enter_tag_performance", + query=update.callback_query) @authorized_only def _exit_reason_performance(self, update: Update, context: CallbackContext) -> None: @@ -1243,31 +1218,28 @@ class Telegram(RPCHandler): :param update: message update :return: None """ - try: - pair = None - if context.args and isinstance(context.args[0], str): - pair = context.args[0] + pair = None + if context.args and isinstance(context.args[0], str): + pair = context.args[0] - trades = self._rpc._rpc_exit_reason_performance(pair) - output = "Exit Reason Performance:\n" - for i, trade in enumerate(trades): - stat_line = ( - f"{i+1}.\t {trade['exit_reason']}\t" - f"{round_coin_value(trade['profit_abs'], self._config['stake_currency'])} " - f"({trade['profit_ratio']:.2%}) " - f"({trade['count']})\n") + trades = self._rpc._rpc_exit_reason_performance(pair) + output = "Exit Reason Performance:\n" + for i, trade in enumerate(trades): + stat_line = ( + f"{i+1}.\t {trade['exit_reason']}\t" + f"{round_coin_value(trade['profit_abs'], self._config['stake_currency'])} " + f"({trade['profit_ratio']:.2%}) " + f"({trade['count']})\n") - if len(output + stat_line) >= MAX_MESSAGE_LENGTH: - self._send_msg(output, parse_mode=ParseMode.HTML) - output = stat_line - else: - output += stat_line + if len(output + stat_line) >= MAX_MESSAGE_LENGTH: + self._send_msg(output, parse_mode=ParseMode.HTML) + output = stat_line + else: + output += stat_line - self._send_msg(output, parse_mode=ParseMode.HTML, - reload_able=True, callback_path="update_exit_reason_performance", - query=update.callback_query) - except RPCException as e: - self._send_msg(str(e)) + self._send_msg(output, parse_mode=ParseMode.HTML, + reload_able=True, callback_path="update_exit_reason_performance", + query=update.callback_query) @authorized_only def _mix_tag_performance(self, update: Update, context: CallbackContext) -> None: @@ -1278,31 +1250,28 @@ class Telegram(RPCHandler): :param update: message update :return: None """ - try: - pair = None - if context.args and isinstance(context.args[0], str): - pair = context.args[0] + pair = None + if context.args and isinstance(context.args[0], str): + pair = context.args[0] - trades = self._rpc._rpc_mix_tag_performance(pair) - output = "Mix Tag Performance:\n" - for i, trade in enumerate(trades): - stat_line = ( - f"{i+1}.\t {trade['mix_tag']}\t" - f"{round_coin_value(trade['profit_abs'], self._config['stake_currency'])} " - f"({trade['profit']:.2%}) " - f"({trade['count']})\n") + trades = self._rpc._rpc_mix_tag_performance(pair) + output = "Mix Tag Performance:\n" + for i, trade in enumerate(trades): + stat_line = ( + f"{i+1}.\t {trade['mix_tag']}\t" + f"{round_coin_value(trade['profit_abs'], self._config['stake_currency'])} " + f"({trade['profit']:.2%}) " + f"({trade['count']})\n") - if len(output + stat_line) >= MAX_MESSAGE_LENGTH: - self._send_msg(output, parse_mode=ParseMode.HTML) - output = stat_line - else: - output += stat_line + if len(output + stat_line) >= MAX_MESSAGE_LENGTH: + self._send_msg(output, parse_mode=ParseMode.HTML) + output = stat_line + else: + output 
+= stat_line - self._send_msg(output, parse_mode=ParseMode.HTML, - reload_able=True, callback_path="update_mix_tag_performance", - query=update.callback_query) - except RPCException as e: - self._send_msg(str(e)) + self._send_msg(output, parse_mode=ParseMode.HTML, + reload_able=True, callback_path="update_mix_tag_performance", + query=update.callback_query) @authorized_only def _count(self, update: Update, context: CallbackContext) -> None: @@ -1313,18 +1282,15 @@ class Telegram(RPCHandler): :param update: message update :return: None """ - try: - counts = self._rpc._rpc_count() - message = tabulate({k: [v] for k, v in counts.items()}, - headers=['current', 'max', 'total stake'], - tablefmt='simple') - message = "
<pre>{}</pre>
".format(message) - logger.debug(message) - self._send_msg(message, parse_mode=ParseMode.HTML, - reload_able=True, callback_path="update_count", - query=update.callback_query) - except RPCException as e: - self._send_msg(str(e)) + counts = self._rpc._rpc_count() + message = tabulate({k: [v] for k, v in counts.items()}, + headers=['current', 'max', 'total stake'], + tablefmt='simple') + message = "
<pre>{}</pre>
".format(message) + logger.debug(message) + self._send_msg(message, parse_mode=ParseMode.HTML, + reload_able=True, callback_path="update_count", + query=update.callback_query) @authorized_only def _locks(self, update: Update, context: CallbackContext) -> None: @@ -1372,22 +1338,19 @@ class Telegram(RPCHandler): Handler for /whitelist Shows the currently active whitelist """ - try: - whitelist = self._rpc._rpc_whitelist() + whitelist = self._rpc._rpc_whitelist() - if context.args: - if "sorted" in context.args: - whitelist['whitelist'] = sorted(whitelist['whitelist']) - if "baseonly" in context.args: - whitelist['whitelist'] = [pair.split("/")[0] for pair in whitelist['whitelist']] + if context.args: + if "sorted" in context.args: + whitelist['whitelist'] = sorted(whitelist['whitelist']) + if "baseonly" in context.args: + whitelist['whitelist'] = [pair.split("/")[0] for pair in whitelist['whitelist']] - message = f"Using whitelist `{whitelist['method']}` with {whitelist['length']} pairs\n" - message += f"`{', '.join(whitelist['whitelist'])}`" + message = f"Using whitelist `{whitelist['method']}` with {whitelist['length']} pairs\n" + message += f"`{', '.join(whitelist['whitelist'])}`" - logger.debug(message) - self._send_msg(message) - except RPCException as e: - self._send_msg(str(e)) + logger.debug(message) + self._send_msg(message) @authorized_only def _blacklist(self, update: Update, context: CallbackContext) -> None: @@ -1425,30 +1388,27 @@ class Telegram(RPCHandler): Shows the latest logs """ try: - try: - limit = int(context.args[0]) if context.args else 10 - except (TypeError, ValueError, IndexError): - limit = 10 - logs = RPC._rpc_get_logs(limit)['logs'] - msgs = '' - msg_template = "*{}* {}: {} \\- `{}`" - for logrec in logs: - msg = msg_template.format(escape_markdown(logrec[0], version=2), - escape_markdown(logrec[2], version=2), - escape_markdown(logrec[3], version=2), - escape_markdown(logrec[4], version=2)) - if len(msgs + msg) + 10 >= MAX_MESSAGE_LENGTH: - # Send message immediately if it would become too long - self._send_msg(msgs, parse_mode=ParseMode.MARKDOWN_V2) - msgs = msg + '\n' - else: - # Append message to messages to send - msgs += msg + '\n' - - if msgs: + limit = int(context.args[0]) if context.args else 10 + except (TypeError, ValueError, IndexError): + limit = 10 + logs = RPC._rpc_get_logs(limit)['logs'] + msgs = '' + msg_template = "*{}* {}: {} \\- `{}`" + for logrec in logs: + msg = msg_template.format(escape_markdown(logrec[0], version=2), + escape_markdown(logrec[2], version=2), + escape_markdown(logrec[3], version=2), + escape_markdown(logrec[4], version=2)) + if len(msgs + msg) + 10 >= MAX_MESSAGE_LENGTH: + # Send message immediately if it would become too long self._send_msg(msgs, parse_mode=ParseMode.MARKDOWN_V2) - except RPCException as e: - self._send_msg(str(e)) + msgs = msg + '\n' + else: + # Append message to messages to send + msgs += msg + '\n' + + if msgs: + self._send_msg(msgs, parse_mode=ParseMode.MARKDOWN_V2) @authorized_only def _edge(self, update: Update, context: CallbackContext) -> None: @@ -1456,21 +1416,17 @@ class Telegram(RPCHandler): Handler for /edge Shows information related to Edge """ - try: - edge_pairs = self._rpc._rpc_edge() - if not edge_pairs: - message = 'Edge only validated following pairs:' - self._send_msg(message, parse_mode=ParseMode.HTML) + edge_pairs = self._rpc._rpc_edge() + if not edge_pairs: + message = 'Edge only validated following pairs:' + self._send_msg(message, parse_mode=ParseMode.HTML) - for chunk in 
chunks(edge_pairs, 25): - edge_pairs_tab = tabulate(chunk, headers='keys', tablefmt='simple') - message = (f'Edge only validated following pairs:\n' - f'
<pre>{edge_pairs_tab}</pre>
') + for chunk in chunks(edge_pairs, 25): + edge_pairs_tab = tabulate(chunk, headers='keys', tablefmt='simple') + message = (f'Edge only validated following pairs:\n' + f'
<pre>{edge_pairs_tab}</pre>
') - self._send_msg(message, parse_mode=ParseMode.HTML) - - except RPCException as e: - self._send_msg(str(e)) + self._send_msg(message, parse_mode=ParseMode.HTML) @authorized_only def _help(self, update: Update, context: CallbackContext) -> None: @@ -1551,12 +1507,9 @@ class Telegram(RPCHandler): Handler for /health Shows the last process timestamp """ - try: - health = self._rpc._health() - message = f"Last process: `{health['last_process_loc']}`" - self._send_msg(message) - except RPCException as e: - self._send_msg(str(e)) + health = self._rpc._health() + message = f"Last process: `{health['last_process_loc']}`" + self._send_msg(message) @authorized_only def _version(self, update: Update, context: CallbackContext) -> None: From 4a9982f86bdc340441cd9c7fff1259f9813a715d Mon Sep 17 00:00:00 2001 From: Emre Date: Thu, 1 Dec 2022 10:08:42 +0300 Subject: [PATCH 56/71] Fix sb3_contrib loading issue --- freqtrade/freqai/RL/BaseReinforcementLearningModel.py | 2 +- freqtrade/freqai/data_drawer.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 9d2fae583..81f8edfc4 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -64,7 +64,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): self.policy_type = self.freqai_info['rl_config']['policy_type'] self.unset_outlier_removal() self.net_arch = self.rl_config.get('net_arch', [128, 128]) - self.dd.model_type = "stable_baselines" + self.dd.model_type = import_str def unset_outlier_removal(self): """ diff --git a/freqtrade/freqai/data_drawer.py b/freqtrade/freqai/data_drawer.py index 99e3686b3..5e1f3a344 100644 --- a/freqtrade/freqai/data_drawer.py +++ b/freqtrade/freqai/data_drawer.py @@ -503,7 +503,7 @@ class FreqaiDataDrawer: dump(model, save_path / f"{dk.model_filename}_model.joblib") elif self.model_type == 'keras': model.save(save_path / f"{dk.model_filename}_model.h5") - elif 'stable_baselines' in self.model_type: + elif self.model_type in ['stable_baselines3', 'sb3_contrib']: model.save(save_path / f"{dk.model_filename}_model.zip") if dk.svm_model is not None: @@ -589,9 +589,9 @@ class FreqaiDataDrawer: elif self.model_type == 'keras': from tensorflow import keras model = keras.models.load_model(dk.data_path / f"{dk.model_filename}_model.h5") - elif self.model_type == 'stable_baselines': + elif self.model_type in ['stable_baselines3', 'sb3_contrib']: mod = importlib.import_module( - 'stable_baselines3', self.freqai_info['rl_config']['model_type']) + self.model_type, self.freqai_info['rl_config']['model_type']) MODELCLASS = getattr(mod, self.freqai_info['rl_config']['model_type']) model = MODELCLASS.load(dk.data_path / f"{dk.model_filename}_model") From 396e666e9b46c4447907c9c093bef67931b09087 Mon Sep 17 00:00:00 2001 From: Emre Date: Thu, 1 Dec 2022 11:03:51 +0300 Subject: [PATCH 57/71] Keep old behavior of model loading --- freqtrade/freqai/data_drawer.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/freqtrade/freqai/data_drawer.py b/freqtrade/freqai/data_drawer.py index 5e1f3a344..848fb20eb 100644 --- a/freqtrade/freqai/data_drawer.py +++ b/freqtrade/freqai/data_drawer.py @@ -503,7 +503,7 @@ class FreqaiDataDrawer: dump(model, save_path / f"{dk.model_filename}_model.joblib") elif self.model_type == 'keras': model.save(save_path / f"{dk.model_filename}_model.h5") - elif self.model_type in 
['stable_baselines3', 'sb3_contrib']: + elif 'stable_baselines' in self.model_type or 'sb3_contrib' == self.model_type: model.save(save_path / f"{dk.model_filename}_model.zip") if dk.svm_model is not None: @@ -589,7 +589,7 @@ class FreqaiDataDrawer: elif self.model_type == 'keras': from tensorflow import keras model = keras.models.load_model(dk.data_path / f"{dk.model_filename}_model.h5") - elif self.model_type in ['stable_baselines3', 'sb3_contrib']: + elif 'stable_baselines' in self.model_type or 'sb3_contrib' == self.model_type: mod = importlib.import_module( self.model_type, self.freqai_info['rl_config']['model_type']) MODELCLASS = getattr(mod, self.freqai_info['rl_config']['model_type']) From 2b3e166dc2590f994aebce2329a99f041b1aec0e Mon Sep 17 00:00:00 2001 From: robcaulk Date: Thu, 1 Dec 2022 10:10:28 +0100 Subject: [PATCH 58/71] fix fees RL --- freqtrade/freqai/RL/BaseEnvironment.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/freqtrade/freqai/RL/BaseEnvironment.py b/freqtrade/freqai/RL/BaseEnvironment.py index 66bdb8435..e7bd26a92 100644 --- a/freqtrade/freqai/RL/BaseEnvironment.py +++ b/freqtrade/freqai/RL/BaseEnvironment.py @@ -194,12 +194,12 @@ class BaseEnvironment(gym.Env): if self._position == Positions.Neutral: return 0. elif self._position == Positions.Short: - current_price = self.add_exit_fee(self.prices.iloc[self._current_tick].open) - last_trade_price = self.add_entry_fee(self.prices.iloc[self._last_trade_tick].open) - return (last_trade_price - current_price) / last_trade_price - elif self._position == Positions.Long: current_price = self.add_entry_fee(self.prices.iloc[self._current_tick].open) last_trade_price = self.add_exit_fee(self.prices.iloc[self._last_trade_tick].open) + return (last_trade_price - current_price) / last_trade_price + elif self._position == Positions.Long: + current_price = self.add_exit_fee(self.prices.iloc[self._current_tick].open) + last_trade_price = self.add_entry_fee(self.prices.iloc[self._last_trade_tick].open) return (current_price - last_trade_price) / last_trade_price else: return 0. 
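For illustration, here is a minimal, self-contained sketch of the fee-adjusted unrealized-profit formulas used in the patch above. The `FEE` constant and the exact form of the `add_entry_fee`/`add_exit_fee` helpers below are assumptions for this example only, not taken from the patch; the real `BaseEnvironment` defines its own helpers and reads prices from its candle DataFrame.

```python
FEE = 0.001  # assumed proportional trading fee for this sketch


def add_entry_fee(price: float, fee: float = FEE) -> float:
    # Raise the effective price by the fee (used here for the buy side of a trade).
    return price * (1 + fee)


def add_exit_fee(price: float, fee: float = FEE) -> float:
    # Lower the effective price by the fee (used here for the sell side of a trade).
    return price / (1 + fee)


def unrealized_profit(position: str, entry_open: float, current_open: float) -> float:
    """Fee-adjusted unrealized profit ratio, mirroring the corrected branches above."""
    if position == "long":
        last_trade_price = add_entry_fee(entry_open)   # entry leg was a buy
        current_price = add_exit_fee(current_open)     # closing now would be a sell
        return (current_price - last_trade_price) / last_trade_price
    if position == "short":
        last_trade_price = add_exit_fee(entry_open)    # entry leg was a sell
        current_price = add_entry_fee(current_open)    # closing now would be a buy-back
        return (last_trade_price - current_price) / last_trade_price
    return 0.0


print(f"{unrealized_profit('long', 100.0, 105.0):.4f}")   # ~0.0479
print(f"{unrealized_profit('short', 100.0, 95.0):.4f}")   # ~0.0481
```

With the corrected assignment, the fee works against the trader on both legs: for a short the entry leg is a sell and the hypothetical close is a buy-back, so the price adjustments are the mirror image of the long case.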
From 05424045b0f5ec1bc6221bd5114a65d4922cebef Mon Sep 17 00:00:00 2001 From: Matthias Date: Fri, 2 Dec 2022 06:12:21 +0100 Subject: [PATCH 59/71] Temporarily disable since binance blocks US --- tests/exchange/test_ccxt_compat.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/tests/exchange/test_ccxt_compat.py b/tests/exchange/test_ccxt_compat.py index 55d463c68..280876ae8 100644 --- a/tests/exchange/test_ccxt_compat.py +++ b/tests/exchange/test_ccxt_compat.py @@ -28,15 +28,15 @@ EXCHANGES = { 'leverage_tiers_public': False, 'leverage_in_spot_market': False, }, - 'binance': { - 'pair': 'BTC/USDT', - 'stake_currency': 'USDT', - 'hasQuoteVolume': True, - 'timeframe': '5m', - 'futures': True, - 'leverage_tiers_public': False, - 'leverage_in_spot_market': False, - }, + # 'binance': { + # 'pair': 'BTC/USDT', + # 'stake_currency': 'USDT', + # 'hasQuoteVolume': True, + # 'timeframe': '5m', + # 'futures': True, + # 'leverage_tiers_public': False, + # 'leverage_in_spot_market': False, + # }, 'kraken': { 'pair': 'BTC/USDT', 'stake_currency': 'USDT', From 7ddf7ec0aecb8366b28c9131fd21dd5c800b2f2e Mon Sep 17 00:00:00 2001 From: Robert Caulk Date: Fri, 2 Dec 2022 11:28:00 +0100 Subject: [PATCH 60/71] Update freqai-parameter-table.md --- docs/freqai-parameter-table.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/freqai-parameter-table.md b/docs/freqai-parameter-table.md index f2a52a9b8..30ae9c62e 100644 --- a/docs/freqai-parameter-table.md +++ b/docs/freqai-parameter-table.md @@ -37,7 +37,7 @@ Mandatory parameters are marked as **Required** and have to be set in one of the | `indicator_max_period_candles` | **No longer used (#7325)**. Replaced by `startup_candle_count` which is set in the [strategy](freqai-configuration.md#building-a-freqai-strategy). `startup_candle_count` is timeframe independent and defines the maximum *period* used in `populate_any_indicators()` for indicator creation. FreqAI uses this parameter together with the maximum timeframe in `include_time_frames` to calculate how many data points to download such that the first data point does not include a NaN.
**Datatype:** Positive integer. | `indicator_periods_candles` | Time periods to calculate indicators for. The indicators are added to the base indicator dataset.
**Datatype:** List of positive integers. | `principal_component_analysis` | Automatically reduce the dimensionality of the data set using Principal Component Analysis. See details about how it works [here](#reducing-data-dimensionality-with-principal-component-analysis)
**Datatype:** Boolean.
Default: `False`. -| `plot_feature_importances` | Create a feature importance plot for each model for the top/bottom `plot_feature_importances` number of features.
**Datatype:** Integer.
Default: `0`. +| `plot_feature_importances` | Create a feature importance plot for each model for the top/bottom `plot_feature_importances` number of features. Plot is stored in `user_data/models/identifier/sub-train-COIN_`.
**Datatype:** Integer.
Default: `0`. | `DI_threshold` | Activates the use of the Dissimilarity Index for outlier detection when set to > 0. See details about how it works [here](freqai-feature-engineering.md#identifying-outliers-with-the-dissimilarity-index-di).
**Datatype:** Positive float (typically < 1). | `use_SVM_to_remove_outliers` | Train a support vector machine to detect and remove outliers from the training dataset, as well as from incoming data points. See details about how it works [here](freqai-feature-engineering.md#identifying-outliers-using-a-support-vector-machine-svm).
**Datatype:** Boolean. | `svm_params` | All parameters available in Sklearn's `SGDOneClassSVM()`. See details about some select parameters [here](freqai-feature-engineering.md#identifying-outliers-using-a-support-vector-machine-svm).
**Datatype:** Dictionary. From 38d3b4cab2e201f995682f2f5b9eee1049d23eba Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sun, 4 Dec 2022 11:29:21 +0100 Subject: [PATCH 61/71] add details to doc plot_feature_importance doc --- docs/freqai-parameter-table.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/freqai-parameter-table.md b/docs/freqai-parameter-table.md index 30ae9c62e..d05ce80f3 100644 --- a/docs/freqai-parameter-table.md +++ b/docs/freqai-parameter-table.md @@ -37,7 +37,7 @@ Mandatory parameters are marked as **Required** and have to be set in one of the | `indicator_max_period_candles` | **No longer used (#7325)**. Replaced by `startup_candle_count` which is set in the [strategy](freqai-configuration.md#building-a-freqai-strategy). `startup_candle_count` is timeframe independent and defines the maximum *period* used in `populate_any_indicators()` for indicator creation. FreqAI uses this parameter together with the maximum timeframe in `include_time_frames` to calculate how many data points to download such that the first data point does not include a NaN.
**Datatype:** Positive integer. | `indicator_periods_candles` | Time periods to calculate indicators for. The indicators are added to the base indicator dataset.
**Datatype:** List of positive integers. | `principal_component_analysis` | Automatically reduce the dimensionality of the data set using Principal Component Analysis. See details about how it works [here](#reducing-data-dimensionality-with-principal-component-analysis)
**Datatype:** Boolean.
Default: `False`. -| `plot_feature_importances` | Create a feature importance plot for each model for the top/bottom `plot_feature_importances` number of features. Plot is stored in `user_data/models/identifier/sub-train-COIN_`.
**Datatype:** Integer.
Default: `0`. +| `plot_feature_importances` | Create a feature importance plot for each model for the top/bottom `plot_feature_importances` number of features. Plot is stored in `user_data/models/<identifier>/sub-train-<COIN>_<timestamp>.html`.
**Datatype:** Integer.
Default: `0`. | `DI_threshold` | Activates the use of the Dissimilarity Index for outlier detection when set to > 0. See details about how it works [here](freqai-feature-engineering.md#identifying-outliers-with-the-dissimilarity-index-di).
**Datatype:** Positive float (typically < 1). | `use_SVM_to_remove_outliers` | Train a support vector machine to detect and remove outliers from the training dataset, as well as from incoming data points. See details about how it works [here](freqai-feature-engineering.md#identifying-outliers-using-a-support-vector-machine-svm).
**Datatype:** Boolean. | `svm_params` | All parameters available in Sklearn's `SGDOneClassSVM()`. See details about some select parameters [here](freqai-feature-engineering.md#identifying-outliers-using-a-support-vector-machine-svm).
**Datatype:** Dictionary. From f7b4fc5bbc0a6b652d83e780b6b950b9cbc8f70a Mon Sep 17 00:00:00 2001 From: smarmau <42020297+smarmau@users.noreply.github.com> Date: Sun, 4 Dec 2022 22:22:23 +1100 Subject: [PATCH 62/71] Update freqai-reinforcement-learning.md Change typo of default Tensorboard port to reflect correct port (6006) --- docs/freqai-reinforcement-learning.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/freqai-reinforcement-learning.md b/docs/freqai-reinforcement-learning.md index 353d7a2cc..b1a212a92 100644 --- a/docs/freqai-reinforcement-learning.md +++ b/docs/freqai-reinforcement-learning.md @@ -243,7 +243,7 @@ cd freqtrade tensorboard --logdir user_data/models/unique-id ``` -where `unique-id` is the `identifier` set in the `freqai` configuration file. This command must be run in a separate shell to view the output in their browser at 127.0.0.1:6060 (6060 is the default port used by Tensorboard). +where `unique-id` is the `identifier` set in the `freqai` configuration file. This command must be run in a separate shell to view the output in their browser at 127.0.0.1:6006 (6006 is the default port used by Tensorboard). ![tensorboard](assets/tensorboard.jpg) From 133a081a394828f1b45e0b3c90223adca96388de Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 5 Dec 2022 03:00:39 +0000 Subject: [PATCH 63/71] Bump pytest-random-order from 1.0.4 to 1.1.0 Bumps [pytest-random-order](https://github.com/jbasko/pytest-random-order) from 1.0.4 to 1.1.0. - [Release notes](https://github.com/jbasko/pytest-random-order/releases) - [Commits](https://github.com/jbasko/pytest-random-order/compare/v1.0.4...v1.1.0) --- updated-dependencies: - dependency-name: pytest-random-order dependency-type: direct:development update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- requirements-dev.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements-dev.txt b/requirements-dev.txt index ffce3d696..463d2656a 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -15,7 +15,7 @@ pytest==7.2.0 pytest-asyncio==0.20.2 pytest-cov==4.0.0 pytest-mock==3.10.0 -pytest-random-order==1.0.4 +pytest-random-order==1.1.0 isort==5.10.1 # For datetime mocking time-machine==2.8.2 From 16bad8dca6235a2d7272b753b4f3fa35698f61de Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 5 Dec 2022 03:00:56 +0000 Subject: [PATCH 64/71] Bump pymdown-extensions from 9.8 to 9.9 Bumps [pymdown-extensions](https://github.com/facelessuser/pymdown-extensions) from 9.8 to 9.9. - [Release notes](https://github.com/facelessuser/pymdown-extensions/releases) - [Commits](https://github.com/facelessuser/pymdown-extensions/compare/9.8...9.9) --- updated-dependencies: - dependency-name: pymdown-extensions dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot]
---
 docs/requirements-docs.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/requirements-docs.txt b/docs/requirements-docs.txt
index 224e9b548..7a2c806ca 100644
--- a/docs/requirements-docs.txt
+++ b/docs/requirements-docs.txt
@@ -2,5 +2,5 @@ markdown==3.3.7
 mkdocs==1.4.2
 mkdocs-material==8.5.10
 mdx_truly_sane_lists==1.3
-pymdown-extensions==9.8
+pymdown-extensions==9.9
 jinja2==3.1.2

From 441069f36390ad90227648fbb7a69b760b97e04e Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 5 Dec 2022 03:01:03 +0000
Subject: [PATCH 65/71] Bump pandas from 1.5.1 to 1.5.2

Bumps [pandas](https://github.com/pandas-dev/pandas) from 1.5.1 to 1.5.2.
- [Release notes](https://github.com/pandas-dev/pandas/releases)
- [Changelog](https://github.com/pandas-dev/pandas/blob/main/RELEASE.md)
- [Commits](https://github.com/pandas-dev/pandas/compare/v1.5.1...v1.5.2)

---
updated-dependencies:
- dependency-name: pandas
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot]
---
 requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index dab8ae414..755c96da2 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,5 +1,5 @@
 numpy==1.23.5
-pandas==1.5.1
+pandas==1.5.2
 pandas-ta==0.3.14b
 ccxt==2.2.36

From caae4441e5a5b604cd5d8f5994dfd80ef3a71a7a Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 5 Dec 2022 03:01:12 +0000
Subject: [PATCH 66/71] Bump jsonschema from 4.17.1 to 4.17.3

Bumps [jsonschema](https://github.com/python-jsonschema/jsonschema) from 4.17.1 to 4.17.3.
- [Release notes](https://github.com/python-jsonschema/jsonschema/releases)
- [Changelog](https://github.com/python-jsonschema/jsonschema/blob/main/CHANGELOG.rst)
- [Commits](https://github.com/python-jsonschema/jsonschema/compare/v4.17.1...v4.17.3)

---
updated-dependencies:
- dependency-name: jsonschema
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot]
---
 requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index dab8ae414..62887f995 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -13,7 +13,7 @@ arrow==1.2.3
 cachetools==4.2.2
 requests==2.28.1
 urllib3==1.26.13
-jsonschema==4.17.1
+jsonschema==4.17.3
 TA-Lib==0.4.25
 technical==1.3.0
 tabulate==0.9.0

From 66bb2c52532167b9f64aadc7ca9cb739b1980119 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 5 Dec 2022 03:01:16 +0000
Subject: [PATCH 67/71] Bump fastapi from 0.87.0 to 0.88.0

Bumps [fastapi](https://github.com/tiangolo/fastapi) from 0.87.0 to 0.88.0.
- [Release notes](https://github.com/tiangolo/fastapi/releases)
- [Commits](https://github.com/tiangolo/fastapi/compare/0.87.0...0.88.0)

---
updated-dependencies:
- dependency-name: fastapi
  dependency-type: direct:production
  update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]
---
 requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index dab8ae414..d9c479444 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -36,7 +36,7 @@ orjson==3.8.2
 sdnotify==0.3.2

 # API Server
-fastapi==0.87.0
+fastapi==0.88.0
 pydantic==1.10.2
 uvicorn==0.20.0
 pyjwt==2.6.0

From 2eb8f9f0282ca0f9f30731265f667c31521760bc Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 5 Dec 2022 03:03:47 +0000
Subject: [PATCH 68/71] Bump pypa/gh-action-pypi-publish from 1.5.1 to 1.6.1

Bumps [pypa/gh-action-pypi-publish](https://github.com/pypa/gh-action-pypi-publish) from 1.5.1 to 1.6.1.
- [Release notes](https://github.com/pypa/gh-action-pypi-publish/releases)
- [Commits](https://github.com/pypa/gh-action-pypi-publish/compare/v1.5.1...v1.6.1)

---
updated-dependencies:
- dependency-name: pypa/gh-action-pypi-publish
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot]
---
 .github/workflows/ci.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index e730d1489..273fb7ea0 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -410,7 +410,7 @@ jobs:
         python setup.py sdist bdist_wheel

     - name: Publish to PyPI (Test)
-      uses: pypa/gh-action-pypi-publish@v1.5.1
+      uses: pypa/gh-action-pypi-publish@v1.6.1
       if: (github.event_name == 'release')
       with:
         user: __token__
@@ -418,7 +418,7 @@
         repository_url: https://test.pypi.org/legacy/

     - name: Publish to PyPI
-      uses: pypa/gh-action-pypi-publish@v1.5.1
+      uses: pypa/gh-action-pypi-publish@v1.6.1
       if: (github.event_name == 'release')
       with:
         user: __token__

From 82d4dca1832ac306ea95dfdda52cd21d9cf8a68a Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 5 Dec 2022 05:21:15 +0000
Subject: [PATCH 69/71] Bump mkdocs-material from 8.5.10 to 8.5.11

Bumps [mkdocs-material](https://github.com/squidfunk/mkdocs-material) from 8.5.10 to 8.5.11.
- [Release notes](https://github.com/squidfunk/mkdocs-material/releases)
- [Changelog](https://github.com/squidfunk/mkdocs-material/blob/master/CHANGELOG)
- [Commits](https://github.com/squidfunk/mkdocs-material/compare/8.5.10...8.5.11)

---
updated-dependencies:
- dependency-name: mkdocs-material
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot]
---
 docs/requirements-docs.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/requirements-docs.txt b/docs/requirements-docs.txt
index 7a2c806ca..fd4f66d71 100644
--- a/docs/requirements-docs.txt
+++ b/docs/requirements-docs.txt
@@ -1,6 +1,6 @@
 markdown==3.3.7
 mkdocs==1.4.2
-mkdocs-material==8.5.10
+mkdocs-material==8.5.11
 mdx_truly_sane_lists==1.3
 pymdown-extensions==9.9
 jinja2==3.1.2

From 179adea0e221eb2c85607a6a2e7b2a34e9116640 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 5 Dec 2022 05:22:42 +0000
Subject: [PATCH 70/71] Bump ccxt from 2.2.36 to 2.2.67

Bumps [ccxt](https://github.com/ccxt/ccxt) from 2.2.36 to 2.2.67.
- [Release notes](https://github.com/ccxt/ccxt/releases)
- [Changelog](https://github.com/ccxt/ccxt/blob/master/exchanges.cfg)
- [Commits](https://github.com/ccxt/ccxt/compare/2.2.36...2.2.67)

---
updated-dependencies:
- dependency-name: ccxt
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot]
---
 requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index 1e9c5030e..99cdca11e 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -2,7 +2,7 @@ numpy==1.23.5
 pandas==1.5.2
 pandas-ta==0.3.14b
-ccxt==2.2.36
+ccxt==2.2.67
 # Pin cryptography for now due to rust build errors with piwheels
 cryptography==38.0.1; platform_machine == 'armv7l'
 cryptography==38.0.4; platform_machine != 'armv7l'

From 102ab91fa44ee1873f844b9fb9fe36e914910aef Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 5 Dec 2022 05:23:02 +0000
Subject: [PATCH 71/71] Bump orjson from 3.8.2 to 3.8.3

Bumps [orjson](https://github.com/ijl/orjson) from 3.8.2 to 3.8.3.
- [Release notes](https://github.com/ijl/orjson/releases)
- [Changelog](https://github.com/ijl/orjson/blob/master/CHANGELOG.md)
- [Commits](https://github.com/ijl/orjson/compare/3.8.2...3.8.3)

---
updated-dependencies:
- dependency-name: orjson
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot]
---
 requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index 1e9c5030e..51d3f9dde 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -30,7 +30,7 @@ py_find_1st==1.1.5
 # Load ticker files 30% faster
 python-rapidjson==1.9
 # Properly format api responses
-orjson==3.8.2
+orjson==3.8.3
 # Notify systemd
 sdnotify==0.3.2