From d9b8f462827c8d658133de2830e9f41db5f0b9a4 Mon Sep 17 00:00:00 2001 From: Matthias Date: Mon, 11 Nov 2024 20:24:01 +0100 Subject: [PATCH] docs: update doc samples to use lowercased tuple and dict --- docs/advanced-hyperopt.md | 6 +++--- docs/freqai-configuration.md | 12 ++++++------ docs/strategy-callbacks.md | 2 +- docs/strategy_migration.md | 2 +- 4 files changed, 11 insertions(+), 11 deletions(-) diff --git a/docs/advanced-hyperopt.md b/docs/advanced-hyperopt.md index e276bed94..480b20daf 100644 --- a/docs/advanced-hyperopt.md +++ b/docs/advanced-hyperopt.md @@ -37,8 +37,8 @@ class SuperDuperHyperOptLoss(IHyperOptLoss): min_date: datetime, max_date: datetime, config: Config, - processed: Dict[str, DataFrame], - backtest_stats: Dict[str, Any], + processed: dict[str, DataFrame], + backtest_stats: dict[str, Any], **kwargs, ) -> float: """ @@ -103,7 +103,7 @@ class MyAwesomeStrategy(IStrategy): SKDecimal(0.01, 0.20, decimals=3, name='roi_p3'), ] - def generate_roi_table(params: Dict) -> Dict[int, float]: + def generate_roi_table(params: Dict) -> dict[int, float]: roi_table = {} roi_table[0] = params['roi_p1'] + params['roi_p2'] + params['roi_p3'] diff --git a/docs/freqai-configuration.md b/docs/freqai-configuration.md index 5caa89a42..3f01ca81b 100644 --- a/docs/freqai-configuration.md +++ b/docs/freqai-configuration.md @@ -293,10 +293,10 @@ class MyCoolPyTorchClassifier(BasePyTorchClassifier): super().__init__(**kwargs) config = self.freqai_info.get("model_training_parameters", {}) self.learning_rate: float = config.get("learning_rate", 3e-4) - self.model_kwargs: Dict[str, Any] = config.get("model_kwargs", {}) - self.trainer_kwargs: Dict[str, Any] = config.get("trainer_kwargs", {}) + self.model_kwargs: dict[str, Any] = config.get("model_kwargs", {}) + self.trainer_kwargs: dict[str, Any] = config.get("trainer_kwargs", {}) - def fit(self, data_dictionary: Dict, dk: FreqaiDataKitchen, **kwargs) -> Any: + def fit(self, data_dictionary: dict, dk: FreqaiDataKitchen, **kwargs) -> Any: """ User sets up the training and test data to fit their desired model here :param data_dictionary: the dictionary holding all data for train, test, @@ -359,10 +359,10 @@ class PyTorchMLPRegressor(BasePyTorchRegressor): super().__init__(**kwargs) config = self.freqai_info.get("model_training_parameters", {}) self.learning_rate: float = config.get("learning_rate", 3e-4) - self.model_kwargs: Dict[str, Any] = config.get("model_kwargs", {}) - self.trainer_kwargs: Dict[str, Any] = config.get("trainer_kwargs", {}) + self.model_kwargs: dict[str, Any] = config.get("model_kwargs", {}) + self.trainer_kwargs: dict[str, Any] = config.get("trainer_kwargs", {}) - def fit(self, data_dictionary: Dict, dk: FreqaiDataKitchen, **kwargs) -> Any: + def fit(self, data_dictionary: dict, dk: FreqaiDataKitchen, **kwargs) -> Any: n_features = data_dictionary["train_features"].shape[-1] model = PyTorchMLPModel( input_dim=n_features, diff --git a/docs/strategy-callbacks.md b/docs/strategy-callbacks.md index 78c6c7380..715ebdde2 100644 --- a/docs/strategy-callbacks.md +++ b/docs/strategy-callbacks.md @@ -835,7 +835,7 @@ class DigDeeperStrategy(IStrategy): current_entry_rate: float, current_exit_rate: float, current_entry_profit: float, current_exit_profit: float, **kwargs - ) -> Union[Optional[float], Tuple[Optional[float], Optional[str]]]: + ) -> Union[Optional[float], tuple[Optional[float], Optional[str]]]: """ Custom trade adjustment logic, returning the stake amount that a trade should be increased or decreased. diff --git a/docs/strategy_migration.md b/docs/strategy_migration.md index b423eca6e..a9748c413 100644 --- a/docs/strategy_migration.md +++ b/docs/strategy_migration.md @@ -780,7 +780,7 @@ class MyCoolFreqaiModel(BaseRegressionModel): def predict( self, unfiltered_df: DataFrame, dk: FreqaiDataKitchen, **kwargs - ) -> Tuple[DataFrame, npt.NDArray[np.int_]]: + ) -> tuple[DataFrame, npt.NDArray[np.int_]]: # ... your custom stuff