mirror of https://github.com/freqtrade/freqtrade.git
synced 2026-02-06 06:10:24 +00:00
docs: update doc samples to use lowercased tuple and dict
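The change below swaps typing.Dict / typing.Tuple in the documentation samples for the built-in generics, which are subscriptable since Python 3.9 (PEP 585). Both spellings are equivalent for type checking; a minimal, self-contained sketch (function names and data here are illustrative, not from the docs):

from typing import Any, Dict, Tuple  # older spelling, still accepted


# Equivalent annotations: the built-in types are subscriptable on Python 3.9+.
def summarize_old(stats: Dict[str, Any]) -> Tuple[str, int]:
    return next(iter(stats)), len(stats)


def summarize_new(stats: dict[str, Any]) -> tuple[str, int]:
    return next(iter(stats)), len(stats)


print(summarize_old({"trades": 12, "wins": 7}))  # ('trades', 2)
print(summarize_new({"trades": 12, "wins": 7}))  # ('trades', 2)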
@@ -37,8 +37,8 @@ class SuperDuperHyperOptLoss(IHyperOptLoss):
         min_date: datetime,
         max_date: datetime,
         config: Config,
-        processed: Dict[str, DataFrame],
-        backtest_stats: Dict[str, Any],
+        processed: dict[str, DataFrame],
+        backtest_stats: dict[str, Any],
         **kwargs,
     ) -> float:
         """
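For context, the annotated parameters belong to a custom hyperopt loss class. The sketch below shows one way such a class could look with the lowercased annotations; the surrounding signature (results, trade_count), the import paths, and the profit_ratio-based scoring are assumptions based on the freqtrade docs sample, not the exact code from this page:

from datetime import datetime
from typing import Any

from pandas import DataFrame

from freqtrade.constants import Config
from freqtrade.optimize.hyperopt import IHyperOptLoss


class SuperDuperHyperOptLoss(IHyperOptLoss):
    """Sketch: score a backtest by its (negated) total profit ratio."""

    @staticmethod
    def hyperopt_loss_function(
        results: DataFrame,
        trade_count: int,
        min_date: datetime,
        max_date: datetime,
        config: Config,
        processed: dict[str, DataFrame],      # built-in generic instead of typing.Dict
        backtest_stats: dict[str, Any],
        **kwargs,
    ) -> float:
        # Hyperopt minimizes the loss, so negate the summed profit.
        return -results["profit_ratio"].sum()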
@@ -103,7 +103,7 @@ class MyAwesomeStrategy(IStrategy):
             SKDecimal(0.01, 0.20, decimals=3, name='roi_p3'),
         ]
 
-    def generate_roi_table(params: Dict) -> Dict[int, float]:
+    def generate_roi_table(params: Dict) -> dict[int, float]:
 
         roi_table = {}
         roi_table[0] = params['roi_p1'] + params['roi_p2'] + params['roi_p3']
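The ROI-table generator in that sample turns the optimized roi_p*/roi_t* parameters into a step-style minimal-ROI table. A self-contained sketch of the same idea with the lowercased return annotation; the keys beyond roi_table[0] and the example values are assumptions based on the usual roi_p*/roi_t* naming:

def generate_roi_table(params: dict) -> dict[int, float]:
    """Sketch: build a minimal-ROI table from hyperopt parameters."""
    roi_table: dict[int, float] = {}
    # Right after entry, require the sum of all three profit steps ...
    roi_table[0] = params['roi_p1'] + params['roi_p2'] + params['roi_p3']
    # ... and relax the requirement as the trade ages (assumed roi_t* offsets).
    roi_table[params['roi_t3']] = params['roi_p1'] + params['roi_p2']
    roi_table[params['roi_t3'] + params['roi_t2']] = params['roi_p1']
    roi_table[params['roi_t3'] + params['roi_t2'] + params['roi_t1']] = 0
    return roi_table


# Example usage with made-up parameter values:
print(generate_roi_table({
    'roi_t1': 60, 'roi_t2': 30, 'roi_t3': 15,
    'roi_p1': 0.02, 'roi_p2': 0.03, 'roi_p3': 0.05,
}))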
@@ -293,10 +293,10 @@ class MyCoolPyTorchClassifier(BasePyTorchClassifier):
         super().__init__(**kwargs)
         config = self.freqai_info.get("model_training_parameters", {})
         self.learning_rate: float = config.get("learning_rate", 3e-4)
-        self.model_kwargs: Dict[str, Any] = config.get("model_kwargs", {})
-        self.trainer_kwargs: Dict[str, Any] = config.get("trainer_kwargs", {})
+        self.model_kwargs: dict[str, Any] = config.get("model_kwargs", {})
+        self.trainer_kwargs: dict[str, Any] = config.get("trainer_kwargs", {})
 
-    def fit(self, data_dictionary: Dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
+    def fit(self, data_dictionary: dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
         """
         User sets up the training and test data to fit their desired model here
         :param data_dictionary: the dictionary holding all data for train, test,
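The annotated lines here only pull nested keys out of the FreqAI configuration. A freqtrade-free sketch of the same pattern with built-in generics; the class name and the example config are illustrative, not from the docs:

from typing import Any


class ModelSettings:
    """Sketch: read nested training parameters with built-in generic annotations."""

    def __init__(self, freqai_info: dict[str, Any]):
        config = freqai_info.get("model_training_parameters", {})
        self.learning_rate: float = config.get("learning_rate", 3e-4)
        self.model_kwargs: dict[str, Any] = config.get("model_kwargs", {})
        self.trainer_kwargs: dict[str, Any] = config.get("trainer_kwargs", {})


settings = ModelSettings({"model_training_parameters": {"learning_rate": 1e-3}})
print(settings.learning_rate, settings.model_kwargs, settings.trainer_kwargs)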
@@ -359,10 +359,10 @@ class PyTorchMLPRegressor(BasePyTorchRegressor):
         super().__init__(**kwargs)
         config = self.freqai_info.get("model_training_parameters", {})
         self.learning_rate: float = config.get("learning_rate", 3e-4)
-        self.model_kwargs: Dict[str, Any] = config.get("model_kwargs", {})
-        self.trainer_kwargs: Dict[str, Any] = config.get("trainer_kwargs", {})
+        self.model_kwargs: dict[str, Any] = config.get("model_kwargs", {})
+        self.trainer_kwargs: dict[str, Any] = config.get("trainer_kwargs", {})
 
-    def fit(self, data_dictionary: Dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
+    def fit(self, data_dictionary: dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
         n_features = data_dictionary["train_features"].shape[-1]
         model = PyTorchMLPModel(
             input_dim=n_features,
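The data_dictionary passed to fit() is a plain dict of DataFrames keyed by split, so the lowercased annotation is sufficient. A pandas-only sketch of reading the feature dimension the way the sample does; the "train_features" key mirrors the diff, while the DataFrame content and the bare function are illustrative:

from typing import Any

import pandas as pd


def fit(data_dictionary: dict[str, pd.DataFrame], **kwargs: Any) -> int:
    """Sketch: derive the model's input width from the training features."""
    n_features = data_dictionary["train_features"].shape[-1]
    # A real model (e.g. an MLP) would be constructed with input_dim=n_features here.
    return n_features


features = pd.DataFrame({"feat_a": [0.1, 0.2], "feat_b": [1.0, 2.0]})
print(fit({"train_features": features}))  # -> 2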
@@ -835,7 +835,7 @@ class DigDeeperStrategy(IStrategy):
                               current_entry_rate: float, current_exit_rate: float,
                               current_entry_profit: float, current_exit_profit: float,
                               **kwargs
-                              ) -> Union[Optional[float], Tuple[Optional[float], Optional[str]]]:
+                              ) -> Union[Optional[float], tuple[Optional[float], Optional[str]]]:
        """
        Custom trade adjustment logic, returning the stake amount that a trade should be
        increased or decreased.
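The widened return annotation lets the callback hand back either a bare stake amount or an (amount, tag) pair. A standalone sketch of consuming such a return value; the helper name and example values are illustrative:

from typing import Optional, Union


def normalize_adjustment(
    result: Union[Optional[float], tuple[Optional[float], Optional[str]]],
) -> tuple[Optional[float], Optional[str]]:
    """Sketch: split the callback's return value into (stake_amount, order_tag)."""
    if isinstance(result, tuple):
        return result
    return result, None


print(normalize_adjustment(50.0))             # (50.0, None)  - plain stake amount
print(normalize_adjustment((50.0, "dca_1")))  # (50.0, 'dca_1') - amount plus tag
print(normalize_adjustment(None))             # (None, None)  - no adjustment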
@@ -780,7 +780,7 @@ class MyCoolFreqaiModel(BaseRegressionModel):
 
     def predict(
         self, unfiltered_df: DataFrame, dk: FreqaiDataKitchen, **kwargs
-    ) -> Tuple[DataFrame, npt.NDArray[np.int_]]:
+    ) -> tuple[DataFrame, npt.NDArray[np.int_]]:
 
         # ... your custom stuff
 
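The tuple[DataFrame, npt.NDArray[np.int_]] annotation pairs the prediction frame with an integer mask of rows to act on. A pandas/numpy sketch that returns a value of that shape; the column name, the all-ones mask, and the freqtrade-free signature are placeholders, not the docs' implementation:

import numpy as np
import numpy.typing as npt
import pandas as pd


def predict(unfiltered_df: pd.DataFrame) -> tuple[pd.DataFrame, npt.NDArray[np.int_]]:
    """Sketch: return predictions plus an integer mask of rows considered trustworthy."""
    pred_df = pd.DataFrame({"prediction": np.zeros(len(unfiltered_df))})
    do_predict = np.ones(len(unfiltered_df), dtype=np.int_)
    return pred_df, do_predict


df = pd.DataFrame({"close": [1.0, 2.0, 3.0]})
preds, mask = predict(df)
print(preds.shape, mask)  # (3, 1) [1 1 1]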