viotemp1
2025-05-17 01:15:10 +03:00
parent 5b92af6a90
commit 28e5efc902
6 changed files with 8 additions and 6 deletions

View File

@@ -16,6 +16,7 @@ usage: freqtrade hyperopt [-h] [-v] [--no-color] [--logfile FILE] [-V]
[--random-state INT] [--min-trades INT]
[--hyperopt-loss NAME] [--disable-param-export]
[--ignore-missing-spaces] [--analyze-per-epoch]
+ [--early-stop INT]
options:
-h, --help show this help message and exit
@@ -87,6 +88,8 @@ options:
Suppress errors for any requested Hyperopt spaces that
do not contain any parameters.
--analyze-per-epoch Run populate_indicators once per epoch.
+ --early-stop INT Early stop hyperopt if no improvement after (default:
+ 0) epochs.
Common arguments:
-v, --verbose Verbose mode (-vv for more, -vvv to get all messages).

View File

@@ -490,7 +490,7 @@ freqtrade hyperopt --config config.json --hyperopt-loss <hyperoptlossname> --str
```
The `-e` option sets how many evaluations (epochs) hyperopt will run. Since hyperopt uses Bayesian search, running too many epochs at once may not produce better results. Experience has shown that results usually do not improve much beyond 500-1000 epochs.
- The `-es` option sets the number of epochs without improvement after which hyperopt stops. A good value is 20-30% of the total epochs. Early stopping is disabled by default (`-es=0`).
+ The `--early-stop` option sets the number of epochs without improvement after which hyperopt stops. A good value is 20-30% of the total epochs. Early stopping is disabled by default (`--early-stop=0`).
Doing multiple runs (executions) with a few thousand epochs and different random states will most likely produce different results.
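For example, to cap a run at 1000 epochs and stop it once 200 consecutive epochs have passed without improvement (the numbers are purely illustrative):
```
freqtrade hyperopt --config config.json --hyperopt-loss <hyperoptlossname> --strategy <strategyname> -e 1000 --early-stop 200
```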

View File

@@ -263,7 +263,6 @@ AVAILABLE_CLI_OPTIONS = {
default=constants.HYPEROPT_EPOCH,
),
"early_stop": Arg(
"-es",
"--early-stop",
help="Early stop hyperopt if no improvement after (default: %(default)d) epochs.",
type=check_int_positive,
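Assuming freqtrade's `Arg` wrapper simply forwards its positional and keyword arguments to `argparse.ArgumentParser.add_argument` (an assumption for this sketch, not verified here), the definition above corresponds roughly to the following plain-argparse registration, with a simplified stand-in for `check_int_positive`:

```python
import argparse


def check_int_positive(value: str) -> int:
    # Simplified stand-in for freqtrade's validator: accept only strictly positive integers.
    try:
        ivalue = int(value)
    except ValueError:
        raise argparse.ArgumentTypeError(f"{value} is not a valid integer")
    if ivalue <= 0:
        raise argparse.ArgumentTypeError(f"{value} is not a positive integer")
    return ivalue


parser = argparse.ArgumentParser(prog="freqtrade hyperopt")
parser.add_argument(
    "--early-stop",
    help="Early stop hyperopt if no improvement after (default: %(default)d) epochs.",
    type=check_int_positive,
    metavar="INT",
    default=0,  # assumed default: 0 keeps early stopping disabled
)

args = parser.parse_args(["--early-stop", "200"])
print(args.early_stop)  # -> 200
```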

View File

@@ -335,6 +335,7 @@ class Configuration:
]
self._args_to_config_loop(config, configurations)
if self.args.get("early_stop", 0) > 0:
config.update({"early_stop": self.args["early_stop"]})
logger.info(
f"Parameter --early-stop detected ... Will early stop hyperopt if no improvement "
f"after {self.args.get('early_stop')} epochs ..."

View File

@@ -318,7 +318,7 @@ class Hyperopt:
gc.collect()
if (
- self.hyperopter.es_batches > 0
+ self.hyperopter.es_epochs > 0
and self.hyperopter.es_terminator.should_terminate(self.opt)
):
logger.info(f"Early stopping after {(i + 1) * jobs} epochs")

View File

@@ -106,7 +106,6 @@ class HyperOptimizer:
self.market_change = 0.0
self.es_epochs = config.get("early_stop", 0)
- self.es_batches = self.es_epochs // config.get("hyperopt_jobs", 1)
if self.es_epochs > 0 and self.es_epochs < 0.2 * config.get("epochs", 0):
logger.warning(f"Early stop epochs {self.es_epochs} lower than 20% of total epochs")
@@ -430,10 +429,10 @@ class HyperOptimizer:
else:
sampler = o_sampler
- if self.es_batches > 0:
+ if self.es_epochs > 0:
with warnings.catch_warnings():
warnings.filterwarnings(action="ignore", category=ExperimentalWarning)
- self.es_terminator = Terminator(BestValueStagnationEvaluator(self.es_batches))
+ self.es_terminator = Terminator(BestValueStagnationEvaluator(self.es_epochs))
logger.info(f"Using optuna sampler {o_sampler}.")
return optuna.create_study(sampler=sampler, direction="minimize")
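Putting the two hunks together: study creation installs an Optuna `Terminator` built from a `BestValueStagnationEvaluator`, and the evaluation loop asks it `should_terminate()` after each batch. A minimal standalone sketch of that pattern (not freqtrade code; the objective and batch size are toy stand-ins for hyperopt epochs and parallel jobs):

```python
import warnings

import optuna
from optuna.exceptions import ExperimentalWarning
from optuna.terminator import BestValueStagnationEvaluator, Terminator


def objective(trial: optuna.Trial) -> float:
    # Toy objective standing in for one hyperopt epoch; the optimum (x=2) is found quickly,
    # after which the best value can no longer improve and stagnation sets in.
    x = trial.suggest_int("x", -10, 10)
    return float((x - 2) ** 2)


early_stop = 30  # illustrative: stop after 30 trials without a new best value

with warnings.catch_warnings():
    # The terminator API is experimental; mirror the warning filter used in the diff above.
    warnings.filterwarnings(action="ignore", category=ExperimentalWarning)
    es_terminator = Terminator(BestValueStagnationEvaluator(early_stop))

study = optuna.create_study(direction="minimize")

jobs = 10  # trials per loop iteration, analogous to parallel hyperopt jobs
for i in range(100):
    study.optimize(objective, n_trials=jobs)
    if es_terminator.should_terminate(study):
        print(f"Early stopping after {(i + 1) * jobs} epochs")
        break
```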