Add support for training without specifying a pretrained model, add support for selecting v2 48k as a training setting, and add support for automatically clearing the pretrained model path when the user does not have a pretrained model in the designated folder. (#528)

* support detection of pretrained models; support training without a pretrained model path in the web UI

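The web-UI half of the change (the second file in this commit) comes down to probing the designated folder for the expected weight files before filling in the pretrained paths, and clearing them when the files are absent so train.py is launched with empty strings. A minimal sketch of that detection logic, assuming the stock folder layout (pretrained/ for v1, pretrained_v2/ for v2); the helper name is hypothetical, not the commit's actual function:

    import os

    def get_pretrained_paths(version, sr, if_f0):
        """Return (G, D) pretrained paths, or empty strings if the files are missing.

        Hypothetical helper illustrating the detection this commit describes.
        """
        folder = "pretrained" if version == "v1" else "pretrained_v2"
        f0 = "f0" if if_f0 else ""
        path_g = os.path.join(folder, "%sG%s.pth" % (f0, sr))
        path_d = os.path.join(folder, "%sD%s.pth" % (f0, sr))
        # Auto-clear the path when the file is absent; train.py then sees
        # hps.pretrainG == "" / hps.pretrainD == "" and skips loading.
        if not os.access(path_g, os.F_OK):
            print(path_g, "not found, training will start without a pretrained G")
            path_g = ""
        if not os.access(path_d, os.F_OK):
            print(path_d, "not found, training will start without a pretrained D")
            path_d = ""
        return path_g, path_d

    # e.g. the newly selectable v2 48k setting would look for
    # pretrained_v2/f0G48k.pth and pretrained_v2/f0D48k.pth
    print(get_pretrained_paths("v2", "48k", True))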
Author: LINKANG ZHAN
Date: 2023-06-15 10:21:58 +08:00
Committed by: GitHub
Parent: eb1a88cf7e
Commit: f349adc9df
2 changed files with 62 additions and 42 deletions

@@ -191,18 +191,24 @@ def run(rank, n_gpus, hps):
         # traceback.print_exc()
         epoch_str = 1
         global_step = 0
-        if rank == 0:
-            logger.info("loaded pretrained %s %s" % (hps.pretrainG, hps.pretrainD))
-        print(
-            net_g.module.load_state_dict(
-                torch.load(hps.pretrainG, map_location="cpu")["model"]
-            )
-        )  ##测试不加载优化器 (test: don't load the optimizer state)
-        print(
-            net_d.module.load_state_dict(
-                torch.load(hps.pretrainD, map_location="cpu")["model"]
-            )
-        )
+        if hps.pretrainG != "":
+            if rank == 0:
+                logger.info("loaded pretrained %s" % (hps.pretrainG))
+            print(
+                net_g.module.load_state_dict(
+                    torch.load(hps.pretrainG, map_location="cpu")["model"]
+                )
+            )  ##测试不加载优化器 (test: don't load the optimizer state)
+        if hps.pretrainD != "":
+            if rank == 0:
+                logger.info("loaded pretrained %s" % (hps.pretrainD))
+            print(
+                net_d.module.load_state_dict(
+                    torch.load(hps.pretrainD, map_location="cpu")["model"]
+                )
+            )
     scheduler_g = torch.optim.lr_scheduler.ExponentialLR(
         optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2
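The added guard is the substance of the train.py change: previously both load_state_dict calls ran unconditionally, so launching without pretrained weights crashed on the empty path. A self-contained sketch of the resulting pattern, with a toy module standing in for RVC's networks and without the DistributedDataParallel .module indirection:

    import torch
    import torch.nn as nn

    class Net(nn.Module):
        """Toy stand-in for RVC's generator; illustrative only."""
        def __init__(self):
            super().__init__()
            self.proj = nn.Linear(8, 8)

    net_g = Net()
    pretrainG = ""  # empty path means "train from scratch", matching hps.pretrainG above

    if pretrainG != "":
        # load_state_dict returns a key-matching report ("<All keys matched
        # successfully>" when everything lines up); the original code wraps
        # the call in print() so that report lands in the training console.
        print(net_g.load_state_dict(torch.load(pretrainG, map_location="cpu")["model"]))
    # With pretrainG == "" the load is skipped entirely and training proceeds
    # from the random initialization above.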