fix: all logger format

according to #1159
源文雨
2023-09-02 12:09:19 +08:00
parent dace5a6f99
commit b3f22dcdef
8 changed files with 33 additions and 83 deletions
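A note on the bug class these hunks fix: Python's logging module is not print. Extra positional arguments are treated as %-style formatting arguments for the message string, so a print-style call such as logger.debug("gin_channels:", gin_channels) never logs both values; it fails at emit time inside the handler. A minimal sketch of the failure and of the two working forms (values are illustrative, not taken from the repo):

import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

gin_channels = 256  # illustrative value, not read from any config

# Broken print-style call: logging treats the extra positional argument as a
# %-format argument, attempts "gin_channels:" % (256,) at emit time, and
# reports "--- Logging error ---" (TypeError: not all arguments converted
# during string formatting) instead of logging both values.
logger.debug("gin_channels:", gin_channels)

# Eager fix in the style of this commit: build the string by hand; non-str
# values must be wrapped in str() before concatenation.
logger.debug("gin_channels: " + str(gin_channels))

# Lazy %-style fix: the argument is only converted if the record is emitted.
logger.debug("gin_channels: %s", gin_channels)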


@@ -617,7 +617,7 @@ class SynthesizerTrnMs256NSFsid(nn.Module):
         )
         self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
         logger.debug(
-            "gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim
+            "gin_channels: " + str(gin_channels) + ", self.spk_embed_dim: " + str(self.spk_embed_dim)
         )
 
     def remove_weight_norm(self):
@@ -735,7 +735,7 @@ class SynthesizerTrnMs768NSFsid(nn.Module):
         )
         self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
         logger.debug(
-            "gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim
+            "gin_channels: " + str(gin_channels) + ", self.spk_embed_dim: " + str(self.spk_embed_dim)
         )
 
     def remove_weight_norm(self):
@@ -850,7 +850,7 @@ class SynthesizerTrnMs256NSFsid_nono(nn.Module):
         )
         self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
         logger.debug(
-            "gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim
+            "gin_channels: " + str(gin_channels) + ", self.spk_embed_dim: " + str(self.spk_embed_dim)
         )
 
     def remove_weight_norm(self):
@@ -958,7 +958,7 @@ class SynthesizerTrnMs768NSFsid_nono(nn.Module):
         )
         self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
         logger.debug(
-            "gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim
+            "gin_channels: " + str(gin_channels) + ", self.spk_embed_dim: " + str(self.spk_embed_dim)
         )
 
     def remove_weight_norm(self):
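Worth flagging on these logger.debug hunks: the replacement lines concatenate, and gin_channels / self.spk_embed_dim are ints, so each value has to pass through str() first; "gin_channels: " + 256 raises TypeError: can only concatenate str (not "int") to str. A lazy equivalent, sketched here with illustrative values rather than taken from the commit, lets %s do that conversion, and only when a DEBUG record is actually emitted:

import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
gin_channels, spk_embed_dim = 256, 109  # illustrative values only

# Hypothetical lazy equivalent of the concatenation in the + lines above:
# %s applies str() to each argument, and only if the record is emitted.
logger.debug("gin_channels: %s, self.spk_embed_dim: %s", gin_channels, spk_embed_dim)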


@@ -621,7 +621,7 @@ class SynthesizerTrnMsNSFsidM(nn.Module):
         self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
         self.speaker_map = None
         logger.debug(
-            "gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim
+            "gin_channels: " + str(gin_channels) + ", self.spk_embed_dim: " + str(self.spk_embed_dim)
         )
 
     def remove_weight_norm(self):


@@ -695,4 +695,4 @@ if __name__ == "__main__":
     # f0 = rmvpe.infer_from_audio(audio, thred=thred)
     # f0 = rmvpe.infer_from_audio(audio, thred=thred)
     t1 = ttime()
-    logger.info(f0.shape, t1 - t0)
+    logger.info("%s %.2f", f0.shape, t1 - t0)


@@ -113,7 +113,7 @@ class TextAudioLoaderMultiNSFsid(torch.utils.data.Dataset):
         try:
             spec = torch.load(spec_filename)
         except:
-            logger.warn(spec_filename, traceback.format_exc())
+            logger.warn("%s %s", spec_filename, traceback.format_exc())
             spec = spectrogram_torch(
                 audio_norm,
                 self.filter_length,
@@ -305,7 +305,7 @@ class TextAudioLoader(torch.utils.data.Dataset):
         try:
             spec = torch.load(spec_filename)
         except:
-            logger.warn(spec_filename, traceback.format_exc())
+            logger.warn("%s %s", spec_filename, traceback.format_exc())
             spec = spectrogram_torch(
                 audio_norm,
                 self.filter_length,
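Two asides on the data_utils hunks, neither of which this commit changes: logger.warn is a deprecated alias for logger.warning, and inside an except block logger.exception appends the active traceback on its own, which would make the explicit traceback.format_exc() argument unnecessary. A sketch of that alternative, assuming an illustrative spec.pt path (note it logs at ERROR rather than WARNING):

import logging

import torch

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

spec_filename = "spec.pt"  # illustrative cache path

try:
    spec = torch.load(spec_filename)
except Exception:
    # logger.exception logs at ERROR level and appends the current traceback
    # itself, replacing the manual traceback.format_exc() argument.
    logger.exception("failed to load cached spectrogram %s", spec_filename)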


@@ -54,9 +54,9 @@ def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False):
     """
     # Validation
     if torch.min(y) < -1.07:
-        logger.debug("min value is ", torch.min(y))
+        logger.debug("min value is %s", str(torch.min(y)))
     if torch.max(y) > 1.07:
-        logger.debug("max value is ", torch.max(y))
+        logger.debug("max value is %s", str(torch.max(y)))
 
     # Window - Cache if needed
     global hann_window
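One nuance in the mel_processing hunk: the explicit str(...) is harmless but redundant, since %s already applies str() when the record is formatted. The only behavioral difference is eagerness; wrapped, the conversion runs even when DEBUG is disabled. A small sketch with an illustrative tensor:

import logging

import torch

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
y = torch.linspace(-1.0, 1.0, steps=8)  # illustrative signal

# Both calls log the same text; the second defers str() on the tensor until
# the DEBUG record is actually emitted.
logger.debug("min value is %s", str(torch.min(y)))
logger.debug("min value is %s", torch.min(y))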


@@ -35,12 +35,12 @@ def load_checkpoint_d(checkpoint_path, combd, sbd, optimizer=None, load_opt=1):
             if saved_state_dict[k].shape != state_dict[k].shape:
                 logger.warn(
                     "shape-%s-mismatch. need: %s, get: %s"
-                    % (k, state_dict[k].shape, saved_state_dict[k].shape)
+                    , k, state_dict[k].shape, saved_state_dict[k].shape
                 )  #
                 raise KeyError
         except:
             # logger.info(traceback.format_exc())
-            logger.info("%s is not in the checkpoint" % k)  # missing from the pretrained weights
+            logger.info("%s is not in the checkpoint", k)  # missing from the pretrained weights
             new_state_dict[k] = v  # keep the model's own randomly initialized values
     if hasattr(model, "module"):
         model.module.load_state_dict(new_state_dict, strict=False)
@@ -111,12 +111,12 @@ def load_checkpoint(checkpoint_path, model, optimizer=None, load_opt=1):
             if saved_state_dict[k].shape != state_dict[k].shape:
                 logger.warn(
                     "shape-%s-mismatch|need-%s|get-%s"
-                    % (k, state_dict[k].shape, saved_state_dict[k].shape)
+                    , k, state_dict[k].shape, saved_state_dict[k].shape
                 )  #
                 raise KeyError
         except:
             # logger.info(traceback.format_exc())
-            logger.info("%s is not in the checkpoint" % k)  # missing from the pretrained weights
+            logger.info("%s is not in the checkpoint", k)  # missing from the pretrained weights
             new_state_dict[k] = v  # keep the model's own randomly initialized values
     if hasattr(model, "module"):
         model.module.load_state_dict(new_state_dict, strict=False)
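The shape-mismatch rewrite is the clearest case for the lazy form over eager % interpolation: with "..." % (k, ...) the string is always built, even when the record is filtered out by level, while with "...", k, ... the arguments are only formatted for records that are actually emitted. A minimal demonstration (the Expensive class is illustrative):

import logging

logging.basicConfig(level=logging.ERROR)  # WARNING records are filtered out
logger = logging.getLogger(__name__)

class Expensive:
    """Stand-in for a costly conversion, e.g. formatting a tensor shape."""

    def __str__(self):
        print("expensive __str__ ran")
        return "expensive"

# Eager %-interpolation: __str__ runs even though the record is dropped.
logger.warning("value: %s" % Expensive())

# Lazy form used by the fixed calls: the record is filtered by level before
# formatting, so __str__ never runs.
logger.warning("value: %s", Expensive())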