Format code (#1162)

Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
Author: github-actions[bot]
Date: 2023-09-02 11:50:52 +08:00
Committed by: GitHub
Parent: a86806b01a
Commit: dace5a6f99
18 changed files with 53 additions and 12 deletions
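The recurring change across these files is a module-level logger created with logging.getLogger(__name__), replacing ad-hoc prints. A minimal sketch of that pattern follows; the entry-point basicConfig call and the load_model helper are illustrative assumptions, not code from this commit.

import logging

# One logger per module, named after the module's import path, so output
# can be filtered or silenced per package.
logger = logging.getLogger(__name__)

def load_model(path):  # hypothetical helper, only to show the logger in use
    logger.info("Loading model from %s", path)

if __name__ == "__main__":
    # Illustrative one-time configuration at the application entry point.
    logging.basicConfig(level=logging.DEBUG)
    load_model("model.pth")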

View File

@@ -1,5 +1,6 @@
import math
import logging
logger = logging.getLogger(__name__)
import numpy as np
@@ -615,7 +616,9 @@ class SynthesizerTrnMs256NSFsid(nn.Module):
inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
)
self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
logger.debug("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
logger.debug(
"gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim
)
def remove_weight_norm(self):
self.dec.remove_weight_norm()
@@ -731,7 +734,9 @@ class SynthesizerTrnMs768NSFsid(nn.Module):
inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
)
self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
logger.debug("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
logger.debug(
"gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim
)
def remove_weight_norm(self):
self.dec.remove_weight_norm()
@@ -844,7 +849,9 @@ class SynthesizerTrnMs256NSFsid_nono(nn.Module):
inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
)
self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
logger.debug("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
logger.debug(
"gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim
)
def remove_weight_norm(self):
self.dec.remove_weight_norm()
@@ -950,7 +957,9 @@ class SynthesizerTrnMs768NSFsid_nono(nn.Module):
inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
)
self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
logger.debug("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
logger.debug(
"gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim
)
def remove_weight_norm(self):
self.dec.remove_weight_norm()
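Note that the reformatted logger.debug calls above keep print-style positional arguments; the standard logging module treats extra positional arguments as %-format values, and since the message string has no placeholders, the handler reports a formatting error instead of the values whenever DEBUG is enabled. A small standalone sketch of both forms, with made-up values:

import logging

logging.basicConfig(level=logging.DEBUG)  # illustrative config for this demo only
logger = logging.getLogger(__name__)

gin_channels, spk_embed_dim = 256, 109  # hypothetical values

# Print-style arguments: logging attempts "gin_channels:" % (256, ...) when the
# record is emitted and prints "--- Logging error ---" to stderr.
logger.debug("gin_channels:", gin_channels, "self.spk_embed_dim:", spk_embed_dim)

# %-style lazy formatting: values are interpolated only if DEBUG is enabled.
logger.debug("gin_channels: %s, self.spk_embed_dim: %s", gin_channels, spk_embed_dim)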

View File

@@ -1,5 +1,6 @@
import math
import logging
logger = logging.getLogger(__name__)
import numpy as np
@@ -619,7 +620,9 @@ class SynthesizerTrnMsNSFsidM(nn.Module):
)
self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
self.speaker_map = None
logger.debug("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
logger.debug(
"gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim
)
def remove_weight_norm(self):
self.dec.remove_weight_norm()

View File

@@ -4,6 +4,7 @@ import onnxruntime
import soundfile
import logging
logger = logging.getLogger(__name__)

View File

@@ -1,6 +1,7 @@
import os
import traceback
import logging
logger = logging.getLogger(__name__)
import numpy as np

View File

@@ -2,6 +2,7 @@ import torch
import torch.utils.data
from librosa.filters import mel as librosa_mel_fn
import logging
logger = logging.getLogger(__name__)
MAX_WAV_VALUE = 32768.0
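MAX_WAV_VALUE = 32768.0 is the conventional full-scale value for 16-bit PCM samples (range -32768..32767). A short sketch, not taken from this repository, of how such a constant is typically used to move between int16 WAV data and the float range that mel-spectrogram code expects:

import numpy as np

MAX_WAV_VALUE = 32768.0

# Hypothetical int16 samples standing in for audio read from a WAV file.
pcm_int16 = np.array([0, 16384, -32768, 32767], dtype=np.int16)

# Normalize to roughly [-1.0, 1.0) before computing spectrograms.
audio = pcm_int16.astype(np.float32) / MAX_WAV_VALUE

# Convert a float waveform back to int16 for writing, clipping to the valid range.
pcm_out = np.clip(audio * MAX_WAV_VALUE, -MAX_WAV_VALUE, MAX_WAV_VALUE - 1).astype(np.int16)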

View File

@@ -1,6 +1,7 @@
import os
import sys
import logging
logger = logging.getLogger(__name__)
now_dir = os.getcwd()

View File

@@ -1,5 +1,6 @@
import os
import logging
logger = logging.getLogger(__name__)
import librosa

View File

@@ -1,6 +1,7 @@
import os
import traceback
import logging
logger = logging.getLogger(__name__)
import ffmpeg

View File

@@ -1,5 +1,6 @@
import os
import logging
logger = logging.getLogger(__name__)
import librosa

View File

@@ -1,5 +1,6 @@
import traceback
import logging
logger = logging.getLogger(__name__)
import numpy as np
@@ -52,8 +53,16 @@ class VC:
if not sid:
if self.hubert_model is not None:  # with polling in mind, we need a check for whether sid has switched from a loaded model to no model
logger.info("Clean model cache")
del self.net_g, self.n_spk, self.vc, self.hubert_model, self.tgt_sr # ,cpt
self.hubert_model = self.net_g = self.n_spk = self.vc = self.hubert_model = self.tgt_sr = None
del (
self.net_g,
self.n_spk,
self.vc,
self.hubert_model,
self.tgt_sr,
) # ,cpt
self.hubert_model = (
self.net_g
) = self.n_spk = self.vc = self.hubert_model = self.tgt_sr = None
if torch.cuda.is_available():
torch.cuda.empty_cache()
### the cleanup below won't be thorough without going through all of this
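The cleanup above deletes the cached attributes and then re-assigns them to None through one long chained assignment (which sets self.hubert_model twice). A compact sketch of the same idea, resetting the named attributes in a loop; this is only an illustration of the pattern, not the project's code:

import torch

class ModelCache:  # hypothetical stand-in for the VC class
    def __init__(self):
        self.net_g = self.n_spk = self.vc = self.hubert_model = self.tgt_sr = None

    def clear(self):
        # Drop every cached reference so the memory backing them can be reclaimed.
        for name in ("net_g", "n_spk", "vc", "hubert_model", "tgt_sr"):
            setattr(self, name, None)
        if torch.cuda.is_available():
            torch.cuda.empty_cache()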

View File

@@ -2,6 +2,7 @@ import os
import sys
import traceback
import logging
logger = logging.getLogger(__name__)
from functools import lru_cache
@@ -267,9 +268,7 @@ class Pipeline(object):
with torch.no_grad():
hasp = pitch is not None and pitchf is not None
arg = (feats, p_len, pitch, pitchf, sid) if hasp else (feats, p_len, sid)
audio1 = (
(net_g.infer(*arg)[0][0, 0]).data.cpu().float().numpy()
)
audio1 = (net_g.infer(*arg)[0][0, 0]).data.cpu().float().numpy()
del hasp, arg
del feats, p_len, padding_mask
if torch.cuda.is_available():

View File

@@ -2,6 +2,7 @@ import os
from fairseq import checkpoint_utils
def get_index_path_from_model(sid):
return next(
(