Format code (#275)

Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>

Author: github-actions[bot]
Committed by: GitHub
Date: 2023-05-14 07:52:36 +00:00
Parent: 32437314b8
Commit: 6a3eaef090

8 changed files with 295 additions and 158 deletions

@@ -1,5 +1,6 @@
import torch, os, traceback, sys, warnings, shutil, numpy as np
os.environ["no_proxy"]="localhost, 127.0.0.1, ::1"
os.environ["no_proxy"] = "localhost, 127.0.0.1, ::1"
from multiprocessing import cpu_count
import threading
from time import sleep
@@ -11,8 +12,8 @@ now_dir = os.getcwd()
sys.path.append(now_dir)
tmp = os.path.join(now_dir, "TEMP")
shutil.rmtree(tmp, ignore_errors=True)
shutil.rmtree("%s/runtime/Lib/site-packages/infer_pack"%(now_dir), ignore_errors=True)
shutil.rmtree("%s/runtime/Lib/site-packages/uvr5_pack"%(now_dir) , ignore_errors=True)
shutil.rmtree("%s/runtime/Lib/site-packages/infer_pack" % (now_dir), ignore_errors=True)
shutil.rmtree("%s/runtime/Lib/site-packages/uvr5_pack" % (now_dir), ignore_errors=True)
os.makedirs(tmp, exist_ok=True)
os.makedirs(os.path.join(now_dir, "logs"), exist_ok=True)
os.makedirs(os.path.join(now_dir, "weights"), exist_ok=True)
@@ -70,7 +71,12 @@ else:
gpu_info = i18n("很遗憾您这没有能用的显卡来支持您训练")
default_batch_size = 1
gpus = "-".join([i[0] for i in gpu_infos])
from infer_pack.models import SynthesizerTrnMs256NSFsid, SynthesizerTrnMs256NSFsid_nono,SynthesizerTrnMs768NSFsid, SynthesizerTrnMs768NSFsid_nono
from infer_pack.models import (
SynthesizerTrnMs256NSFsid,
SynthesizerTrnMs256NSFsid_nono,
SynthesizerTrnMs768NSFsid,
SynthesizerTrnMs768NSFsid_nono,
)
from scipy.io import wavfile
from fairseq import checkpoint_utils
import gradio as gr
@@ -121,11 +127,11 @@ names = []
for name in os.listdir(weight_root):
if name.endswith(".pth"):
names.append(name)
index_paths=[]
index_paths = []
for root, dirs, files in os.walk(index_root, topdown=False):
for name in files:
if name.endswith(".index") and "trained" not in name:
index_paths.append("%s/%s"%(root,name))
index_paths.append("%s/%s" % (root, name))
uvr5_names = []
for name in os.listdir(weight_uvr5_root):
if name.endswith(".pth"):
@@ -144,29 +150,33 @@ def vc_single(
index_rate,
filter_radius,
resample_sr,
rms_mix_rate
rms_mix_rate,
): # spk_item, input_audio0, vc_transform0,f0_file,f0method0
global tgt_sr, net_g, vc, hubert_model,version
global tgt_sr, net_g, vc, hubert_model, version
if input_audio_path is None:
return "You need to upload an audio", None
f0_up_key = int(f0_up_key)
try:
audio = load_audio(input_audio_path, 16000)
audio_max=np.abs(audio).max()/0.95
if(audio_max>1):
audio/=audio_max
audio_max = np.abs(audio).max() / 0.95
if audio_max > 1:
audio /= audio_max
times = [0, 0, 0]
if hubert_model == None:
load_hubert()
if_f0 = cpt.get("f0", 1)
file_index = (
file_index.strip(" ")
.strip('"')
.strip("\n")
.strip('"')
.strip(" ")
.replace("trained", "added")
)if file_index!=""else file_index2 # 防止小白写错,自动帮他替换掉
(
file_index.strip(" ")
.strip('"')
.strip("\n")
.strip('"')
.strip(" ")
.replace("trained", "added")
)
if file_index != ""
else file_index2
) # 防止小白写错,自动帮他替换掉
# file_big_npy = (
# file_big_npy.strip(" ").strip('"').strip("\n").strip('"').strip(" ")
# )
@@ -190,10 +200,19 @@ def vc_single(
version,
f0_file=f0_file,
)
if(resample_sr>=16000 and tgt_sr!=resample_sr):
tgt_sr=resample_sr
index_info="Using index:%s."%file_index if os.path.exists(file_index)else"Index not used."
return "Success.\n %s\nTime:\n npy:%ss, f0:%ss, infer:%ss"%(index_info,times[0],times[1],times[2]), (tgt_sr, audio_opt)
if resample_sr >= 16000 and tgt_sr != resample_sr:
tgt_sr = resample_sr
index_info = (
"Using index:%s." % file_index
if os.path.exists(file_index)
else "Index not used."
)
return "Success.\n %s\nTime:\n npy:%ss, f0:%ss, infer:%ss" % (
index_info,
times[0],
times[1],
times[2],
), (tgt_sr, audio_opt)
except:
info = traceback.format_exc()
print(info)
@@ -213,7 +232,7 @@ def vc_multi(
index_rate,
filter_radius,
resample_sr,
rms_mix_rate
rms_mix_rate,
):
try:
dir_path = (
@@ -243,9 +262,9 @@ def vc_multi(
index_rate,
filter_radius,
resample_sr,
rms_mix_rate
rms_mix_rate,
)
if "Success"in info:
if "Success" in info:
try:
tgt_sr, audio_opt = opt
wavfile.write(
@@ -330,8 +349,8 @@ def uvr(model_name, inp_root, save_root_vocal, paths, save_root_ins, agg):
# 一个选项卡全局只能有一个音色
def get_vc(sid):
global n_spk, tgt_sr, net_g, vc, cpt,version
if sid == ""or sid==[]:
global n_spk, tgt_sr, net_g, vc, cpt, version
if sid == "" or sid == []:
global hubert_model
if hubert_model != None: # 考虑到轮询, 需要加个判断看是否 sid 是由有模型切换到无模型的
print("clean_empty_cache")
@@ -342,14 +361,18 @@ def get_vc(sid):
###楼下不这么折腾清理不干净
if_f0 = cpt.get("f0", 1)
version = cpt.get("version", "v1")
if (version == "v1"):
if version == "v1":
if if_f0 == 1:
net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=config.is_half)
net_g = SynthesizerTrnMs256NSFsid(
*cpt["config"], is_half=config.is_half
)
else:
net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
elif (version == "v2"):
elif version == "v2":
if if_f0 == 1:
net_g = SynthesizerTrnMs768NSFsid(*cpt["config"], is_half=config.is_half)
net_g = SynthesizerTrnMs768NSFsid(
*cpt["config"], is_half=config.is_half
)
else:
net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"])
del net_g, cpt
@@ -364,12 +387,12 @@ def get_vc(sid):
cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] # n_spk
if_f0 = cpt.get("f0", 1)
version = cpt.get("version", "v1")
if(version=="v1"):
if version == "v1":
if if_f0 == 1:
net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=config.is_half)
else:
net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
elif(version=="v2"):
elif version == "v2":
if if_f0 == 1:
net_g = SynthesizerTrnMs768NSFsid(*cpt["config"], is_half=config.is_half)
else:
@@ -391,17 +414,21 @@ def change_choices():
for name in os.listdir(weight_root):
if name.endswith(".pth"):
names.append(name)
index_paths=[]
index_paths = []
for root, dirs, files in os.walk(index_root, topdown=False):
for name in files:
if name.endswith(".index") and "trained" not in name:
index_paths.append("%s/%s" % (root, name))
return {"choices": sorted(names), "__type__": "update"},{"choices": sorted(index_paths), "__type__": "update"}
return {"choices": sorted(names), "__type__": "update"}, {
"choices": sorted(index_paths),
"__type__": "update",
}
def clean():
return {"value": "", "__type__": "update"}
sr_dict = {
"32k": 32000,
"40k": 40000,
@@ -468,7 +495,7 @@ def preprocess_dataset(trainset_dir, exp_dir, sr, n_p):
# but2.click(extract_f0,[gpus6,np7,f0method8,if_f0_3,trainset_dir4],[info2])
def extract_f0_feature(gpus, n_p, f0method, if_f0, exp_dir,version19):
def extract_f0_feature(gpus, n_p, f0method, if_f0, exp_dir, version19):
gpus = gpus.split("-")
os.makedirs("%s/logs/%s" % (now_dir, exp_dir), exist_ok=True)
f = open("%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir), "w")
@@ -514,14 +541,18 @@ def extract_f0_feature(gpus, n_p, f0method, if_f0, exp_dir,version19):
leng = len(gpus)
ps = []
for idx, n_g in enumerate(gpus):
cmd = config.python_cmd + " extract_feature_print.py %s %s %s %s %s/logs/%s %s" % (
config.device,
leng,
idx,
n_g,
now_dir,
exp_dir,
version19,
cmd = (
config.python_cmd
+ " extract_feature_print.py %s %s %s %s %s/logs/%s %s"
% (
config.device,
leng,
idx,
n_g,
now_dir,
exp_dir,
version19,
)
)
print(cmd)
p = Popen(
@@ -549,34 +580,47 @@ def extract_f0_feature(gpus, n_p, f0method, if_f0, exp_dir,version19):
yield log
def change_sr2(sr2, if_f0_3,version19):
vis_v=True if sr2=="40k"else False
if(sr2!="40k"):version19="v1"
path_str=""if version19=="v1"else "_v2"
version_state={"visible": vis_v, "__type__": "update"}
if(vis_v==False):version_state["value"]="v1"
f0_str="f0"if if_f0_3 else""
return "pretrained%s/%sG%s.pth" % (path_str,f0_str,sr2), "pretrained%s/%sD%s.pth" % (path_str,f0_str,sr2),version_state
def change_sr2(sr2, if_f0_3, version19):
vis_v = True if sr2 == "40k" else False
if sr2 != "40k":
version19 = "v1"
path_str = "" if version19 == "v1" else "_v2"
version_state = {"visible": vis_v, "__type__": "update"}
if vis_v == False:
version_state["value"] = "v1"
f0_str = "f0" if if_f0_3 else ""
return (
"pretrained%s/%sG%s.pth" % (path_str, f0_str, sr2),
"pretrained%s/%sD%s.pth" % (path_str, f0_str, sr2),
version_state,
)
def change_version19(sr2,if_f0_3,version19):
path_str=""if version19=="v1"else "_v2"
f0_str="f0"if if_f0_3 else""
return "pretrained%s/%sG%s.pth" % (path_str,f0_str,sr2), "pretrained%s/%sD%s.pth" % (path_str,f0_str,sr2)
def change_f0(if_f0_3, sr2,version19): # f0method8,pretrained_G14,pretrained_D15
path_str=""if version19=="v1"else "_v2"
def change_version19(sr2, if_f0_3, version19):
path_str = "" if version19 == "v1" else "_v2"
f0_str = "f0" if if_f0_3 else ""
return "pretrained%s/%sG%s.pth" % (
path_str,
f0_str,
sr2,
), "pretrained%s/%sD%s.pth" % (path_str, f0_str, sr2)
def change_f0(if_f0_3, sr2, version19): # f0method8,pretrained_G14,pretrained_D15
path_str = "" if version19 == "v1" else "_v2"
if if_f0_3:
return (
{"visible": True, "__type__": "update"},
"pretrained%s/f0G%s.pth" % (path_str,sr2),
"pretrained%s/f0D%s.pth" % (path_str,sr2),
"pretrained%s/f0G%s.pth" % (path_str, sr2),
"pretrained%s/f0D%s.pth" % (path_str, sr2),
)
return (
{"visible": False, "__type__": "update"},
"pretrained%s/G%s.pth" % (path_str,sr2),
"pretrained%s/D%s.pth" % (path_str,sr2),
"pretrained%s/G%s.pth" % (path_str, sr2),
"pretrained%s/D%s.pth" % (path_str, sr2),
)
# but3.click(click_train,[exp_dir1,sr2,if_f0_3,save_epoch10,total_epoch11,batch_size12,if_save_latest13,pretrained_G14,pretrained_D15,gpus16])
def click_train(
exp_dir1,
@@ -598,7 +642,11 @@ def click_train(
exp_dir = "%s/logs/%s" % (now_dir, exp_dir1)
os.makedirs(exp_dir, exist_ok=True)
gt_wavs_dir = "%s/0_gt_wavs" % (exp_dir)
feature_dir = "%s/3_feature256" % (exp_dir)if version19=="v1"else "%s/3_feature768" % (exp_dir)
feature_dir = (
"%s/3_feature256" % (exp_dir)
if version19 == "v1"
else "%s/3_feature768" % (exp_dir)
)
if if_f0_3:
f0_dir = "%s/2a_f0" % (exp_dir)
f0nsf_dir = "%s/2b-f0nsf" % (exp_dir)
@@ -640,18 +688,18 @@ def click_train(
spk_id5,
)
)
fea_dim = 256 if version19 == "v1"else 768
fea_dim = 256 if version19 == "v1" else 768
if if_f0_3:
for _ in range(2):
opt.append(
"%s/logs/mute/0_gt_wavs/mute%s.wav|%s/logs/mute/3_feature%s/mute.npy|%s/logs/mute/2a_f0/mute.wav.npy|%s/logs/mute/2b-f0nsf/mute.wav.npy|%s"
% (now_dir, sr2, now_dir,fea_dim, now_dir, now_dir, spk_id5)
% (now_dir, sr2, now_dir, fea_dim, now_dir, now_dir, spk_id5)
)
else:
for _ in range(2):
opt.append(
"%s/logs/mute/0_gt_wavs/mute%s.wav|%s/logs/mute/3_feature%s/mute.npy|%s"
% (now_dir, sr2, now_dir,fea_dim, spk_id5)
% (now_dir, sr2, now_dir, fea_dim, spk_id5)
)
shuffle(opt)
with open("%s/filelist.txt" % exp_dir, "w") as f:
@@ -706,10 +754,14 @@ def click_train(
# but4.click(train_index, [exp_dir1], info3)
def train_index(exp_dir1,version19):
def train_index(exp_dir1, version19):
exp_dir = "%s/logs/%s" % (now_dir, exp_dir1)
os.makedirs(exp_dir, exist_ok=True)
feature_dir = "%s/3_feature256" % (exp_dir)if version19=="v1"else "%s/3_feature768" % (exp_dir)
feature_dir = (
"%s/3_feature256" % (exp_dir)
if version19 == "v1"
else "%s/3_feature768" % (exp_dir)
)
if os.path.exists(feature_dir) == False:
return "请先进行特征提取!"
listdir_res = list(os.listdir(feature_dir))
@@ -729,7 +781,7 @@ def train_index(exp_dir1,version19):
infos = []
infos.append("%s,%s" % (big_npy.shape, n_ivf))
yield "\n".join(infos)
index = faiss.index_factory(256if version19=="v1"else 768, "IVF%s,Flat" % n_ivf)
index = faiss.index_factory(256 if version19 == "v1" else 768, "IVF%s,Flat" % n_ivf)
# index = faiss.index_factory(256if version19=="v1"else 768, "IVF%s,PQ128x4fs,RFlat"%n_ivf)
infos.append("training")
yield "\n".join(infos)
@@ -738,7 +790,8 @@ def train_index(exp_dir1,version19):
index.train(big_npy)
faiss.write_index(
index,
"%s/trained_IVF%s_Flat_nprobe_%s_%s.index" % (exp_dir, n_ivf, index_ivf.nprobe,version19),
"%s/trained_IVF%s_Flat_nprobe_%s_%s.index"
% (exp_dir, n_ivf, index_ivf.nprobe, version19),
)
# faiss.write_index(index, '%s/trained_IVF%s_Flat_FastScan_%s.index'%(exp_dir,n_ivf,version19))
infos.append("adding")
@@ -750,7 +803,10 @@ def train_index(exp_dir1,version19):
index,
"%s/added_IVF%s_Flat_nprobe_%s.index" % (exp_dir, n_ivf, index_ivf.nprobe),
)
infos.append("成功构建索引added_IVF%s_Flat_nprobe_%s_%s.index" % (n_ivf, index_ivf.nprobe,version19))
infos.append(
"成功构建索引added_IVF%s_Flat_nprobe_%s_%s.index"
% (n_ivf, index_ivf.nprobe, version19)
)
# faiss.write_index(index, '%s/added_IVF%s_Flat_FastScan_%s.index'%(exp_dir,n_ivf,version19))
# infos.append("成功构建索引added_IVF%s_Flat_FastScan_%s.index"%(n_ivf,version19))
yield "\n".join(infos)
@@ -786,7 +842,11 @@ def train1key(
preprocess_log_path = "%s/preprocess.log" % model_log_dir
extract_f0_feature_log_path = "%s/extract_f0_feature.log" % model_log_dir
gt_wavs_dir = "%s/0_gt_wavs" % model_log_dir
feature_dir = "%s/3_feature256" % model_log_dir if version19=="v1"else "%s/3_feature768" % model_log_dir
feature_dir = (
"%s/3_feature256" % model_log_dir
if version19 == "v1"
else "%s/3_feature768" % model_log_dir
)
os.makedirs(model_log_dir, exist_ok=True)
#########step1:处理数据
@@ -830,7 +890,8 @@ def train1key(
leng,
idx,
n_g,
model_log_dir,version19,
model_log_dir,
version19,
)
yield get_info_str(cmd)
p = Popen(
@@ -885,18 +946,18 @@ def train1key(
spk_id5,
)
)
fea_dim=256 if version19=="v1"else 768
fea_dim = 256 if version19 == "v1" else 768
if if_f0_3:
for _ in range(2):
opt.append(
"%s/logs/mute/0_gt_wavs/mute%s.wav|%s/logs/mute/3_feature%s/mute.npy|%s/logs/mute/2a_f0/mute.wav.npy|%s/logs/mute/2b-f0nsf/mute.wav.npy|%s"
% (now_dir, sr2, now_dir,fea_dim, now_dir, now_dir, spk_id5)
% (now_dir, sr2, now_dir, fea_dim, now_dir, now_dir, spk_id5)
)
else:
for _ in range(2):
opt.append(
"%s/logs/mute/0_gt_wavs/mute%s.wav|%s/logs/mute/3_feature%s/mute.npy|%s"
% (now_dir, sr2, now_dir,fea_dim, spk_id5)
% (now_dir, sr2, now_dir, fea_dim, spk_id5)
)
shuffle(opt)
with open("%s/filelist.txt" % model_log_dir, "w") as f:
@@ -961,7 +1022,7 @@ def train1key(
# n_ivf = big_npy.shape[0] // 39
n_ivf = min(int(16 * np.sqrt(big_npy.shape[0])), big_npy.shape[0] // 39)
yield get_info_str("%s,%s" % (big_npy.shape, n_ivf))
index = faiss.index_factory(256 if version19=="v1"else 768, "IVF%s,Flat" % n_ivf)
index = faiss.index_factory(256 if version19 == "v1" else 768, "IVF%s,Flat" % n_ivf)
yield get_info_str("training index")
index_ivf = faiss.extract_index_ivf(index) #
index_ivf.nprobe = 1
@@ -969,7 +1030,7 @@ def train1key(
faiss.write_index(
index,
"%s/trained_IVF%s_Flat_nprobe_%s_%s.index"
% (model_log_dir, n_ivf, index_ivf.nprobe,version19),
% (model_log_dir, n_ivf, index_ivf.nprobe, version19),
)
yield get_info_str("adding index")
batch_size_add = 8192
@@ -978,10 +1039,11 @@ def train1key(
faiss.write_index(
index,
"%s/added_IVF%s_Flat_nprobe_%s_%s.index"
% (model_log_dir, n_ivf, index_ivf.nprobe,version19),
% (model_log_dir, n_ivf, index_ivf.nprobe, version19),
)
yield get_info_str(
"成功构建索引, added_IVF%s_Flat_nprobe_%s_%s.index" % (n_ivf, index_ivf.nprobe,version19)
"成功构建索引, added_IVF%s_Flat_nprobe_%s_%s.index"
% (n_ivf, index_ivf.nprobe, version19)
)
yield get_info_str(i18n("全流程结束!"))
@@ -999,8 +1061,8 @@ def change_info_(ckpt_path):
) as f:
info = eval(f.read().strip("\n").split("\n")[0].split("\t")[-1])
sr, f0 = info["sample_rate"], info["if_f0"]
version="v2"if("version"in info and info["version"]=="v2")else"v1"
return sr, str(f0),version
version = "v2" if ("version" in info and info["version"] == "v2") else "v1"
return sr, str(f0), version
except:
traceback.print_exc()
return {"__type__": "update"}, {"__type__": "update"}, {"__type__": "update"}
@@ -1136,7 +1198,7 @@ with gr.Blocks() as app:
value="pm",
interactive=True,
)
filter_radius0=gr.Slider(
filter_radius0 = gr.Slider(
minimum=0,
maximum=7,
label=i18n(">=3则使用对harvest音高识别的结果使用中值滤波数值为滤波半径使用可以削弱哑音"),
@@ -1155,7 +1217,9 @@ with gr.Blocks() as app:
choices=sorted(index_paths),
interactive=True,
)
refresh_button.click(fn=change_choices, inputs=[], outputs=[sid0, file_index2])
refresh_button.click(
fn=change_choices, inputs=[], outputs=[sid0, file_index2]
)
# file_big_npy1 = gr.Textbox(
# label=i18n("特征文件路径"),
# value="E:\\codes\py39\\vits_vc_gpu_train\\logs\\mi-test-1key\\total_fea.npy",
@@ -1168,7 +1232,7 @@ with gr.Blocks() as app:
value=0.76,
interactive=True,
)
resample_sr0=gr.Slider(
resample_sr0 = gr.Slider(
minimum=0,
maximum=48000,
label=i18n("后处理重采样至最终采样率0为不进行重采样"),
@@ -1202,7 +1266,7 @@ with gr.Blocks() as app:
index_rate1,
filter_radius0,
resample_sr0,
rms_mix_rate0
rms_mix_rate0,
],
[vc_output1, vc_output2],
)
@@ -1222,7 +1286,7 @@ with gr.Blocks() as app:
value="pm",
interactive=True,
)
filter_radius1=gr.Slider(
filter_radius1 = gr.Slider(
minimum=0,
maximum=7,
label=i18n(">=3则使用对harvest音高识别的结果使用中值滤波数值为滤波半径使用可以削弱哑音"),
@@ -1253,7 +1317,7 @@ with gr.Blocks() as app:
value=1,
interactive=True,
)
resample_sr1=gr.Slider(
resample_sr1 = gr.Slider(
minimum=0,
maximum=48000,
label=i18n("后处理重采样至最终采样率0为不进行重采样"),
@@ -1293,7 +1357,7 @@ with gr.Blocks() as app:
index_rate2,
filter_radius1,
resample_sr1,
rms_mix_rate1
rms_mix_rate1,
],
[vc_output3],
)
@@ -1398,7 +1462,7 @@ with gr.Blocks() as app:
but1 = gr.Button(i18n("处理数据"), variant="primary")
info1 = gr.Textbox(label=i18n("输出信息"), value="")
but1.click(
preprocess_dataset, [trainset_dir4, exp_dir1, sr2,np7], [info1]
preprocess_dataset, [trainset_dir4, exp_dir1, sr2, np7], [info1]
)
with gr.Group():
gr.Markdown(value=i18n("step2b: 使用CPU提取音高(如果模型带音高), 使用GPU提取特征(选择卡号)"))
@@ -1423,7 +1487,7 @@ with gr.Blocks() as app:
info2 = gr.Textbox(label=i18n("输出信息"), value="", max_lines=8)
but2.click(
extract_f0_feature,
[gpus6, np7, f0method8, if_f0_3, exp_dir1,version19],
[gpus6, np7, f0method8, if_f0_3, exp_dir1, version19],
[info2],
)
with gr.Group():
@@ -1468,9 +1532,7 @@ with gr.Blocks() as app:
interactive=True,
)
if_save_every_weights18 = gr.Radio(
label=i18n(
"是否在每次保存时间点将最终小模型保存至weights文件夹"
),
label=i18n("是否在每次保存时间点将最终小模型保存至weights文件夹"),
choices=[i18n("是"), i18n("否")],
value=i18n("否"),
interactive=True,
@@ -1487,14 +1549,18 @@ with gr.Blocks() as app:
interactive=True,
)
sr2.change(
change_sr2, [sr2, if_f0_3,version19], [pretrained_G14, pretrained_D15,version19]
change_sr2,
[sr2, if_f0_3, version19],
[pretrained_G14, pretrained_D15, version19],
)
version19.change(
change_version19, [sr2, if_f0_3,version19], [pretrained_G14, pretrained_D15]
change_version19,
[sr2, if_f0_3, version19],
[pretrained_G14, pretrained_D15],
)
if_f0_3.change(
change_f0,
[if_f0_3, sr2,version19],
[if_f0_3, sr2, version19],
[f0method8, pretrained_G14, pretrained_D15],
)
gpus16 = gr.Textbox(
@@ -1526,7 +1592,7 @@ with gr.Blocks() as app:
],
info3,
)
but4.click(train_index, [exp_dir1,version19], info3)
but4.click(train_index, [exp_dir1, version19], info3)
but5.click(
train1key,
[
@@ -1586,7 +1652,7 @@ with gr.Blocks() as app:
max_lines=1,
interactive=True,
)
version_2=gr.Radio(
version_2 = gr.Radio(
label=i18n("模型版本型号"),
choices=["v1", "v2"],
value="v1",
@@ -1597,7 +1663,16 @@ with gr.Blocks() as app:
info4 = gr.Textbox(label=i18n("输出信息"), value="", max_lines=8)
but6.click(
merge,
[ckpt_a, ckpt_b, alpha_a, sr_, if_f0_, info__, name_to_save0,version_2],
[
ckpt_a,
ckpt_b,
alpha_a,
sr_,
if_f0_,
info__,
name_to_save0,
version_2,
],
info4,
) # def merge(path1,path2,alpha1,sr,f0,info):
with gr.Group():
@@ -1655,7 +1730,7 @@ with gr.Blocks() as app:
value="1",
interactive=True,
)
version_1=gr.Radio(
version_1 = gr.Radio(
label=i18n("模型版本型号"),
choices=["v1", "v2"],
value="v1",
@@ -1666,10 +1741,12 @@ with gr.Blocks() as app:
)
but9 = gr.Button(i18n("提取"), variant="primary")
info7 = gr.Textbox(label=i18n("输出信息"), value="", max_lines=8)
ckpt_path2.change(change_info_, [ckpt_path2], [sr__, if_f0__,version_1])
ckpt_path2.change(
change_info_, [ckpt_path2], [sr__, if_f0__, version_1]
)
but9.click(
extract_small_model,
[ckpt_path2, save_name, sr__, if_f0__, info___,version_1],
[ckpt_path2, save_name, sr__, if_f0__, info___, version_1],
info7,
)
@@ -1687,16 +1764,16 @@ with gr.Blocks() as app:
butOnnx = gr.Button(i18n("导出Onnx模型"), variant="primary")
butOnnx.click(export_onnx, [ckpt_dir, onnx_dir, moevs], infoOnnx)
tab_faq=i18n("常见问题解答")
tab_faq = i18n("常见问题解答")
with gr.TabItem(tab_faq):
try:
if(tab_faq=="常见问题解答"):
with open("docs/faq.md","r",encoding="utf8")as f:info=f.read()
if tab_faq == "常见问题解答":
with open("docs/faq.md", "r", encoding="utf8") as f:
info = f.read()
else:
with open("docs/faq_en.md", "r")as f:info = f.read()
gr.Markdown(
value=info
)
with open("docs/faq_en.md", "r") as f:
info = f.read()
gr.Markdown(value=info)
except:
gr.Markdown(traceback.format_exc())