Mirror of https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI.git (synced 2026-01-20 02:51:09 +00:00)
chore(sync): merge dev into main (#1379)
* Optimize latency (#1259)
  * add attribute: configs/config.py; optimize latency: tools/rvc_for_realtime.py
  * new file: assets/Synthesizer_inputs.pth
  * fix: configs/config.py, tools/rvc_for_realtime.py
  * fix bug: infer/lib/infer_pack/models.py
  * new files: assets/hubert_inputs.pth, assets/rmvpe_inputs.pth; modified: configs/config.py; new features: infer/lib/rmvpe.py, tools/jit_export/__init__.py, tools/jit_export/get_hubert.py, tools/jit_export/get_rmvpe.py, tools/jit_export/get_synthesizer.py; optimize: tools/rvc_for_realtime.py
  * optimize: tools/jit_export/get_synthesizer.py; fix bug: tools/jit_export/__init__.py
  * Fixed a bug caused by using half on the CPU: infer/lib/rmvpe.py, tools/jit_export/__init__.py, tools/rvc_for_realtime.py; fixed circular import: tools/jit_export/get_rmvpe.py, tools/jit_export/get_synthesizer.py
  * Remove useless code: infer/lib/rmvpe.py
  * Delete gui_v1 copy.py
  * Delete .vscode/launch.json
  * Delete jit_export_test.py
  * Delete tools/rvc_for_realtime copy.py
  * Delete configs/config.json
  * Delete .gitignore
  * Fix exceptions caused by switching inference devices: infer/lib/rmvpe.py, tools/jit_export/__init__.py, tools/rvc_for_realtime.py
  * restore
  * replace (you can undo this commit)
  * remove debug_print
  Co-authored-by: Ftps <ftpsflandre@gmail.com>
* Fixed some bugs when exporting ONNX model (#1254)
* fix import (#1280)
  * fix import
  * lint
* 🎨 Sync locale (#1242)
  Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
* Fix jit load and import issue (#1282)
  * fix jit model loading: infer/lib/rmvpe.py
  * modified: assets/hubert/.gitignore, assets/rmvpe/.gitignore; moved assets/hubert_inputs.pth -> assets/hubert/hubert_inputs.pth, assets/rmvpe_inputs.pth -> assets/rmvpe/rmvpe_inputs.pth; fix import: gui_v1.py
* feat(workflow): trigger on dev
* feat(workflow): add close-pr on non-dev branch
* Add input wav and delay time monitor for real-time gui (#1293)
  * feat(workflow): trigger on dev
  * feat(workflow): add close-pr on non-dev branch
  * 🎨 Sync locale (#1289) (Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>)
  * feat: edit PR template
  * add input wav and delay time monitor
  Co-authored-by: 源文雨 <41315874+fumiama@users.noreply.github.com>
  Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
  Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
  Co-authored-by: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com>
* Optimize latency using scripted jit (#1291)
  * feat(workflow): trigger on dev
  * feat(workflow): add close-pr on non-dev branch
  * 🎨 Sync locale (#1289) (Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>)
  * feat: edit PR template
  * Optimize latency using scripted jit: configs/config.py, infer/lib/infer_pack/attentions.py, infer/lib/infer_pack/commons.py, infer/lib/infer_pack/models.py, infer/lib/infer_pack/modules.py, infer/lib/jit/__init__.py, infer/lib/jit/get_hubert.py, infer/lib/jit/get_rmvpe.py, infer/lib/jit/get_synthesizer.py, infer/lib/rmvpe.py, tools/rvc_for_realtime.py
  * modified: infer/lib/infer_pack/models.py
  * fix some bugs: configs/config.py, infer/lib/infer_pack/models.py, infer/lib/rmvpe.py
  * Fixed abnormal reference of logger in multiprocessing: infer/modules/train/train.py
  Co-authored-by: 源文雨 <41315874+fumiama@users.noreply.github.com>
  Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
  Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
* Format code (#1298)
  Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
* 🎨 Sync locale (#1299)
  Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
* feat: optimize actions
* feat(workflow): add sync dev
* feat: optimize actions
* feat: optimize actions
* feat: optimize actions
* feat: optimize actions
* feat: add jit options (#1303); delete useless code: infer/lib/jit/get_synthesizer.py; optimized code: tools/rvc_for_realtime.py
* Code refactor + re-design inference ui (#1304)
  * Code refactor + re-design inference ui
  * Fix tabname
  * i18n jp
  Co-authored-by: Ftps <ftpsflandre@gmail.com>
* feat: optimize actions
* feat: optimize actions
* Update README & en_US locale file (#1309)
* critical: some bug fixes (#1322)
* JIT acceleration switch does not support hot update
* fix padding bug of rmvpe in torch-directml
* fix padding bug of rmvpe in torch-directml
* Fix STFT under torch_directml (#1330)
* chore(format): run black on dev (#1318)
  Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
* chore(i18n): sync locale on dev (#1317)
  Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
* feat: allow for tta to be passed to uvr (#1361)
* chore(format): run black on dev (#1373)
  Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
* Added script for automatically downloading all needed models at install (#1366)
  * Delete modules.py
  * Add files via upload
  * Add files via upload
  * Add files via upload
  * Add files via upload
* chore(i18n): sync locale on dev (#1377)
  Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
* chore(format): run black on dev (#1376)
  Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
* Update IPEX library (#1362)
  * Update IPEX library
  * Update ipex index
* chore(format): run black on dev (#1378)
  Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>

Co-authored-by: Chengjia Jiang <46401978+ChasonJiang@users.noreply.github.com>
Co-authored-by: Ftps <ftpsflandre@gmail.com>
Co-authored-by: shizuku_nia <102004222+ShizukuNia@users.noreply.github.com>
Co-authored-by: Ftps <63702646+Tps-F@users.noreply.github.com>
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
Co-authored-by: 源文雨 <41315874+fumiama@users.noreply.github.com>
Co-authored-by: yxlllc <33565655+yxlllc@users.noreply.github.com>
Co-authored-by: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com>
Co-authored-by: Blaise <133521603+blaise-tk@users.noreply.github.com>
Co-authored-by: Rice Cake <gak141808@gmail.com>
Co-authored-by: AWAS666 <33494149+AWAS666@users.noreply.github.com>
Co-authored-by: Dmitry <nda2911@yandex.ru>
Co-authored-by: Disty0 <47277141+Disty0@users.noreply.github.com>
Committed by: GitHub
Parent: fe166e7f3d
Commit: e9dd11bddb
@@ -17,7 +17,6 @@ def ipex_init():  # pylint: disable=too-many-statements
         torch.cuda.device = torch.xpu.device
         torch.cuda.device_count = torch.xpu.device_count
         torch.cuda.device_of = torch.xpu.device_of
-        torch.cuda.getDeviceIdListForCard = torch.xpu.getDeviceIdListForCard
         torch.cuda.get_device_name = torch.xpu.get_device_name
         torch.cuda.get_device_properties = torch.xpu.get_device_properties
         torch.cuda.init = torch.xpu.init
@@ -169,9 +168,23 @@ def ipex_init():  # pylint: disable=too-many-statements
         torch.cuda.get_device_properties.minor = 7
         torch.cuda.ipc_collect = lambda *args, **kwargs: None
         torch.cuda.utilization = lambda *args, **kwargs: 0
+        if hasattr(torch.xpu, "getDeviceIdListForCard"):
+            torch.cuda.getDeviceIdListForCard = torch.xpu.getDeviceIdListForCard
+            torch.cuda.get_device_id_list_per_card = torch.xpu.getDeviceIdListForCard
+        else:
+            torch.cuda.getDeviceIdListForCard = torch.xpu.get_device_id_list_per_card
+            torch.cuda.get_device_id_list_per_card = (
+                torch.xpu.get_device_id_list_per_card
+            )
 
         ipex_hijacks()
         attention_init()
+        try:
+            from .diffusers import ipex_diffusers
+
+            ipex_diffusers()
+        except Exception:  # pylint: disable=broad-exception-caught
+            pass
     except Exception as e:
         return False, e
     return True, None
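The two hunks above touch the CUDA-to-XPU shim in infer/modules/ipex: the unconditional getDeviceIdListForCard alias is dropped and re-added behind a hasattr() check, so the mapping works whether the installed IPEX exposes getDeviceIdListForCard or get_device_id_list_per_card, and the optional diffusers hijack is wrapped in try/except. As the hunk shows, ipex_init() reports its outcome as a (bool, error) pair, so callers can fall back instead of crashing. A minimal, hedged usage sketch (the surrounding guard is an assumption for illustration, not part of this diff):

# Hedged usage sketch: ipex_init() returns (True, None) on success and
# (False, exception) on failure, per the hunk above.
import torch

if hasattr(torch, "xpu") and torch.xpu.is_available():
    from infer.modules.ipex import ipex_init

    ok, err = ipex_init()
    if not ok:
        print(f"IPEX hijack disabled, falling back to stock PyTorch: {err}")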
@@ -16,17 +16,15 @@ def torch_bmm(input, mat2, *, out=None):
         input.shape[1],
         mat2.shape[2],
     )
-    block_multiply = 2.4 if input.dtype == torch.float32 else 1.2
-    block_size = (
-        (batch_size_attention * input_tokens * mat2_shape) / 1024 * block_multiply
-    )  # MB
+    block_multiply = input.element_size()
+    slice_block_size = input_tokens * mat2_shape / 1024 / 1024 * block_multiply
+    block_size = batch_size_attention * slice_block_size
+
     split_slice_size = batch_size_attention
-    if block_size >= 4000:
+    if block_size > 4:
         do_split = True
         # Find something divisible with the input_tokens
-        while (
-            (split_slice_size * input_tokens * mat2_shape) / 1024 * block_multiply
-        ) > 4000:
+        while (split_slice_size * slice_block_size) > 4:
             split_slice_size = split_slice_size // 2
             if split_slice_size <= 1:
                 split_slice_size = 1
@@ -34,16 +32,12 @@ def torch_bmm(input, mat2, *, out=None):
     else:
         do_split = False
 
-    split_block_size = (
-        (split_slice_size * input_tokens * mat2_shape) / 1024 * block_multiply
-    )  # MB
     split_2_slice_size = input_tokens
-    if split_block_size >= 4000:
+    if split_slice_size * slice_block_size > 4:
+        slice_block_size2 = split_slice_size * mat2_shape / 1024 / 1024 * block_multiply
         do_split_2 = True
         # Find something divisible with the input_tokens
-        while (
-            (split_slice_size * split_2_slice_size * mat2_shape) / 1024 * block_multiply
-        ) > 4000:
+        while (split_2_slice_size * slice_block_size2) > 4:
             split_2_slice_size = split_2_slice_size // 2
             if split_2_slice_size <= 1:
                 split_2_slice_size = 1
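The torch_bmm hunks above replace the hard-coded dtype factor (2.4 or 1.2) and the 4000 threshold with an estimate built from input.element_size(), then keep halving the slice until the per-slice estimate falls below the new threshold; the motivation, per the "# ARC GPUs can't allocate more than 4GB to a single block" comment further down in this diff, is to keep each allocation under that limit. A small, hedged sketch of the halving strategy with made-up numbers (the names and the budget value are illustrative, not the project's constants):

# Illustrative sketch: halve the number of items per slice until the estimated
# cost of one slice drops under the budget, mirroring the while-loops above.
def pick_split(total: int, per_item_cost: float, budget: float) -> int:
    split = total
    while split * per_item_cost > budget and split > 1:
        split //= 2
    return max(split, 1)


# Example: 32 batch rows at 0.5 (arbitrary units) each against a budget of 4.0
# -> slices of 8 rows (8 * 0.5 = 4.0, no longer over budget).
assert pick_split(32, 0.5, 4.0) == 8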
@@ -91,22 +85,25 @@ def scaled_dot_product_attention(
     query, key, value, attn_mask=None, dropout_p=0.0, is_causal=False
 ):
     # ARC GPUs can't allocate more than 4GB to a single block, Slice it:
-    shape_one, batch_size_attention, query_tokens, shape_four = query.shape
-    block_multiply = 2.4 if query.dtype == torch.float32 else 1.2
-    block_size = (
-        (shape_one * batch_size_attention * query_tokens * shape_four)
-        / 1024
-        * block_multiply
-    )  # MB
+    if len(query.shape) == 3:
+        batch_size_attention, query_tokens, shape_four = query.shape
+        shape_one = 1
+        no_shape_one = True
+    else:
+        shape_one, batch_size_attention, query_tokens, shape_four = query.shape
+        no_shape_one = False
+
+    block_multiply = query.element_size()
+    slice_block_size = (
+        shape_one * query_tokens * shape_four / 1024 / 1024 * block_multiply
+    )
+    block_size = batch_size_attention * slice_block_size
+
     split_slice_size = batch_size_attention
-    if block_size >= 4000:
+    if block_size > 4:
         do_split = True
         # Find something divisible with the shape_one
-        while (
-            (shape_one * split_slice_size * query_tokens * shape_four)
-            / 1024
-            * block_multiply
-        ) > 4000:
+        while (split_slice_size * slice_block_size) > 4:
             split_slice_size = split_slice_size // 2
             if split_slice_size <= 1:
                 split_slice_size = 1
@@ -114,20 +111,14 @@ def scaled_dot_product_attention(
     else:
         do_split = False
 
-    split_block_size = (
-        (shape_one * split_slice_size * query_tokens * shape_four)
-        / 1024
-        * block_multiply
-    )  # MB
     split_2_slice_size = query_tokens
-    if split_block_size >= 4000:
+    if split_slice_size * slice_block_size > 4:
+        slice_block_size2 = (
+            shape_one * split_slice_size * shape_four / 1024 / 1024 * block_multiply
+        )
         do_split_2 = True
         # Find something divisible with the batch_size_attention
-        while (
-            (shape_one * split_slice_size * split_2_slice_size * shape_four)
-            / 1024
-            * block_multiply
-        ) > 4000:
+        while (split_2_slice_size * slice_block_size2) > 4:
             split_2_slice_size = split_2_slice_size // 2
             if split_2_slice_size <= 1:
                 split_2_slice_size = 1
@@ -146,31 +137,63 @@ def scaled_dot_product_attention(
                 ):  # pylint: disable=invalid-name
                     start_idx_2 = i2 * split_2_slice_size
                     end_idx_2 = (i2 + 1) * split_2_slice_size
+                    if no_shape_one:
+                        hidden_states[
+                            start_idx:end_idx, start_idx_2:end_idx_2
+                        ] = original_scaled_dot_product_attention(
+                            query[start_idx:end_idx, start_idx_2:end_idx_2],
+                            key[start_idx:end_idx, start_idx_2:end_idx_2],
+                            value[start_idx:end_idx, start_idx_2:end_idx_2],
+                            attn_mask=attn_mask[
+                                start_idx:end_idx, start_idx_2:end_idx_2
+                            ]
+                            if attn_mask is not None
+                            else attn_mask,
+                            dropout_p=dropout_p,
+                            is_causal=is_causal,
+                        )
+                    else:
+                        hidden_states[
+                            :, start_idx:end_idx, start_idx_2:end_idx_2
+                        ] = original_scaled_dot_product_attention(
+                            query[:, start_idx:end_idx, start_idx_2:end_idx_2],
+                            key[:, start_idx:end_idx, start_idx_2:end_idx_2],
+                            value[:, start_idx:end_idx, start_idx_2:end_idx_2],
+                            attn_mask=attn_mask[
+                                :, start_idx:end_idx, start_idx_2:end_idx_2
+                            ]
+                            if attn_mask is not None
+                            else attn_mask,
+                            dropout_p=dropout_p,
+                            is_causal=is_causal,
+                        )
+            else:
+                if no_shape_one:
                     hidden_states[
-                        :, start_idx:end_idx, start_idx_2:end_idx_2
+                        start_idx:end_idx
                     ] = original_scaled_dot_product_attention(
-                        query[:, start_idx:end_idx, start_idx_2:end_idx_2],
-                        key[:, start_idx:end_idx, start_idx_2:end_idx_2],
-                        value[:, start_idx:end_idx, start_idx_2:end_idx_2],
-                        attn_mask=attn_mask[:, start_idx:end_idx, start_idx_2:end_idx_2]
+                        query[start_idx:end_idx],
+                        key[start_idx:end_idx],
+                        value[start_idx:end_idx],
+                        attn_mask=attn_mask[start_idx:end_idx]
                         if attn_mask is not None
                         else attn_mask,
                         dropout_p=dropout_p,
                         is_causal=is_causal,
                     )
-            else:
-                hidden_states[
-                    :, start_idx:end_idx
-                ] = original_scaled_dot_product_attention(
-                    query[:, start_idx:end_idx],
-                    key[:, start_idx:end_idx],
-                    value[:, start_idx:end_idx],
-                    attn_mask=attn_mask[:, start_idx:end_idx]
-                    if attn_mask is not None
-                    else attn_mask,
-                    dropout_p=dropout_p,
-                    is_causal=is_causal,
-                )
+                else:
+                    hidden_states[
+                        :, start_idx:end_idx
+                    ] = original_scaled_dot_product_attention(
+                        query[:, start_idx:end_idx],
+                        key[:, start_idx:end_idx],
+                        value[:, start_idx:end_idx],
+                        attn_mask=attn_mask[:, start_idx:end_idx]
+                        if attn_mask is not None
+                        else attn_mask,
+                        dropout_p=dropout_p,
+                        is_causal=is_causal,
+                    )
     else:
         return original_scaled_dot_product_attention(
             query,
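The scaled_dot_product_attention hunks above introduce a no_shape_one flag so the sliced path handles both 3-D and 4-D inputs: a 3-D query is unpacked as (batch_size_attention, query_tokens, shape_four) in the code's own names, with an implicit leading dimension of 1, and the slicing then indexes without the leading ":". A hedged, standalone sketch of just that shape handling (it mirrors the added lines; torch is the only dependency assumed):

import torch


def describe_query(query: torch.Tensor):
    # Mirrors the shape handling added above: a 3-D query gets an implicit
    # leading dimension of 1 and sets no_shape_one so later slicing can skip
    # the leading ':' index.
    if len(query.shape) == 3:
        batch_size_attention, query_tokens, shape_four = query.shape
        shape_one = 1
        no_shape_one = True
    else:
        shape_one, batch_size_attention, query_tokens, shape_four = query.shape
        no_shape_one = False
    return shape_one, batch_size_attention, query_tokens, shape_four, no_shape_one


print(describe_query(torch.zeros(2, 77, 64)))     # (1, 2, 77, 64, True)
print(describe_query(torch.zeros(1, 8, 77, 64)))  # (1, 8, 77, 64, False)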
@@ -23,14 +23,16 @@ try:
 
     if torch.xpu.is_available():
         from infer.modules.ipex import ipex_init
+        from infer.modules.ipex.gradscaler import gradscaler_init
 
+        ipex_init()
+
         from torch.xpu.amp import autocast
-        from infer.modules.ipex.gradscaler import gradscaler_init
+
         GradScaler = gradscaler_init()
-        ipex_init()
     else:
         from torch.cuda.amp import GradScaler, autocast
-except Exception:
+except Exception:  # pylint: disable=broad-exception-caught
     from torch.cuda.amp import GradScaler, autocast
 
 torch.backends.cudnn.deterministic = False
@@ -104,14 +106,11 @@ def main():
     os.environ["MASTER_ADDR"] = "localhost"
     os.environ["MASTER_PORT"] = str(randint(20000, 55555))
     children = []
+    logger = utils.get_logger(hps.model_dir)
     for i in range(n_gpus):
         subproc = mp.Process(
             target=run,
-            args=(
-                i,
-                n_gpus,
-                hps,
-            ),
+            args=(i, n_gpus, hps, logger),
         )
         children.append(subproc)
         subproc.start()
@@ -120,10 +119,10 @@ def main():
         children[i].join()
 
 
-def run(rank, n_gpus, hps):
+def run(rank, n_gpus, hps, logger: logging.Logger):
     global global_step
     if rank == 0:
-        logger = utils.get_logger(hps.model_dir)
+        # logger = utils.get_logger(hps.model_dir)
         logger.info(hps)
         # utils.check_git_hash(hps.model_dir)
         writer = SummaryWriter(log_dir=hps.model_dir)
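The two train.py hunks above address the "Fixed abnormal reference of logger in multiprocessing" item from the commit list: main() now builds the logger once and hands it to every child through the mp.Process args, and run() receives it as a parameter instead of calling utils.get_logger again on rank 0. A minimal, hedged sketch of the same pattern outside the project (plain multiprocessing, placeholder hps, default start method assumed):

import logging
import multiprocessing as mp


def run(rank: int, n_gpus: int, hps: dict, logger: logging.Logger):
    # The worker reuses the logger it was handed instead of building its own,
    # which is the shape of the change above.
    logger.info("rank %d/%d starting with %s", rank, n_gpus, hps)


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger("train")
    n_gpus, hps = 2, {"model_dir": "logs/demo"}  # placeholder values
    children = []
    for i in range(n_gpus):
        subproc = mp.Process(target=run, args=(i, n_gpus, hps, logger))
        children.append(subproc)
        subproc.start()
    for subproc in children:
        subproc.join()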
@@ -16,13 +16,13 @@ from infer.lib.uvr5_pack.utils import inference
 
 
 class AudioPre:
-    def __init__(self, agg, model_path, device, is_half):
+    def __init__(self, agg, model_path, device, is_half, tta=False):
         self.model_path = model_path
         self.device = device
         self.data = {
             # Processing Options
             "postprocess": False,
-            "tta": False,
+            "tta": tta,
             # Constants
             "window_size": 512,
             "agg": agg,
@@ -180,13 +180,13 @@ class AudioPre:
 
 
 class AudioPreDeEcho:
-    def __init__(self, agg, model_path, device, is_half):
+    def __init__(self, agg, model_path, device, is_half, tta=False):
         self.model_path = model_path
        self.device = device
         self.data = {
             # Processing Options
             "postprocess": False,
-            "tta": False,
+            "tta": tta,
             # Constants
             "window_size": 512,
             "agg": agg,
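Both separator classes above gain a tta keyword (off by default) that is forwarded into their processing options, matching the "allow for tta to be passed to uvr" item in the commit list; tta is the test-time-augmentation switch in UVR terms. A hedged construction sketch (the import path, model path, and parameter values are assumptions for illustration, not project defaults):

# Hypothetical usage; adjust the import and paths to the actual project layout.
from infer.modules.uvr5.vr import AudioPre  # assumed module path

pre_fun = AudioPre(
    agg=10,                                        # aggressiveness, example value
    model_path="assets/uvr5_weights/some_model.pth",  # placeholder path
    device="cuda",
    is_half=False,
    tta=True,  # enable test-time augmentation in the separation pass
)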
@@ -54,16 +54,10 @@ class VC:
         if sid == "" or sid == []:
             if self.hubert_model is not None:  # 考虑到轮询, 需要加个判断看是否 sid 是由有模型切换到无模型的
                 logger.info("Clean model cache")
-                del (
-                    self.net_g,
-                    self.n_spk,
-                    self.vc,
-                    self.hubert_model,
-                    self.tgt_sr,
-                )  # ,cpt
+                del (self.net_g, self.n_spk, self.hubert_model, self.tgt_sr)  # ,cpt
                 self.hubert_model = (
                     self.net_g
-                ) = self.n_spk = self.vc = self.hubert_model = self.tgt_sr = None
+                ) = self.n_spk = self.hubert_model = self.tgt_sr = None
                 if torch.cuda.is_available():
                     torch.cuda.empty_cache()
             ###楼下不这么折腾清理不干净
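The final hunk trims the VC cache cleanup: self.vc is dropped from the del/None chain, so only net_g, n_spk, hubert_model, and tgt_sr are cleared before emptying the CUDA cache. The Chinese comments say, roughly, that the guard is needed because of polling (sid may switch from a loaded model to none) and that anything less thorough does not actually free the memory. A hedged sketch of that teardown pattern in isolation (the function name is illustrative):

import torch


def release_models(vc) -> None:
    # Drop the strong references so the underlying tensors become collectable,
    # then ask the CUDA allocator to return its cached blocks.
    vc.net_g = vc.n_spk = vc.hubert_model = vc.tgt_sr = None
    if torch.cuda.is_available():
        torch.cuda.empty_cache()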