Mirror of https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI.git, synced 2026-01-19 18:41:52 +00:00
Format code (#989)
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
Committed by: GitHub
Parent: 7293002f53
Commit: 76b67842ba
Changed files: MDXNet.py (10)
@@ -83,12 +83,13 @@ def get_models(device, dim_f, dim_t, n_fft):
 warnings.filterwarnings("ignore")
 import sys

 now_dir = os.getcwd()
 sys.path.append(now_dir)
 from config import Config

 cpu = torch.device("cpu")
-device=Config().device
+device = Config().device
 # if torch.cuda.is_available():
 #     device = torch.device("cuda:0")
 # elif torch.backends.mps.is_available():
@@ -104,10 +105,15 @@ class Predictor:
             device=cpu, dim_f=args.dim_f, dim_t=args.dim_t, n_fft=args.n_fft
         )
         import onnxruntime as ort

         print(ort.get_available_providers())
         self.model = ort.InferenceSession(
             os.path.join(args.onnx, self.model_.target_name + ".onnx"),
-            providers=["CUDAExecutionProvider", "DmlExecutionProvider","CPUExecutionProvider"],
+            providers=[
+                "CUDAExecutionProvider",
+                "DmlExecutionProvider",
+                "CPUExecutionProvider",
+            ],
         )
         print("onnx load done")
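Not part of the commit: a minimal sketch of how the provider list reformatted above behaves at runtime, assuming onnxruntime is installed. onnxruntime keeps the requested execution providers that exist in the installed build, in the given priority order, so CPUExecutionProvider acts as the fallback when CUDA or DirectML builds are absent. "example.onnx" is a placeholder path, not a file from this repository.

import onnxruntime as ort

# Execution providers compiled into the installed onnxruntime build.
print(ort.get_available_providers())

# "example.onnx" is a placeholder model path for illustration only.
session = ort.InferenceSession(
    "example.onnx",
    providers=[
        "CUDAExecutionProvider",
        "DmlExecutionProvider",
        "CPUExecutionProvider",
    ],
)

# Providers the session actually selected, in priority order.
print(session.get_providers())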