Format code (#1193)

Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
Author: github-actions[bot]
Date: 2023-09-14 09:34:30 +09:00
Committed by: GitHub
Parent: 72a18e66b6
Commit: a6456f6d46
15 changed files with 562 additions and 237 deletions


@@ -3,38 +3,49 @@ import numpy as np
 import av
 from io import BytesIO


 def wav2(i, o, format):
-    inp = av.open(i, 'rb')
-    if format == "m4a": format = "mp4"
-    out = av.open(o, 'wb', format=format)
-    if format == "ogg": format = "libvorbis"
-    if format == "mp4": format = "aac"
+    inp = av.open(i, "rb")
+    if format == "m4a":
+        format = "mp4"
+    out = av.open(o, "wb", format=format)
+    if format == "ogg":
+        format = "libvorbis"
+    if format == "mp4":
+        format = "aac"
     ostream = out.add_stream(format)

     for frame in inp.decode(audio=0):
-        for p in ostream.encode(frame): out.mux(p)
+        for p in ostream.encode(frame):
+            out.mux(p)

-    for p in ostream.encode(None): out.mux(p)
+    for p in ostream.encode(None):
+        out.mux(p)

     out.close()
     inp.close()


 def audio2(i, o, format, sr):
-    inp = av.open(i, 'rb')
-    out = av.open(o, 'wb', format=format)
-    if format == "ogg": format = "libvorbis"
-    if format == "f32le": format = "pcm_f32le"
+    inp = av.open(i, "rb")
+    out = av.open(o, "wb", format=format)
+    if format == "ogg":
+        format = "libvorbis"
+    if format == "f32le":
+        format = "pcm_f32le"
     ostream = out.add_stream(format, channels=1)
     ostream.sample_rate = sr

     for frame in inp.decode(audio=0):
-        for p in ostream.encode(frame): out.mux(p)
+        for p in ostream.encode(frame):
+            out.mux(p)

     out.close()
     inp.close()


 def load_audio(file, sr):
     try:
         file = (
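For orientation, a minimal usage sketch of the two helpers touched above, assuming PyAV is installed; the file names are hypothetical and not part of this commit:

# Convert a WAV file to OGG/Vorbis; wav2 remaps "ogg" to the libvorbis encoder.
with open("speech.wav", "rb") as inf, open("speech.ogg", "wb") as outf:
    wav2(inf, outf, "ogg")

# Decode to mono raw float32 PCM at 16 kHz (audio2 sets channels=1 and sample_rate=sr).
with open("speech.wav", "rb") as inf, open("speech.f32le", "wb") as outf:
    audio2(inf, outf, "f32le", 16000)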


@@ -15,6 +15,7 @@ from infer.lib.infer_pack.commons import get_padding, init_weights
 has_xpu = bool(hasattr(torch, "xpu") and torch.xpu.is_available())

+
 class TextEncoder256(nn.Module):
     def __init__(
         self,
@@ -1158,7 +1159,9 @@ class DiscriminatorP(torch.nn.Module):
         if t % self.period != 0:  # pad first
             n_pad = self.period - (t % self.period)
             if has_xpu and x.dtype == torch.bfloat16:
-                x = F.pad(x.to(dtype=torch.float16), (0, n_pad), "reflect").to(dtype=torch.bfloat16)
+                x = F.pad(x.to(dtype=torch.float16), (0, n_pad), "reflect").to(
+                    dtype=torch.bfloat16
+                )
             else:
                 x = F.pad(x, (0, n_pad), "reflect")
             t = t + n_pad
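For context, the hunk above only rewraps the XPU workaround in DiscriminatorP: reflect padding is unsupported for bfloat16 tensors on XPU, so the tensor round-trips through float16. A standalone sketch of the period-padding step, with illustrative shapes and period value:

import torch
import torch.nn.functional as F

x = torch.randn(2, 1, 13)  # (batch, channels, time); shape is illustrative
period = 5
t = x.shape[-1]
if t % period != 0:  # pad so the time axis is a multiple of the period
    n_pad = period - (t % period)
    x = F.pad(x, (0, n_pad), "reflect")
    t = t + n_pad
x = x.view(2, 1, t // period, period)  # 1D signal -> 2D grid, as the model does next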


@@ -2,11 +2,14 @@ import pdb, os
 import numpy as np
 import torch

 try:
-    #Fix "Torch not compiled with CUDA enabled"
-    import intel_extension_for_pytorch as ipex # pylint: disable=import-error, unused-import
+    # Fix "Torch not compiled with CUDA enabled"
+    import intel_extension_for_pytorch as ipex  # pylint: disable=import-error, unused-import
+
     if torch.xpu.is_available():
         from infer.modules.ipex import ipex_init
+
         ipex_init()
 except Exception:
     pass
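The guarded import above keeps the IPEX dependency optional: if intel_extension_for_pytorch is missing or no XPU is present, the exception is swallowed and the CUDA/CPU paths are unaffected. A minimal sketch of the same fallback idea (pick_device is a hypothetical helper, not part of this commit):

import torch

def pick_device() -> torch.device:
    if torch.cuda.is_available():
        return torch.device("cuda")
    # torch.xpu only exists once intel_extension_for_pytorch has been imported
    if hasattr(torch, "xpu") and torch.xpu.is_available():
        return torch.device("xpu")
    return torch.device("cpu")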