Mirror of https://github.com/QuentinFuxa/WhisperLiveKit.git (synced 2026-03-20 07:52:27 +00:00)
Merge branch 'main' into ayo-logging-fixes
Changed file: README.md (62 lines)
@@ -3,42 +3,50 @@ Whisper realtime streaming for long speech-to-text transcription and translation

**Turning Whisper into Real-Time Transcription System**

Demonstration paper, by Dominik Macháček, Raj Dabre, Ondřej Bojar, 2023
Demonstration paper, by [Dominik Macháček](https://ufal.mff.cuni.cz/dominik-machacek), [Raj Dabre](https://prajdabre.github.io/), [Ondřej Bojar](https://ufal.mff.cuni.cz/ondrej-bojar), 2023

Abstract: Whisper is one of the recent state-of-the-art multilingual speech recognition and translation models, however, it is not designed for real time transcription. In this paper, we build on top of Whisper and create Whisper-Streaming, an implementation of real-time speech transcription and translation of Whisper-like models. Whisper-Streaming uses local agreement policy with self-adaptive latency to enable streaming transcription. We show that Whisper-Streaming achieves high quality and 3.3 seconds latency on unsegmented long-form speech transcription test set, and we demonstrate its robustness and practical usability as a component in live transcription service at a multilingual conference.
Abstract: Whisper is one of the recent state-of-the-art multilingual speech recognition and translation models, however, it is not designed for real-time transcription. In this paper, we build on top of Whisper and create Whisper-Streaming, an implementation of real-time speech transcription and translation of Whisper-like models. Whisper-Streaming uses local agreement policy with self-adaptive latency to enable streaming transcription. We show that Whisper-Streaming achieves high quality and 3.3 seconds latency on unsegmented long-form speech transcription test set, and we demonstrate its robustness and practical usability as a component in live transcription service at a multilingual conference.

Paper in proceedings: http://www.afnlp.org/conferences/ijcnlp2023/proceedings/main-demo/cdrom/pdf/2023.ijcnlp-demo.3.pdf
Demo video: https://player.vimeo.com/video/840442741
[Paper PDF](https://aclanthology.org/2023.ijcnlp-demo.3.pdf), [Demo video](https://player.vimeo.com/video/840442741)

[Slides](http://ufallab.ms.mff.cuni.cz/~machacek/pre-prints/AACL23-2.11.2023-Turning-Whisper-oral.pdf) -- 15 minutes oral presentation at IJCNLP-AACL 2023

Please, cite us. [Bibtex citation](http://www.afnlp.org/conferences/ijcnlp2023/proceedings/main-demo/cdrom/bib/2023.ijcnlp-demo.3.bib):
Please, cite us. [ACL Anthology](https://aclanthology.org/2023.ijcnlp-demo.3/), [Bibtex citation](https://aclanthology.org/2023.ijcnlp-demo.3.bib):

```
@InProceedings{machacek-dabre-bojar:2023:ijcnlp,
  author    = {Macháček, Dominik and Dabre, Raj and Bojar, Ondřej},
  title     = {Turning Whisper into Real-Time Transcription System},
  booktitle = {System Demonstrations},
  month     = {November},
  year      = {2023},
  address   = {Bali, Indonesia},
  publisher = {Asian Federation of Natural Language Processing},
  pages     = {17--24},
@inproceedings{machacek-etal-2023-turning,
    title = "Turning Whisper into Real-Time Transcription System",
    author = "Mach{\'a}{\v{c}}ek, Dominik and
      Dabre, Raj and
      Bojar, Ond{\v{r}}ej",
    editor = "Saha, Sriparna and
      Sujaini, Herry",
    booktitle = "Proceedings of the 13th International Joint Conference on Natural Language Processing and the 3rd Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics: System Demonstrations",
    month = nov,
    year = "2023",
    address = "Bali, Indonesia",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2023.ijcnlp-demo.3",
    pages = "17--24",
}
```

## Installation

1) ``pip install librosa`` -- audio processing library
1) ``pip install librosa soundfile`` -- audio processing library

2) Whisper backend.

Two alternative backends are integrated. The most recommended one is [faster-whisper](https://github.com/guillaumekln/faster-whisper) with GPU support. Follow their instructions for NVIDIA libraries -- we succeeded with CUDNN 8.5.0 and CUDA 11.7. Install with `pip install faster-whisper`.
Several alternative backends are integrated. The most recommended one is [faster-whisper](https://github.com/guillaumekln/faster-whisper) with GPU support. Follow their instructions for NVIDIA libraries -- we succeeded with CUDNN 8.5.0 and CUDA 11.7. Install with `pip install faster-whisper`.

Alternative, less restrictive, but slower backend is [whisper-timestamped](https://github.com/linto-ai/whisper-timestamped): `pip install git+https://github.com/linto-ai/whisper-timestamped`

Thirdly, it's also possible to run this software from the [OpenAI Whisper API](https://platform.openai.com/docs/api-reference/audio/createTranscription). This solution is fast and requires no GPU, just a small VM will suffice, but you will need to pay OpenAI for api access. Also note that, since each audio fragment is processed multiple times, the [price](https://openai.com/pricing) will be higher than obvious from the pricing page, so keep an eye on costs while using. Setting a higher chunk-size will reduce costs significantly.
Install with: `pip install openai`

For running with the openai-api backend, make sure that your [OpenAI api key](https://platform.openai.com/api-keys) is set in the `OPENAI_API_KEY` environment variable. For example, before running, do: `export OPENAI_API_KEY=sk-xxx` with *sk-xxx* replaced with your api key.
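
If you prefer to wire the new backend up from Python rather than through the CLI, a minimal sketch (not from the repo, using only what this commit adds: the `OpenaiApiASR` class and its `use_vad`/`set_translate_task` helpers) could look like this:

```python
# Minimal sketch, not part of the commit: constructing the OpenAI API backend directly.
import os
from whisper_online import OpenaiApiASR  # class introduced in this commit

os.environ.setdefault("OPENAI_API_KEY", "sk-xxx")  # replace sk-xxx with your real key

asr = OpenaiApiASR(lan="en")   # ISO-639-1 source language, or "auto" for detection
asr.use_vad()                  # optional: skip words that fall inside likely no-speech segments
# asr.set_translate_task()     # optional: translate to English instead of transcribing
# asr.transcribed_seconds accumulates the (rounded-up) seconds sent to the API, i.e. roughly what you are billed for
```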

The backend is loaded only when chosen. The unused one does not have to be installed.

3) Optional, not recommended: sentence segmenter (aka sentence tokenizer)

@@ -69,7 +77,7 @@ In case of installation issues of opus-fast-mosestokenizer, especially on Window

```
usage: whisper_online.py [-h] [--min-chunk-size MIN_CHUNK_SIZE] [--model {tiny.en,tiny,base.en,base,small.en,small,medium.en,medium,large-v1,large-v2,large-v3,large}] [--model_cache_dir MODEL_CACHE_DIR] [--model_dir MODEL_DIR] [--lan LAN] [--task {transcribe,translate}]
                         [--backend {faster-whisper,whisper_timestamped}] [--vad] [--buffer_trimming {sentence,segment}] [--buffer_trimming_sec BUFFER_TRIMMING_SEC] [--start_at START_AT] [--offline] [--comp_unaware]
                         [--backend {faster-whisper,whisper_timestamped,openai-api}] [--vad] [--buffer_trimming {sentence,segment}] [--buffer_trimming_sec BUFFER_TRIMMING_SEC] [--start_at START_AT] [--offline] [--comp_unaware]
                         audio_path

positional arguments:

@@ -86,10 +94,10 @@ options:
  --model_dir MODEL_DIR
                        Dir where Whisper model.bin and other files are saved. This option overrides --model and --model_cache_dir parameter.
  --lan LAN, --language LAN
                        Language code for transcription, e.g. en,de,cs.
                        Source language code, e.g. en,de,cs, or 'auto' for language detection.
  --task {transcribe,translate}
                        Transcribe or translate.
  --backend {faster-whisper,whisper_timestamped}
  --backend {faster-whisper,whisper_timestamped,openai-api}
                        Load only this backend for Whisper processing.
  --vad                 Use VAD = voice activity detection, with the default parameters.
  --buffer_trimming {sentence,segment}

@@ -147,7 +155,7 @@ The code whisper_online.py is nicely commented, read it as the full documentatio

This pseudocode describes the interface that we suggest for your implementation. You can implement any features that you need for your application.

```
```python
from whisper_online import *

src_lan = "en"  # source language
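
The hunk cuts the README example off here. A hedged sketch of how the rest of that loop typically looks — assuming the `OnlineASRProcessor` interface that appears later in this diff (`insert_audio_chunk`), plus `process_iter()`/`finish()` methods that are not shown in these hunks, and with placeholder audio helpers — could be:

```python
# Hedged sketch of the streaming loop; get_new_audio_chunk() and the loop
# condition are placeholders for your own audio source.
from whisper_online import FasterWhisperASR, OnlineASRProcessor

src_lan = "en"
asr = FasterWhisperASR(lan=src_lan, modelsize="large-v2")  # loads and wraps the Whisper model
online = OnlineASRProcessor(asr)                           # streaming wrapper with hypothesis buffer

while audio_has_not_ended:                 # placeholder condition
    a = get_new_audio_chunk()              # placeholder: 16 kHz float32 numpy array
    online.insert_audio_chunk(a)
    o = online.process_iter()              # assumed API: (beg_s, end_s, committed_text)
    print(o)

print(online.finish())                     # assumed API: flush whatever is still unconfirmed
```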

@@ -216,12 +224,20 @@ In more detail: we use the init prompt, we handle the inaccurate timestamps, we
re-process confirmed sentence prefixes and skip them, making sure they don't
overlap, and we limit the processing buffer window.

Contributions are welcome.

### Performance evaluation

[See the paper.](http://www.afnlp.org/conferences/ijcnlp2023/proceedings/main-demo/cdrom/pdf/2023.ijcnlp-demo.3.pdf)

### Contributions

Contributions are welcome. We acknowledge especially:

- [The GitHub contributors](https://github.com/ufal/whisper_streaming/graphs/contributors) for their pull requests with new features and bugfixes.
- [The translation of this repo into Chinese.](https://github.com/Gloridust/whisper_streaming_CN)
- [Ondřej Plátek](https://opla.cz/) for the paper pre-review.
- [Peter Polák](https://ufal.mff.cuni.cz/peter-polak) for the original idea.
- The UEDIN team of the [ELITR project](https://elitr.eu) for the original line_packet.py.

## Contact

@@ -7,10 +7,13 @@ import time
import logging

import io
import soundfile as sf
import math

@lru_cache
def load_audio(fname):
    a, _ = librosa.load(fname, sr=16000)
    a, _ = librosa.load(fname, sr=16000, dtype=np.float32)
    return a

def load_audio_chunk(fname, beg, end):

@@ -31,7 +34,10 @@ class ASRBase:
        self.logfile = logfile

        self.transcribe_kargs = {}
        self.original_language = lan
        if lan == "auto":
            self.original_language = None
        else:
            self.original_language = lan

        self.model = self.load_model(modelsize, cache_dir, model_dir)

@@ -55,6 +61,7 @@ class WhisperTimestampedASR(ASRBase):

    def load_model(self, modelsize=None, cache_dir=None, model_dir=None):
        import whisper
        import whisper_timestamped
        from whisper_timestamped import transcribe_timestamped
        self.transcribe_timestamped = transcribe_timestamped
        if model_dir is not None:

@@ -119,8 +126,11 @@ class FasterWhisperASR(ASRBase):
        return model

    def transcribe(self, audio, init_prompt=""):

        # tested: beam_size=5 is faster and better than 1 (on one 200 second document from En ESIC, min chunk 0.01)
        segments, info = self.model.transcribe(audio, language=self.original_language, initial_prompt=init_prompt, beam_size=5, word_timestamps=True, condition_on_previous_text=True, **self.transcribe_kargs)
        #print(info) # info contains language detection result

        return list(segments)

    def ts_words(self, segments):

@@ -143,6 +153,93 @@ class FasterWhisperASR(ASRBase):
        self.transcribe_kargs["task"] = "translate"

class OpenaiApiASR(ASRBase):
    """Uses OpenAI's Whisper API for audio transcription."""

    def __init__(self, lan=None, temperature=0, logfile=sys.stderr):
        self.logfile = logfile

        self.modelname = "whisper-1"
        self.original_language = None if lan == "auto" else lan  # ISO-639-1 language code
        self.response_format = "verbose_json"
        self.temperature = temperature

        self.load_model()

        self.use_vad_opt = False

        # reset the task in set_translate_task
        self.task = "transcribe"

    def load_model(self, *args, **kwargs):
        from openai import OpenAI
        self.client = OpenAI()

        self.transcribed_seconds = 0  # for logging how many seconds were processed by API, to know the cost

    def ts_words(self, segments):
        no_speech_segments = []
        if self.use_vad_opt:
            for segment in segments.segments:
                # TODO: threshold can be set from outside
                if segment["no_speech_prob"] > 0.8:
                    no_speech_segments.append((segment.get("start"), segment.get("end")))

        o = []
        for word in segments.words:
            start = word.get("start")
            end = word.get("end")
            if any(s[0] <= start <= s[1] for s in no_speech_segments):
                # print("Skipping word", word.get("word"), "because it's in a no-speech segment")
                continue
            o.append((start, end, word.get("word")))
        return o

    def segments_end_ts(self, res):
        return [s["end"] for s in res.words]

    def transcribe(self, audio_data, prompt=None, *args, **kwargs):
        # Write the audio data to a buffer
        buffer = io.BytesIO()
        buffer.name = "temp.wav"
        sf.write(buffer, audio_data, samplerate=16000, format='WAV', subtype='PCM_16')
        buffer.seek(0)  # Reset buffer's position to the beginning

        self.transcribed_seconds += math.ceil(len(audio_data)/16000)  # it rounds up to the whole seconds

        params = {
            "model": self.modelname,
            "file": buffer,
            "response_format": self.response_format,
            "temperature": self.temperature,
            "timestamp_granularities": ["word", "segment"]
        }
        if self.task != "translate" and self.original_language:
            params["language"] = self.original_language
        if prompt:
            params["prompt"] = prompt

        if self.task == "translate":
            proc = self.client.audio.translations
        else:
            proc = self.client.audio.transcriptions

        # Process transcription/translation
        transcript = proc.create(**params)
        logging.debug(f"OpenAI API processed accumulated {self.transcribed_seconds} seconds")

        return transcript

    def use_vad(self):
        self.use_vad_opt = True

    def set_translate_task(self):
        self.task = "translate"
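
To make the flow above concrete, a hedged usage sketch, not part of the commit: the audio file name is a placeholder, and `OPENAI_API_KEY` must be set as described in the README changes above.

```python
# Hedged sketch: one-shot transcription through the new OpenAI API backend.
from whisper_online import OpenaiApiASR, load_audio

asr = OpenaiApiASR(lan="en")
audio = load_audio("sample.wav")            # placeholder file; loaded and resampled to 16 kHz
result = asr.transcribe(audio)              # verbose_json transcript with word timestamps
words = asr.ts_words(result)                # [(start_s, end_s, word), ...]
ends = asr.segments_end_ts(result)          # per-word end timestamps
print(words[:5], asr.transcribed_seconds)   # transcribed_seconds approximates the billed audio
```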

class HypothesisBuffer:

@@ -237,9 +334,6 @@ class OnlineASRProcessor:

        self.transcript_buffer = HypothesisBuffer(logfile=self.logfile)
        self.commited = []
        self.last_chunked_at = 0

        self.silence_iters = 0

    def insert_audio_chunk(self, audio):
        self.audio_buffer = np.append(self.audio_buffer, audio)

@@ -249,7 +343,7 @@ class OnlineASRProcessor:
        "context" is the commited text that is inside the audio buffer. It is transcribed again and skipped. It is returned only for debugging and logging reasons.
        """
        k = max(0,len(self.commited)-1)
        while k > 0 and self.commited[k-1][1] > self.last_chunked_at:
        while k > 0 and self.commited[k-1][1] > self.buffer_time_offset:
            k -= 1

        p = self.commited[:k]
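
A hedged toy illustration (not part of the commit) of what this split does after the switch from `last_chunked_at` to `buffer_time_offset`: committed words whose end time lies at or before the buffer offset feed the init prompt, while the rest is the "context" still covered by the audio buffer.

```python
# Toy data only: (start, end, word) tuples in the shape produced by ts_words().
commited = [(0.0, 0.4, "Hello"), (0.5, 0.9, "world,"), (1.0, 1.3, "this"), (1.4, 1.8, "is")]
buffer_time_offset = 1.0   # audio before t = 1.0 s has already been trimmed from the buffer

k = max(0, len(commited) - 1)
while k > 0 and commited[k - 1][1] > buffer_time_offset:
    k -= 1

prompt_words = [w for _, _, w in commited[:k]]    # -> ['Hello', 'world,']  (fed back as init prompt)
context_words = [w for _, _, w in commited[k:]]   # -> ['this', 'is']       (re-transcribed and skipped)
print(prompt_words, context_words)
```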

@@ -362,7 +456,6 @@ class OnlineASRProcessor:
        cut_seconds = time - self.buffer_time_offset
        self.audio_buffer = self.audio_buffer[int(cut_seconds*self.SAMPLING_RATE):]
        self.buffer_time_offset = time
        self.last_chunked_at = time

    def words_to_sentences(self, words):
        """Uses self.tokenizer for sentence segmentation of words.

@@ -456,13 +549,42 @@ def add_shared_args(parser):
    parser.add_argument('--model', type=str, default='large-v2', choices="tiny.en,tiny,base.en,base,small.en,small,medium.en,medium,large-v1,large-v2,large-v3,large".split(","),help="Name size of the Whisper model to use (default: large-v2). The model is automatically downloaded from the model hub if not present in model cache dir.")
    parser.add_argument('--model_cache_dir', type=str, default=None, help="Overriding the default model cache dir where models downloaded from the hub are saved")
    parser.add_argument('--model_dir', type=str, default=None, help="Dir where Whisper model.bin and other files are saved. This option overrides --model and --model_cache_dir parameter.")
    parser.add_argument('--lan', '--language', type=str, default='en', help="Language code for transcription, e.g. en,de,cs.")
    parser.add_argument('--lan', '--language', type=str, default='auto', help="Source language code, e.g. en,de,cs, or 'auto' for language detection.")
    parser.add_argument('--task', type=str, default='transcribe', choices=["transcribe","translate"],help="Transcribe or translate.")
    parser.add_argument('--backend', type=str, default="faster-whisper", choices=["faster-whisper", "whisper_timestamped"],help='Load only this backend for Whisper processing.')
    parser.add_argument('--backend', type=str, default="faster-whisper", choices=["faster-whisper", "whisper_timestamped", "openai-api"],help='Load only this backend for Whisper processing.')
    parser.add_argument('--vad', action="store_true", default=False, help='Use VAD = voice activity detection, with the default parameters.')
    parser.add_argument('--buffer_trimming', type=str, default="segment", choices=["sentence", "segment"],help='Buffer trimming strategy -- trim completed sentences marked with punctuation mark and detected by sentence segmenter, or the completed segments returned by Whisper. Sentence segmenter must be installed for "sentence" option.')
    parser.add_argument('--buffer_trimming_sec', type=float, default=15, help='Buffer trimming length threshold in seconds. If buffer length is longer, trimming sentence/segment is triggered.')

def asr_factory(args, logfile=sys.stderr):
    """
    Creates and configures an ASR instance based on the specified backend and arguments.
    """
    backend = args.backend
    if backend == "openai-api":
        logging.debug("Using OpenAI API.")
        asr = OpenaiApiASR(lan=args.lan)
    else:
        if backend == "faster-whisper":
            asr_cls = FasterWhisperASR
        else:
            asr_cls = WhisperTimestampedASR

        # Only for FasterWhisperASR and WhisperTimestampedASR
        size = args.model
        t = time.time()
        logging.debug(f"Loading Whisper {size} model for {args.lan}...")
        asr = asr_cls(modelsize=size, lan=args.lan, cache_dir=args.model_cache_dir, model_dir=args.model_dir)
        e = time.time()
        logging.debug(f"done. It took {round(e-t,2)} seconds.")

    # Apply common configurations
    if getattr(args, 'vad', False):  # Checks if VAD argument is present and True
        logging.info("Setting VAD filter")
        asr.use_vad()

    return asr
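
A hedged sketch of how the two new helpers fit together in your own entry point (the file name and flag values below are illustrative only):

```python
# Hedged sketch: build args with add_shared_args() and hand them to asr_factory().
import argparse
import sys
from whisper_online import add_shared_args, asr_factory

parser = argparse.ArgumentParser()
parser.add_argument("audio_path")
add_shared_args(parser)   # adds --model, --lan, --task, --backend, --vad, --buffer_trimming, ...

args = parser.parse_args(["sample.wav", "--backend", "openai-api", "--lan", "auto"])
asr = asr_factory(args, logfile=sys.stderr)   # OpenaiApiASR here; faster-whisper by default
if args.task == "translate":
    asr.set_translate_task()
```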

## main:

if __name__ == "__main__":

@@ -490,34 +612,15 @@ if __name__ == "__main__":
    duration = len(load_audio(audio_path))/SAMPLING_RATE
    logging.info("Audio duration is: %2.2f seconds" % duration)

    size = args.model
    asr = asr_factory(args, logfile=logfile)
    language = args.lan

    t = time.time()
    logging.info(f"Loading Whisper {size} model for {language}...")

    if args.backend == "faster-whisper":
        asr_cls = FasterWhisperASR
    else:
        asr_cls = WhisperTimestampedASR

    asr = asr_cls(modelsize=size, lan=language, cache_dir=args.model_cache_dir, model_dir=args.model_dir)

    if args.task == "translate":
        asr.set_translate_task()
        tgt_language = "en"  # Whisper translates into English
    else:
        tgt_language = language  # Whisper transcribes in this language

    e = time.time()
    logging.info(f"done. It took {round(e-t,2)} seconds.")

    if args.vad:
        logging.info("setting VAD filter")
        asr.use_vad()

    min_chunk = args.min_chunk_size
    if args.buffer_trimming == "sentence":
        tokenizer = create_tokenizer(tgt_language)

@@ -548,7 +651,8 @@ if __name__ == "__main__":
        print("%1.4f %1.0f %1.0f %s" % (now*1000, o[0]*1000,o[1]*1000,o[2]),file=logfile,flush=True)
        print("%1.4f %1.0f %1.0f %s" % (now*1000, o[0]*1000,o[1]*1000,o[2]),flush=True)
    else:
        print("here?", o,file=logfile,flush=True)
        # No text, so no output
        pass

    if args.offline:  ## offline mode processing (for testing/debugging)
        a = load_audio(audio_path)

@@ -5,6 +5,7 @@ import sys
import argparse
import os
import logging
import numpy as np

parser = argparse.ArgumentParser()

@@ -33,20 +34,7 @@ SAMPLING_RATE = 16000
size = args.model
language = args.lan

t = time.time()
logging.debug(f"Loading Whisper {size} model for {language}...")

if args.backend == "faster-whisper":
    from faster_whisper import WhisperModel
    asr_cls = FasterWhisperASR
    logging.getLogger("faster_whisper").setLevel(logging.WARNING)
else:
    import whisper
    import whisper_timestamped
    # from whisper_timestamped_model import WhisperTimestampedASR
    asr_cls = WhisperTimestampedASR

asr = asr_cls(modelsize=size, lan=language, cache_dir=args.model_cache_dir, model_dir=args.model_dir)
asr = asr_factory(args)

if args.task == "translate":
    asr.set_translate_task()

@@ -54,14 +42,6 @@ if args.task == "translate":
else:
    tgt_language = language

e = time.time()
logging.debug(f"done. It took {round(e-t,2)} seconds.")

if args.vad:
    logging.debug("setting VAD filter")
    asr.use_vad()

min_chunk = args.min_chunk_size

if args.buffer_trimming == "sentence":

@@ -141,7 +121,7 @@ class ServerProcessor:
            if not raw_bytes:
                break
            sf = soundfile.SoundFile(io.BytesIO(raw_bytes), channels=1,endian="LITTLE",samplerate=SAMPLING_RATE, subtype="PCM_16",format="RAW")
            audio, _ = librosa.load(sf,sr=SAMPLING_RATE)
            audio, _ = librosa.load(sf,sr=SAMPLING_RATE,dtype=np.float32)
            out.append(audio)
        if not out:
            return None
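
For reference, a hedged sketch (not part of the commit) of how a client can produce the raw little-endian 16-bit PCM that this reader expects, starting from float32 samples in [-1, 1]:

```python
# Hedged client-side sketch: float32 samples -> raw PCM_16 bytes for the server.
import numpy as np

def float32_to_pcm16(samples: np.ndarray) -> bytes:
    samples = np.clip(samples, -1.0, 1.0)              # keep values within the 16-bit range
    return (samples * 32767).astype("<i2").tobytes()   # little-endian int16; mono 16 kHz assumed
```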