From c30969fe271794eb9fedc90c7b01f9a9f4461b0f Mon Sep 17 00:00:00 2001 From: Tijs Zwinkels Date: Wed, 24 Jan 2024 15:31:18 +0100 Subject: [PATCH 01/43] OpenAI Whisper API backend --- whisper_online.py | 76 +++++++++++++++++++++++++++++++++++++++- whisper_online_server.py | 2 ++ 2 files changed, 77 insertions(+), 1 deletion(-) diff --git a/whisper_online.py b/whisper_online.py index 36bdbd6..860c82d 100644 --- a/whisper_online.py +++ b/whisper_online.py @@ -4,6 +4,8 @@ import numpy as np import librosa from functools import lru_cache import time +import io +import soundfile as sf @@ -142,6 +144,76 @@ class FasterWhisperASR(ASRBase): self.transcribe_kargs["task"] = "translate" +class OpenaiApiASR(ASRBase): + """Uses OpenAI's Whisper API for audio transcription.""" + + def __init__(self, modelsize=None, lan=None, cache_dir=None, model_dir=None, response_format="verbose_json", temperature=0): + self.modelname = "whisper-1" # modelsize is not used but kept for interface consistency + self.language = lan # ISO-639-1 language code + self.response_format = response_format + self.temperature = temperature + self.model = self.load_model(modelsize, cache_dir, model_dir) + + def load_model(self, *args, **kwargs): + from openai import OpenAI + self.client = OpenAI() + # Since we're using the OpenAI API, there's no model to load locally. 
+ print("Model configuration is set to use the OpenAI Whisper API.") + + def ts_words(self, segments): + o = [] + for segment in segments: + # Skip segments containing no speech + if segment["no_speech_prob"] > 0.8: + continue + + # Splitting the text into words and filtering out empty strings + words = [word.strip() for word in segment["text"].split() if word.strip()] + + if not words: + continue + + # Assign start and end times for each word + # We only have timestamps per segment, so interpolating start and end-times + # assuming equal duration per word + segment_duration = segment["end"] - segment["start"] + duration_per_word = segment_duration / len(words) + start_time = segment["start"] + for word in words: + end_time = start_time + duration_per_word + o.append((start_time, end_time, word)) + start_time = end_time + + return o + + + def segments_end_ts(self, res): + return [s["end"] for s in res] + + def transcribe(self, audio_data, prompt=None, *args, **kwargs): + # Write the audio data to a buffer + buffer = io.BytesIO() + buffer.name = "temp.wav" + sf.write(buffer, audio_data, samplerate=16000, format='WAV', subtype='PCM_16') + buffer.seek(0) # Reset buffer's position to the beginning + + # Prepare transcription parameters + transcription_params = { + "model": self.modelname, + "file": buffer, + "response_format": self.response_format, + "temperature": self.temperature + } + if self.language: + transcription_params["language"] = self.language + if prompt: + transcription_params["prompt"] = prompt + + # Perform the transcription + transcript = self.client.audio.transcriptions.create(**transcription_params) + + return transcript.segments + class HypothesisBuffer: @@ -453,7 +525,7 @@ def add_shared_args(parser): parser.add_argument('--model_dir', type=str, default=None, help="Dir where Whisper model.bin and other files are saved. 
This option overrides --model and --model_cache_dir parameter.") parser.add_argument('--lan', '--language', type=str, default='en', help="Language code for transcription, e.g. en,de,cs.") parser.add_argument('--task', type=str, default='transcribe', choices=["transcribe","translate"],help="Transcribe or translate.") - parser.add_argument('--backend', type=str, default="faster-whisper", choices=["faster-whisper", "whisper_timestamped"],help='Load only this backend for Whisper processing.') + parser.add_argument('--backend', type=str, default="faster-whisper", choices=["faster-whisper", "whisper_timestamped", "openai-api"],help='Load only this backend for Whisper processing.') parser.add_argument('--vad', action="store_true", default=False, help='Use VAD = voice activity detection, with the default parameters.') parser.add_argument('--buffer_trimming', type=str, default="segment", choices=["sentence", "segment"],help='Buffer trimming strategy -- trim completed sentences marked with punctuation mark and detected by sentence segmenter, or the completed segments returned by Whisper. Sentence segmenter must be installed for "sentence" option.') parser.add_argument('--buffer_trimming_sec', type=float, default=15, help='Buffer trimming length threshold in seconds. 
If buffer length is longer, trimming sentence/segment is triggered.') @@ -493,6 +565,8 @@ if __name__ == "__main__": if args.backend == "faster-whisper": asr_cls = FasterWhisperASR + elif args.backend == "openai-api": + asr_cls = OpenaiApiASR else: asr_cls = WhisperTimestampedASR diff --git a/whisper_online_server.py b/whisper_online_server.py index b2f5120..13a85de 100644 --- a/whisper_online_server.py +++ b/whisper_online_server.py @@ -29,6 +29,8 @@ print(f"Loading Whisper {size} model for {language}...",file=sys.stderr,end=" ", if args.backend == "faster-whisper": from faster_whisper import WhisperModel asr_cls = FasterWhisperASR +elif args.backend == "openai-api": + asr_cls = OpenaiApiASR else: import whisper import whisper_timestamped From ab27bfb361da9ce38569a82f7002f3652571a5d9 Mon Sep 17 00:00:00 2001 From: Tijs Zwinkels Date: Thu, 25 Jan 2024 10:08:21 +0100 Subject: [PATCH 02/43] Update documentation to include openai-api backend --- README.md | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 584f3d0..8b2ffb8 100644 --- a/README.md +++ b/README.md @@ -31,14 +31,19 @@ Please, cite us. [Bibtex citation](http://www.afnlp.org/conferences/ijcnlp2023/p ## Installation -1) ``pip install librosa`` -- audio processing library +1) ``pip install librosa soundfile`` -- audio processing library 2) Whisper backend. -Two alternative backends are integrated. The most recommended one is [faster-whisper](https://github.com/guillaumekln/faster-whisper) with GPU support. Follow their instructions for NVIDIA libraries -- we succeeded with CUDNN 8.5.0 and CUDA 11.7. Install with `pip install faster-whisper`. + Several alternative backends are integrated. The most recommended one is [faster-whisper](https://github.com/guillaumekln/faster-whisper) with GPU support. Follow their instructions for NVIDIA libraries -- we succeeded with CUDNN 8.5.0 and CUDA 11.7. Install with `pip install faster-whisper`. 
Alternative, less restrictive, but slower backend is [whisper-timestamped](https://github.com/linto-ai/whisper-timestamped): `pip install git+https://github.com/linto-ai/whisper-timestamped` +Thirdly, it's also possible to run this software from the [OpenAI Whisper API](https://platform.openai.com/docs/api-reference/audio/createTranscription). This solution is fast and requires no GPU, just a small VM will suffice, but you will need to pay OpenAI for api access. Also note that, since each audio fragment is processed multiple times, the [price](https://openai.com/pricing) will be higher than obvious from the pricing page, so keep an eye on costs while using. Setting a higher chunk-size will reduce costs significantly. +Install with: `pip install openai` + +For running with the openai-api backend, make sure that your [OpenAI api key](https://platform.openai.com/api-keys) is set in the `OPENAI_API_KEY` environment variable. For example, before running, do: `export OPENAI_API_KEY=sk-xxx` with *sk-xxx* replaced with your api key. + The backend is loaded only when chosen. The unused one does not have to be installed. 
3) Optional, not recommended: sentence segmenter (aka sentence tokenizer) @@ -69,7 +74,7 @@ In case of installation issues of opus-fast-mosestokenizer, especially on Window ``` usage: whisper_online.py [-h] [--min-chunk-size MIN_CHUNK_SIZE] [--model {tiny.en,tiny,base.en,base,small.en,small,medium.en,medium,large-v1,large-v2,large-v3,large}] [--model_cache_dir MODEL_CACHE_DIR] [--model_dir MODEL_DIR] [--lan LAN] [--task {transcribe,translate}] - [--backend {faster-whisper,whisper_timestamped}] [--vad] [--buffer_trimming {sentence,segment}] [--buffer_trimming_sec BUFFER_TRIMMING_SEC] [--start_at START_AT] [--offline] [--comp_unaware] + [--backend {faster-whisper,whisper_timestamped,openai-api}] [--vad] [--buffer_trimming {sentence,segment}] [--buffer_trimming_sec BUFFER_TRIMMING_SEC] [--start_at START_AT] [--offline] [--comp_unaware] audio_path positional arguments: @@ -89,7 +94,7 @@ options: Language code for transcription, e.g. en,de,cs. --task {transcribe,translate} Transcribe or translate. - --backend {faster-whisper,whisper_timestamped} + --backend {faster-whisper,whisper_timestamped,openai-api} Load only this backend for Whisper processing. --vad Use VAD = voice activity detection, with the default parameters. 
--buffer_trimming {sentence,segment} From 50f1b94856cd916d7e5bca1650fc8fc8ff3104f2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dominik=20Mach=C3=A1=C4=8Dek?= Date: Thu, 25 Jan 2024 16:49:25 +0100 Subject: [PATCH 03/43] missing features in openai-api, PR #52 --- whisper_online.py | 88 ++++++++++++++++++++++++++++++----------------- 1 file changed, 56 insertions(+), 32 deletions(-) diff --git a/whisper_online.py b/whisper_online.py index 860c82d..e4bfff8 100644 --- a/whisper_online.py +++ b/whisper_online.py @@ -6,8 +6,7 @@ from functools import lru_cache import time import io import soundfile as sf - - +import math @lru_cache def load_audio(fname): @@ -147,24 +146,34 @@ class FasterWhisperASR(ASRBase): class OpenaiApiASR(ASRBase): """Uses OpenAI's Whisper API for audio transcription.""" - def __init__(self, modelsize=None, lan=None, cache_dir=None, model_dir=None, response_format="verbose_json", temperature=0): - self.modelname = "whisper-1" # modelsize is not used but kept for interface consistency + def __init__(self, lan=None, response_format="verbose_json", temperature=0, logfile=sys.stderr): + self.logfile = logfile + + self.modelname = "whisper-1" self.language = lan # ISO-639-1 language code self.response_format = response_format self.temperature = temperature - self.model = self.load_model(modelsize, cache_dir, model_dir) + + self.load_model() + + self.use_vad = False + + # reset the task in set_translate_task + self.task = "transcribe" def load_model(self, *args, **kwargs): from openai import OpenAI self.client = OpenAI() - # Since we're using the OpenAI API, there's no model to load locally. - print("Model configuration is set to use the OpenAI Whisper API.") + + self.transcribed_seconds = 0 # for logging how many seconds were processed by API, to know the cost + def ts_words(self, segments): o = [] for segment in segments: - # Skip segments containing no speech - if segment["no_speech_prob"] > 0.8: + # If VAD on, skip segments containing no speech. 
+ # TODO: threshold can be set from outside + if self.use_vad and segment["no_speech_prob"] > 0.8: continue # Splitting the text into words and filtering out empty strings @@ -197,23 +206,39 @@ class OpenaiApiASR(ASRBase): sf.write(buffer, audio_data, samplerate=16000, format='WAV', subtype='PCM_16') buffer.seek(0) # Reset buffer's position to the beginning - # Prepare transcription parameters - transcription_params = { + self.transcribed_seconds += math.ceil(len(audio_data)/16000) # it rounds up to the whole seconds + + params = { "model": self.modelname, "file": buffer, "response_format": self.response_format, "temperature": self.temperature } - if self.language: + if self.task != "translate" and self.language: transcription_params["language"] = self.language if prompt: transcription_params["prompt"] = prompt - # Perform the transcription - transcript = self.client.audio.transcriptions.create(**transcription_params) + if self.task == "translate": + proc = self.client.audio.translations + else: + proc = self.client.audio.transcriptions + + # Process transcription/translation + + transcript = proc.create(**params) + print(f"OpenAI API processed accumulated {self.transcribed_seconds} seconds ",file=self.logfile) return transcript.segments + def use_vad(self): + self.use_vad = True + + def set_translate_task(self): + self.task = "translate" + + + class HypothesisBuffer: @@ -557,20 +582,27 @@ if __name__ == "__main__": duration = len(load_audio(audio_path))/SAMPLING_RATE print("Audio duration is: %2.2f seconds" % duration, file=logfile) - size = args.model language = args.lan - t = time.time() - print(f"Loading Whisper {size} model for {language}...",file=logfile,end=" ",flush=True) - - if args.backend == "faster-whisper": - asr_cls = FasterWhisperASR - elif args.backend == "openai-api": - asr_cls = OpenaiApiASR + if args.backend == "openai-api": + print("Using OpenAI API.",file=logfile) + asr = OpenaiApiASR(lan=language) else: - asr_cls = WhisperTimestampedASR + if 
args.backend == "faster-whisper": + asr_cls = FasterWhisperASR + else: + asr_cls = WhisperTimestampedASR - asr = asr_cls(modelsize=size, lan=language, cache_dir=args.model_cache_dir, model_dir=args.model_dir) + size = args.model + t = time.time() + print(f"Loading Whisper {size} model for {language}...",file=logfile,end=" ",flush=True) + asr = asr_cls(modelsize=size, lan=language, cache_dir=args.model_cache_dir, model_dir=args.model_dir) + e = time.time() + print(f"done. It took {round(e-t,2)} seconds.",file=logfile) + + if args.vad: + print("setting VAD filter",file=logfile) + asr.use_vad() if args.task == "translate": asr.set_translate_task() @@ -578,14 +610,6 @@ if __name__ == "__main__": else: tgt_language = language # Whisper transcribes in this language - - e = time.time() - print(f"done. It took {round(e-t,2)} seconds.",file=logfile) - - if args.vad: - print("setting VAD filter",file=logfile) - asr.use_vad() - min_chunk = args.min_chunk_size if args.buffer_trimming == "sentence": From d65fd8a649cb8f577e32b3cff193b85818ae7bad Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dominik=20Mach=C3=A1=C4=8Dek?= Date: Thu, 25 Jan 2024 17:53:07 +0100 Subject: [PATCH 04/43] fixes --- whisper_online.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/whisper_online.py b/whisper_online.py index e4bfff8..2941920 100644 --- a/whisper_online.py +++ b/whisper_online.py @@ -215,9 +215,9 @@ class OpenaiApiASR(ASRBase): "temperature": self.temperature } if self.task != "translate" and self.language: - transcription_params["language"] = self.language + params["language"] = self.language if prompt: - transcription_params["prompt"] = prompt + params["prompt"] = prompt if self.task == "translate": proc = self.client.audio.translations @@ -227,7 +227,7 @@ class OpenaiApiASR(ASRBase): # Process transcription/translation transcript = proc.create(**params) - print(f"OpenAI API processed accumulated {self.transcribed_seconds} seconds ",file=self.logfile) + print(f"OpenAI 
API processed accumulated {self.transcribed_seconds} seconds",file=self.logfile) return transcript.segments From f412812082164a33e97ac1226d915c549e345619 Mon Sep 17 00:00:00 2001 From: Tijs Zwinkels Date: Wed, 24 Jan 2024 15:31:18 +0100 Subject: [PATCH 05/43] OpenAI Whisper API backend --- whisper_online.py | 76 +++++++++++++++++++++++++++++++++++++++- whisper_online_server.py | 2 ++ 2 files changed, 77 insertions(+), 1 deletion(-) diff --git a/whisper_online.py b/whisper_online.py index 59d41e7..edab195 100644 --- a/whisper_online.py +++ b/whisper_online.py @@ -4,6 +4,8 @@ import numpy as np import librosa from functools import lru_cache import time +import io +import soundfile as sf @@ -148,6 +150,76 @@ class FasterWhisperASR(ASRBase): self.transcribe_kargs["task"] = "translate" +class OpenaiApiASR(ASRBase): + """Uses OpenAI's Whisper API for audio transcription.""" + + def __init__(self, modelsize=None, lan=None, cache_dir=None, model_dir=None, response_format="verbose_json", temperature=0): + self.modelname = "whisper-1" # modelsize is not used but kept for interface consistency + self.language = lan # ISO-639-1 language code + self.response_format = response_format + self.temperature = temperature + self.model = self.load_model(modelsize, cache_dir, model_dir) + + def load_model(self, *args, **kwargs): + from openai import OpenAI + self.client = OpenAI() + # Since we're using the OpenAI API, there's no model to load locally. 
+ print("Model configuration is set to use the OpenAI Whisper API.") + + def ts_words(self, segments): + o = [] + for segment in segments: + # Skip segments containing no speech + if segment["no_speech_prob"] > 0.8: + continue + + # Splitting the text into words and filtering out empty strings + words = [word.strip() for word in segment["text"].split() if word.strip()] + + if not words: + continue + + # Assign start and end times for each word + # We only have timestamps per segment, so interpolating start and end-times + # assuming equal duration per word + segment_duration = segment["end"] - segment["start"] + duration_per_word = segment_duration / len(words) + start_time = segment["start"] + for word in words: + end_time = start_time + duration_per_word + o.append((start_time, end_time, word)) + start_time = end_time + + return o + + + def segments_end_ts(self, res): + return [s["end"] for s in res] + + def transcribe(self, audio_data, prompt=None, *args, **kwargs): + # Write the audio data to a buffer + buffer = io.BytesIO() + buffer.name = "temp.wav" + sf.write(buffer, audio_data, samplerate=16000, format='WAV', subtype='PCM_16') + buffer.seek(0) # Reset buffer's position to the beginning + + # Prepare transcription parameters + transcription_params = { + "model": self.modelname, + "file": buffer, + "response_format": self.response_format, + "temperature": self.temperature + } + if self.language: + transcription_params["language"] = self.language + if prompt: + transcription_params["prompt"] = prompt + + # Perform the transcription + transcript = self.client.audio.transcriptions.create(**transcription_params) + + return transcript.segments + class HypothesisBuffer: @@ -459,7 +531,7 @@ def add_shared_args(parser): parser.add_argument('--model_dir', type=str, default=None, help="Dir where Whisper model.bin and other files are saved. 
This option overrides --model and --model_cache_dir parameter.") parser.add_argument('--lan', '--language', type=str, default='en', help="Source language code, e.g. en,de,cs, or 'auto' for language detection.") parser.add_argument('--task', type=str, default='transcribe', choices=["transcribe","translate"],help="Transcribe or translate.") - parser.add_argument('--backend', type=str, default="faster-whisper", choices=["faster-whisper", "whisper_timestamped"],help='Load only this backend for Whisper processing.') + parser.add_argument('--backend', type=str, default="faster-whisper", choices=["faster-whisper", "whisper_timestamped", "openai-api"],help='Load only this backend for Whisper processing.') parser.add_argument('--vad', action="store_true", default=False, help='Use VAD = voice activity detection, with the default parameters.') parser.add_argument('--buffer_trimming', type=str, default="segment", choices=["sentence", "segment"],help='Buffer trimming strategy -- trim completed sentences marked with punctuation mark and detected by sentence segmenter, or the completed segments returned by Whisper. Sentence segmenter must be installed for "sentence" option.') parser.add_argument('--buffer_trimming_sec', type=float, default=15, help='Buffer trimming length threshold in seconds. 
If buffer length is longer, trimming sentence/segment is triggered.') @@ -499,6 +571,8 @@ if __name__ == "__main__": if args.backend == "faster-whisper": asr_cls = FasterWhisperASR + elif args.backend == "openai-api": + asr_cls = OpenaiApiASR else: asr_cls = WhisperTimestampedASR diff --git a/whisper_online_server.py b/whisper_online_server.py index b2f5120..13a85de 100644 --- a/whisper_online_server.py +++ b/whisper_online_server.py @@ -29,6 +29,8 @@ print(f"Loading Whisper {size} model for {language}...",file=sys.stderr,end=" ", if args.backend == "faster-whisper": from faster_whisper import WhisperModel asr_cls = FasterWhisperASR +elif args.backend == "openai-api": + asr_cls = OpenaiApiASR else: import whisper import whisper_timestamped From 6ec1f65fe2ebb9151792fb285826fb16efcf53ed Mon Sep 17 00:00:00 2001 From: Tijs Zwinkels Date: Thu, 25 Jan 2024 10:08:21 +0100 Subject: [PATCH 06/43] Update documentation to include openai-api backend --- README.md | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 26b6525..35f3fac 100644 --- a/README.md +++ b/README.md @@ -31,14 +31,19 @@ Please, cite us. [Bibtex citation](http://www.afnlp.org/conferences/ijcnlp2023/p ## Installation -1) ``pip install librosa`` -- audio processing library +1) ``pip install librosa soundfile`` -- audio processing library 2) Whisper backend. -Two alternative backends are integrated. The most recommended one is [faster-whisper](https://github.com/guillaumekln/faster-whisper) with GPU support. Follow their instructions for NVIDIA libraries -- we succeeded with CUDNN 8.5.0 and CUDA 11.7. Install with `pip install faster-whisper`. + Several alternative backends are integrated. The most recommended one is [faster-whisper](https://github.com/guillaumekln/faster-whisper) with GPU support. Follow their instructions for NVIDIA libraries -- we succeeded with CUDNN 8.5.0 and CUDA 11.7. Install with `pip install faster-whisper`. 
Alternative, less restrictive, but slower backend is [whisper-timestamped](https://github.com/linto-ai/whisper-timestamped): `pip install git+https://github.com/linto-ai/whisper-timestamped` +Thirdly, it's also possible to run this software from the [OpenAI Whisper API](https://platform.openai.com/docs/api-reference/audio/createTranscription). This solution is fast and requires no GPU, just a small VM will suffice, but you will need to pay OpenAI for api access. Also note that, since each audio fragment is processed multiple times, the [price](https://openai.com/pricing) will be higher than obvious from the pricing page, so keep an eye on costs while using. Setting a higher chunk-size will reduce costs significantly. +Install with: `pip install openai` + +For running with the openai-api backend, make sure that your [OpenAI api key](https://platform.openai.com/api-keys) is set in the `OPENAI_API_KEY` environment variable. For example, before running, do: `export OPENAI_API_KEY=sk-xxx` with *sk-xxx* replaced with your api key. + The backend is loaded only when chosen. The unused one does not have to be installed. 
3) Optional, not recommended: sentence segmenter (aka sentence tokenizer) @@ -69,7 +74,7 @@ In case of installation issues of opus-fast-mosestokenizer, especially on Window ``` usage: whisper_online.py [-h] [--min-chunk-size MIN_CHUNK_SIZE] [--model {tiny.en,tiny,base.en,base,small.en,small,medium.en,medium,large-v1,large-v2,large-v3,large}] [--model_cache_dir MODEL_CACHE_DIR] [--model_dir MODEL_DIR] [--lan LAN] [--task {transcribe,translate}] - [--backend {faster-whisper,whisper_timestamped}] [--vad] [--buffer_trimming {sentence,segment}] [--buffer_trimming_sec BUFFER_TRIMMING_SEC] [--start_at START_AT] [--offline] [--comp_unaware] + [--backend {faster-whisper,whisper_timestamped,openai-api}] [--vad] [--buffer_trimming {sentence,segment}] [--buffer_trimming_sec BUFFER_TRIMMING_SEC] [--start_at START_AT] [--offline] [--comp_unaware] audio_path positional arguments: @@ -89,7 +94,7 @@ options: Source language code, e.g. en,de,cs, or 'auto' for language detection. --task {transcribe,translate} Transcribe or translate. - --backend {faster-whisper,whisper_timestamped} + --backend {faster-whisper,whisper_timestamped,openai-api} Load only this backend for Whisper processing. --vad Use VAD = voice activity detection, with the default parameters. 
--buffer_trimming {sentence,segment} From f8b2ae07b870077e8e3b88fed4e23ec0e118d66c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dominik=20Mach=C3=A1=C4=8Dek?= Date: Thu, 25 Jan 2024 16:49:25 +0100 Subject: [PATCH 07/43] missing features in openai-api, PR #52 --- whisper_online.py | 88 ++++++++++++++++++++++++++++++----------------- 1 file changed, 56 insertions(+), 32 deletions(-) diff --git a/whisper_online.py b/whisper_online.py index edab195..e0515de 100644 --- a/whisper_online.py +++ b/whisper_online.py @@ -6,8 +6,7 @@ from functools import lru_cache import time import io import soundfile as sf - - +import math @lru_cache def load_audio(fname): @@ -153,24 +152,34 @@ class FasterWhisperASR(ASRBase): class OpenaiApiASR(ASRBase): """Uses OpenAI's Whisper API for audio transcription.""" - def __init__(self, modelsize=None, lan=None, cache_dir=None, model_dir=None, response_format="verbose_json", temperature=0): - self.modelname = "whisper-1" # modelsize is not used but kept for interface consistency + def __init__(self, lan=None, response_format="verbose_json", temperature=0, logfile=sys.stderr): + self.logfile = logfile + + self.modelname = "whisper-1" self.language = lan # ISO-639-1 language code self.response_format = response_format self.temperature = temperature - self.model = self.load_model(modelsize, cache_dir, model_dir) + + self.load_model() + + self.use_vad = False + + # reset the task in set_translate_task + self.task = "transcribe" def load_model(self, *args, **kwargs): from openai import OpenAI self.client = OpenAI() - # Since we're using the OpenAI API, there's no model to load locally. - print("Model configuration is set to use the OpenAI Whisper API.") + + self.transcribed_seconds = 0 # for logging how many seconds were processed by API, to know the cost + def ts_words(self, segments): o = [] for segment in segments: - # Skip segments containing no speech - if segment["no_speech_prob"] > 0.8: + # If VAD on, skip segments containing no speech. 
+ # TODO: threshold can be set from outside + if self.use_vad and segment["no_speech_prob"] > 0.8: continue # Splitting the text into words and filtering out empty strings @@ -203,23 +212,39 @@ class OpenaiApiASR(ASRBase): sf.write(buffer, audio_data, samplerate=16000, format='WAV', subtype='PCM_16') buffer.seek(0) # Reset buffer's position to the beginning - # Prepare transcription parameters - transcription_params = { + self.transcribed_seconds += math.ceil(len(audio_data)/16000) # it rounds up to the whole seconds + + params = { "model": self.modelname, "file": buffer, "response_format": self.response_format, "temperature": self.temperature } - if self.language: + if self.task != "translate" and self.language: transcription_params["language"] = self.language if prompt: transcription_params["prompt"] = prompt - # Perform the transcription - transcript = self.client.audio.transcriptions.create(**transcription_params) + if self.task == "translate": + proc = self.client.audio.translations + else: + proc = self.client.audio.transcriptions + + # Process transcription/translation + + transcript = proc.create(**params) + print(f"OpenAI API processed accumulated {self.transcribed_seconds} seconds ",file=self.logfile) return transcript.segments + def use_vad(self): + self.use_vad = True + + def set_translate_task(self): + self.task = "translate" + + + class HypothesisBuffer: @@ -563,20 +588,27 @@ if __name__ == "__main__": duration = len(load_audio(audio_path))/SAMPLING_RATE print("Audio duration is: %2.2f seconds" % duration, file=logfile) - size = args.model language = args.lan - t = time.time() - print(f"Loading Whisper {size} model for {language}...",file=logfile,end=" ",flush=True) - - if args.backend == "faster-whisper": - asr_cls = FasterWhisperASR - elif args.backend == "openai-api": - asr_cls = OpenaiApiASR + if args.backend == "openai-api": + print("Using OpenAI API.",file=logfile) + asr = OpenaiApiASR(lan=language) else: - asr_cls = WhisperTimestampedASR + if 
args.backend == "faster-whisper": + asr_cls = FasterWhisperASR + else: + asr_cls = WhisperTimestampedASR - asr = asr_cls(modelsize=size, lan=language, cache_dir=args.model_cache_dir, model_dir=args.model_dir) + size = args.model + t = time.time() + print(f"Loading Whisper {size} model for {language}...",file=logfile,end=" ",flush=True) + asr = asr_cls(modelsize=size, lan=language, cache_dir=args.model_cache_dir, model_dir=args.model_dir) + e = time.time() + print(f"done. It took {round(e-t,2)} seconds.",file=logfile) + + if args.vad: + print("setting VAD filter",file=logfile) + asr.use_vad() if args.task == "translate": asr.set_translate_task() @@ -584,14 +616,6 @@ if __name__ == "__main__": else: tgt_language = language # Whisper transcribes in this language - - e = time.time() - print(f"done. It took {round(e-t,2)} seconds.",file=logfile) - - if args.vad: - print("setting VAD filter",file=logfile) - asr.use_vad() - min_chunk = args.min_chunk_size if args.buffer_trimming == "sentence": From 2270014219c89d3f4243438f3775aea763421d41 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dominik=20Mach=C3=A1=C4=8Dek?= Date: Thu, 25 Jan 2024 17:53:07 +0100 Subject: [PATCH 08/43] fixes --- whisper_online.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/whisper_online.py b/whisper_online.py index e0515de..d60cc84 100644 --- a/whisper_online.py +++ b/whisper_online.py @@ -221,9 +221,9 @@ class OpenaiApiASR(ASRBase): "temperature": self.temperature } if self.task != "translate" and self.language: - transcription_params["language"] = self.language + params["language"] = self.language if prompt: - transcription_params["prompt"] = prompt + params["prompt"] = prompt if self.task == "translate": proc = self.client.audio.translations @@ -233,7 +233,7 @@ class OpenaiApiASR(ASRBase): # Process transcription/translation transcript = proc.create(**params) - print(f"OpenAI API processed accumulated {self.transcribed_seconds} seconds ",file=self.logfile) + print(f"OpenAI 
API processed accumulated {self.transcribed_seconds} seconds",file=self.logfile) return transcript.segments From 531418ad07431fec99ec88b3efb73a7d476bfe9e Mon Sep 17 00:00:00 2001 From: Tijs Zwinkels Date: Thu, 8 Feb 2024 23:33:52 +0100 Subject: [PATCH 09/43] Interpolate word timestamps based on word character length --- whisper_online.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/whisper_online.py b/whisper_online.py index d60cc84..477297a 100644 --- a/whisper_online.py +++ b/whisper_online.py @@ -190,12 +190,14 @@ class OpenaiApiASR(ASRBase): # Assign start and end times for each word # We only have timestamps per segment, so interpolating start and end-times - # assuming equal duration per word + + segment_duration = segment["end"] - segment["start"] - duration_per_word = segment_duration / len(words) + total_characters = sum(len(word) for word in words) + duration_per_character = segment_duration / total_characters start_time = segment["start"] for word in words: - end_time = start_time + duration_per_word + end_time = start_time + duration_per_character * len(word) o.append((start_time, end_time, word)) start_time = end_time From 3696fef2b1d753f0ce2e27a892fb7e1954687d0f Mon Sep 17 00:00:00 2001 From: Tijs Zwinkels Date: Sat, 10 Feb 2024 14:07:15 +0100 Subject: [PATCH 10/43] Use OpenAI api word-level timestamps --- whisper_online.py | 36 ++++++++++-------------------------- 1 file changed, 10 insertions(+), 26 deletions(-) diff --git a/whisper_online.py b/whisper_online.py index 477297a..80cd68b 100644 --- a/whisper_online.py +++ b/whisper_online.py @@ -176,30 +176,14 @@ class OpenaiApiASR(ASRBase): def ts_words(self, segments): o = [] - for segment in segments: - # If VAD on, skip segments containing no speech. - # TODO: threshold can be set from outside - if self.use_vad and segment["no_speech_prob"] > 0.8: - continue + # If VAD on, skip segments containing no speech. 
+ # TODO: threshold can be set from outside + # TODO: Make VAD work again with word-level timestamps + #if self.use_vad and segment["no_speech_prob"] > 0.8: + # continue - # Splitting the text into words and filtering out empty strings - words = [word.strip() for word in segment["text"].split() if word.strip()] - - if not words: - continue - - # Assign start and end times for each word - # We only have timestamps per segment, so interpolating start and end-times - - - segment_duration = segment["end"] - segment["start"] - total_characters = sum(len(word) for word in words) - duration_per_character = segment_duration / total_characters - start_time = segment["start"] - for word in words: - end_time = start_time + duration_per_character * len(word) - o.append((start_time, end_time, word)) - start_time = end_time + for word in segments: + o.append((word.get("start"), word.get("end"), word.get("word"))) return o @@ -220,7 +204,8 @@ class OpenaiApiASR(ASRBase): "model": self.modelname, "file": buffer, "response_format": self.response_format, - "temperature": self.temperature + "temperature": self.temperature, + "timestamp_granularities": ["word"] } if self.task != "translate" and self.language: params["language"] = self.language @@ -233,11 +218,10 @@ class OpenaiApiASR(ASRBase): proc = self.client.audio.transcriptions # Process transcription/translation - transcript = proc.create(**params) print(f"OpenAI API processed accumulated {self.transcribed_seconds} seconds",file=self.logfile) - return transcript.segments + return transcript.words def use_vad(self): self.use_vad = True From f0a24cd5e102a2f8215613ac295bddc78463cef8 Mon Sep 17 00:00:00 2001 From: Tijs Zwinkels Date: Sat, 10 Feb 2024 15:29:18 +0100 Subject: [PATCH 11/43] Make --vad work with --backend openai-api --- whisper_online.py | 38 ++++++++++++++++++++++---------------- 1 file changed, 22 insertions(+), 16 deletions(-) diff --git a/whisper_online.py b/whisper_online.py index 80cd68b..1bb2a28 100644 --- 
a/whisper_online.py +++ b/whisper_online.py @@ -162,7 +162,7 @@ class OpenaiApiASR(ASRBase): self.load_model() - self.use_vad = False + self.use_vad_opt = False # reset the task in set_translate_task self.task = "transcribe" @@ -175,21 +175,27 @@ class OpenaiApiASR(ASRBase): def ts_words(self, segments): - o = [] - # If VAD on, skip segments containing no speech. - # TODO: threshold can be set from outside - # TODO: Make VAD work again with word-level timestamps - #if self.use_vad and segment["no_speech_prob"] > 0.8: - # continue + no_speech_segments = [] + if self.use_vad_opt: + for segment in segments.segments: + # TODO: threshold can be set from outside + if segment["no_speech_prob"] > 0.8: + no_speech_segments.append((segment.get("start"), segment.get("end"))) - for word in segments: - o.append((word.get("start"), word.get("end"), word.get("word"))) + o = [] + for word in segments.words: + start = word.get("start") + end = word.get("end") + if any(s[0] <= start <= s[1] for s in no_speech_segments): + # print("Skipping word", word.get("word"), "because it's in a no-speech segment") + continue + o.append((start, end, word.get("word"))) return o def segments_end_ts(self, res): - return [s["end"] for s in res] + return [s["end"] for s in res.words] def transcribe(self, audio_data, prompt=None, *args, **kwargs): # Write the audio data to a buffer @@ -205,7 +211,7 @@ class OpenaiApiASR(ASRBase): "file": buffer, "response_format": self.response_format, "temperature": self.temperature, - "timestamp_granularities": ["word"] + "timestamp_granularities": ["word", "segment"] } if self.task != "translate" and self.language: params["language"] = self.language @@ -221,10 +227,10 @@ class OpenaiApiASR(ASRBase): transcript = proc.create(**params) print(f"OpenAI API processed accumulated {self.transcribed_seconds} seconds",file=self.logfile) - return transcript.words + return transcript def use_vad(self): - self.use_vad = True + self.use_vad_opt = True def 
set_translate_task(self): self.task = "translate" @@ -592,9 +598,9 @@ if __name__ == "__main__": e = time.time() print(f"done. It took {round(e-t,2)} seconds.",file=logfile) - if args.vad: - print("setting VAD filter",file=logfile) - asr.use_vad() + if args.vad: + print("setting VAD filter",file=logfile) + asr.use_vad() if args.task == "translate": asr.set_translate_task() From 922ad18ebcc117765de13692c49f024a09b7bfb9 Mon Sep 17 00:00:00 2001 From: Tijs Zwinkels Date: Wed, 14 Feb 2024 17:29:45 +0100 Subject: [PATCH 12/43] Make OpenAI backend work with language autodetect --- whisper_online.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/whisper_online.py b/whisper_online.py index 1bb2a28..8c1916e 100644 --- a/whisper_online.py +++ b/whisper_online.py @@ -152,12 +152,12 @@ class FasterWhisperASR(ASRBase): class OpenaiApiASR(ASRBase): """Uses OpenAI's Whisper API for audio transcription.""" - def __init__(self, lan=None, response_format="verbose_json", temperature=0, logfile=sys.stderr): + def __init__(self, lan=None, temperature=0, logfile=sys.stderr): self.logfile = logfile self.modelname = "whisper-1" - self.language = lan # ISO-639-1 language code - self.response_format = response_format + self.original_language = None if lan == "auto" else lan # ISO-639-1 language code + self.response_format = "verbose_json" self.temperature = temperature self.load_model() @@ -213,8 +213,8 @@ class OpenaiApiASR(ASRBase): "temperature": self.temperature, "timestamp_granularities": ["word", "segment"] } - if self.task != "translate" and self.language: - params["language"] = self.language + if self.task != "translate" and self.original_language: + params["language"] = self.original_language if prompt: params["prompt"] = prompt From 9fcd403439d95330b178354fd37d9e38af9a2d66 Mon Sep 17 00:00:00 2001 From: Tijs Zwinkels Date: Thu, 15 Feb 2024 22:24:43 +0100 Subject: [PATCH 13/43] Use automatic language detection by default (instead of English) --- 
whisper_online.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/whisper_online.py b/whisper_online.py index 8c1916e..7a6887d 100644 --- a/whisper_online.py +++ b/whisper_online.py @@ -546,7 +546,7 @@ def add_shared_args(parser): parser.add_argument('--model', type=str, default='large-v2', choices="tiny.en,tiny,base.en,base,small.en,small,medium.en,medium,large-v1,large-v2,large-v3,large".split(","),help="Name size of the Whisper model to use (default: large-v2). The model is automatically downloaded from the model hub if not present in model cache dir.") parser.add_argument('--model_cache_dir', type=str, default=None, help="Overriding the default model cache dir where models downloaded from the hub are saved") parser.add_argument('--model_dir', type=str, default=None, help="Dir where Whisper model.bin and other files are saved. This option overrides --model and --model_cache_dir parameter.") - parser.add_argument('--lan', '--language', type=str, default='en', help="Source language code, e.g. en,de,cs, or 'auto' for language detection.") + parser.add_argument('--lan', '--language', type=str, default='auto', help="Source language code, e.g. 
en,de,cs, or 'auto' for language detection.") parser.add_argument('--task', type=str, default='transcribe', choices=["transcribe","translate"],help="Transcribe or translate.") parser.add_argument('--backend', type=str, default="faster-whisper", choices=["faster-whisper", "whisper_timestamped", "openai-api"],help='Load only this backend for Whisper processing.') parser.add_argument('--vad', action="store_true", default=False, help='Use VAD = voice activity detection, with the default parameters.') From 80eb0baf5d242e952b83f79550c2388f7e0515f4 Mon Sep 17 00:00:00 2001 From: Aleksei Scripnic Date: Wed, 3 Jan 2024 10:06:05 +0000 Subject: [PATCH 14/43] Removed duplicate variable self.last_chunked_at I tried to find the difference between self.last_chunked_at and self.buffer_time_offset, and it took me a while to understand that they are exactly the same. I think it's better to get rid of one of the duplicates to make the code more readable. --- whisper_online.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/whisper_online.py b/whisper_online.py index 7672cc8..fd66319 100644 --- a/whisper_online.py +++ b/whisper_online.py @@ -328,7 +328,6 @@ class OnlineASRProcessor: self.transcript_buffer = HypothesisBuffer(logfile=self.logfile) self.commited = [] - self.last_chunked_at = 0 self.silence_iters = 0 @@ -340,7 +339,7 @@ class OnlineASRProcessor: "context" is the commited text that is inside the audio buffer. It is transcribed again and skipped. It is returned only for debugging and logging reasons. 
""" k = max(0,len(self.commited)-1) - while k > 0 and self.commited[k-1][1] > self.last_chunked_at: + while k > 0 and self.commited[k-1][1] > self.buffer_time_offset: k -= 1 p = self.commited[:k] @@ -451,7 +450,6 @@ class OnlineASRProcessor: cut_seconds = time - self.buffer_time_offset self.audio_buffer = self.audio_buffer[int(cut_seconds*self.SAMPLING_RATE):] self.buffer_time_offset = time - self.last_chunked_at = time def words_to_sentences(self, words): """Uses self.tokenizer for sentence segmentation of words. From db8b7d28837d69070feded662d63d02b5da6ddab Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dominik=20Mach=C3=A1=C4=8Dek?= Date: Tue, 20 Feb 2024 14:36:14 +0100 Subject: [PATCH 15/43] removed unused variable --- whisper_online.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/whisper_online.py b/whisper_online.py index fd66319..2794506 100644 --- a/whisper_online.py +++ b/whisper_online.py @@ -329,8 +329,6 @@ class OnlineASRProcessor: self.transcript_buffer = HypothesisBuffer(logfile=self.logfile) self.commited = [] - self.silence_iters = 0 - def insert_audio_chunk(self, audio): self.audio_buffer = np.append(self.audio_buffer, audio) From 24926c98e0891d2972a8b438bd0898b472d5f128 Mon Sep 17 00:00:00 2001 From: koiking213 Date: Tue, 20 Feb 2024 22:46:04 +0900 Subject: [PATCH 16/43] specify audio dtype --- whisper_online.py | 2 +- whisper_online_server.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/whisper_online.py b/whisper_online.py index 2794506..bcaeec4 100644 --- a/whisper_online.py +++ b/whisper_online.py @@ -11,7 +11,7 @@ import math @lru_cache def load_audio(fname): a, _ = librosa.load(fname, sr=16000) - return a + return a.astype('float32') def load_audio_chunk(fname, beg, end): audio = load_audio(fname) diff --git a/whisper_online_server.py b/whisper_online_server.py index 13a85de..a609ef0 100644 --- a/whisper_online_server.py +++ b/whisper_online_server.py @@ -138,7 +138,7 @@ class ServerProcessor: break sf = 
soundfile.SoundFile(io.BytesIO(raw_bytes), channels=1,endian="LITTLE",samplerate=SAMPLING_RATE, subtype="PCM_16",format="RAW") audio, _ = librosa.load(sf,sr=SAMPLING_RATE) - out.append(audio) + out.append(audio.astype('float32')) if not out: return None return np.concatenate(out) From 4405c451ce419539246b056112d44da70850c641 Mon Sep 17 00:00:00 2001 From: koiking213 Date: Tue, 20 Feb 2024 23:29:25 +0900 Subject: [PATCH 17/43] specify dtype for librosa.load, instead of cast --- whisper_online.py | 4 ++-- whisper_online_server.py | 5 +++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/whisper_online.py b/whisper_online.py index bcaeec4..d79396a 100644 --- a/whisper_online.py +++ b/whisper_online.py @@ -10,8 +10,8 @@ import math @lru_cache def load_audio(fname): - a, _ = librosa.load(fname, sr=16000) - return a.astype('float32') + a, _ = librosa.load(fname, sr=16000, dtype=np.float32) + return a def load_audio_chunk(fname, beg, end): audio = load_audio(fname) diff --git a/whisper_online_server.py b/whisper_online_server.py index a609ef0..0cdc97d 100644 --- a/whisper_online_server.py +++ b/whisper_online_server.py @@ -4,6 +4,7 @@ from whisper_online import * import sys import argparse import os +import numpy as np parser = argparse.ArgumentParser() # server options @@ -137,8 +138,8 @@ class ServerProcessor: if not raw_bytes: break sf = soundfile.SoundFile(io.BytesIO(raw_bytes), channels=1,endian="LITTLE",samplerate=SAMPLING_RATE, subtype="PCM_16",format="RAW") - audio, _ = librosa.load(sf,sr=SAMPLING_RATE) - out.append(audio.astype('float32')) + audio, _ = librosa.load(sf,sr=SAMPLING_RATE,dtype=np.float32) + out.append(audio) if not out: return None return np.concatenate(out) From 5929a828969f9462f69efd39441be762bb09f58f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dominik=20Mach=C3=A1=C4=8Dek?= Date: Mon, 11 Mar 2024 12:38:44 +0100 Subject: [PATCH 18/43] Update README.md bibtex update --- README.md | 25 +++++++++++++++---------- 1 file changed, 15 
insertions(+), 10 deletions(-) diff --git a/README.md b/README.md index 35f3fac..16d3a23 100644 --- a/README.md +++ b/README.md @@ -14,18 +14,23 @@ Demo video: https://player.vimeo.com/video/840442741 [Slides](http://ufallab.ms.mff.cuni.cz/~machacek/pre-prints/AACL23-2.11.2023-Turning-Whisper-oral.pdf) -- 15 minutes oral presentation at IJCNLP-AACL 2023 -Please, cite us. [Bibtex citation](http://www.afnlp.org/conferences/ijcnlp2023/proceedings/main-demo/cdrom/bib/2023.ijcnlp-demo.3.bib): +Please, cite us. [ACL Anthology](https://aclanthology.org/2023.ijcnlp-demo.3/), [Bibtex citation](https://aclanthology.org/2023.ijcnlp-demo.3.bib): ``` -@InProceedings{machacek-dabre-bojar:2023:ijcnlp, - author = {Macháček, Dominik and Dabre, Raj and Bojar, Ondřej}, - title = {Turning Whisper into Real-Time Transcription System}, - booktitle = {System Demonstrations}, - month = {November}, - year = {2023}, - address = {Bali, Indonesia}, - publisher = {Asian Federation of Natural Language Processing}, - pages = {17--24}, +@inproceedings{machacek-etal-2023-turning, + title = "Turning Whisper into Real-Time Transcription System", + author = "Mach{\'a}{\v{c}}ek, Dominik and + Dabre, Raj and + Bojar, Ond{\v{r}}ej", + editor = "Saha, Sriparna and + Sujaini, Herry", + booktitle = "Proceedings of the 13th International Joint Conference on Natural Language Processing and the 3rd Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics: System Demonstrations", + month = nov, + year = "2023", + address = "Bali, Indonesia", + publisher = "Association for Computational Linguistics", + url = "https://aclanthology.org/2023.ijcnlp-demo.3", + pages = "17--24", } ``` From 8896389ea3e7b142b21acc7a2ade9d42bd39cedd Mon Sep 17 00:00:00 2001 From: Tijs Zwinkels Date: Wed, 20 Mar 2024 15:29:10 +0100 Subject: [PATCH 19/43] Fix crash when using openai-api with whisper_online_server + refactored creation of the ASR into a factory method --- whisper_online.py | 53 
++++++++++++++++++++++++---------------- whisper_online_server.py | 25 +------------------ 2 files changed, 33 insertions(+), 45 deletions(-) diff --git a/whisper_online.py b/whisper_online.py index d79396a..c90babb 100644 --- a/whisper_online.py +++ b/whisper_online.py @@ -548,6 +548,37 @@ def add_shared_args(parser): parser.add_argument('--buffer_trimming', type=str, default="segment", choices=["sentence", "segment"],help='Buffer trimming strategy -- trim completed sentences marked with punctuation mark and detected by sentence segmenter, or the completed segments returned by Whisper. Sentence segmenter must be installed for "sentence" option.') parser.add_argument('--buffer_trimming_sec', type=float, default=15, help='Buffer trimming length threshold in seconds. If buffer length is longer, trimming sentence/segment is triggered.') +def asr_factory(args, logfile=sys.stderr): + """ + Creates and configures an ASR instance based on the specified backend and arguments. + """ + backend = args.backend + if backend == "openai-api": + print("Using OpenAI API.", file=logfile) + asr = OpenaiApiASR(lan=args.lan) + else: + if backend == "faster-whisper": + from faster_whisper import FasterWhisperASR + asr_cls = FasterWhisperASR + else: + from whisper_timestamped import WhisperTimestampedASR + asr_cls = WhisperTimestampedASR + + # Only for FasterWhisperASR and WhisperTimestampedASR + size = args.model + t = time.time() + print(f"Loading Whisper {size} model for {args.lan}...", file=logfile, end=" ", flush=True) + asr = asr_cls(modelsize=size, lan=args.lan, cache_dir=args.model_cache_dir, model_dir=args.model_dir) + e = time.time() + print(f"done. 
It took {round(e-t,2)} seconds.", file=logfile) + + # Apply common configurations + if getattr(args, 'vad', False): # Checks if VAD argument is present and True + print("Setting VAD filter", file=logfile) + asr.use_vad() + + return asr + ## main: if __name__ == "__main__": @@ -575,28 +606,8 @@ if __name__ == "__main__": duration = len(load_audio(audio_path))/SAMPLING_RATE print("Audio duration is: %2.2f seconds" % duration, file=logfile) + asr = asr_factory(args, logfile=logfile) language = args.lan - - if args.backend == "openai-api": - print("Using OpenAI API.",file=logfile) - asr = OpenaiApiASR(lan=language) - else: - if args.backend == "faster-whisper": - asr_cls = FasterWhisperASR - else: - asr_cls = WhisperTimestampedASR - - size = args.model - t = time.time() - print(f"Loading Whisper {size} model for {language}...",file=logfile,end=" ",flush=True) - asr = asr_cls(modelsize=size, lan=language, cache_dir=args.model_cache_dir, model_dir=args.model_dir) - e = time.time() - print(f"done. 
It took {round(e-t,2)} seconds.",file=logfile) - - if args.vad: - print("setting VAD filter",file=logfile) - asr.use_vad() - if args.task == "translate": asr.set_translate_task() tgt_language = "en" # Whisper translates into English diff --git a/whisper_online_server.py b/whisper_online_server.py index 0cdc97d..7f81caa 100644 --- a/whisper_online_server.py +++ b/whisper_online_server.py @@ -24,36 +24,13 @@ SAMPLING_RATE = 16000 size = args.model language = args.lan -t = time.time() -print(f"Loading Whisper {size} model for {language}...",file=sys.stderr,end=" ",flush=True) - -if args.backend == "faster-whisper": - from faster_whisper import WhisperModel - asr_cls = FasterWhisperASR -elif args.backend == "openai-api": - asr_cls = OpenaiApiASR -else: - import whisper - import whisper_timestamped -# from whisper_timestamped_model import WhisperTimestampedASR - asr_cls = WhisperTimestampedASR - -asr = asr_cls(modelsize=size, lan=language, cache_dir=args.model_cache_dir, model_dir=args.model_dir) - +asr = asr_factory(args) if args.task == "translate": asr.set_translate_task() tgt_language = "en" else: tgt_language = language -e = time.time() -print(f"done. It took {round(e-t,2)} seconds.",file=sys.stderr) - -if args.vad: - print("setting VAD filter",file=sys.stderr) - asr.use_vad() - - min_chunk = args.min_chunk_size if args.buffer_trimming == "sentence": From 006de3e7b044533bd89c0f228a7ce388bed41a51 Mon Sep 17 00:00:00 2001 From: Tijs Zwinkels Date: Wed, 20 Mar 2024 16:02:24 +0100 Subject: [PATCH 20/43] Fix imports Now, the ASR implementations do their own imports. 
No need to import in the factory --- whisper_online.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/whisper_online.py b/whisper_online.py index c90babb..a00547e 100644 --- a/whisper_online.py +++ b/whisper_online.py @@ -58,6 +58,7 @@ class WhisperTimestampedASR(ASRBase): def load_model(self, modelsize=None, cache_dir=None, model_dir=None): import whisper + import whisper_timestamped from whisper_timestamped import transcribe_timestamped self.transcribe_timestamped = transcribe_timestamped if model_dir is not None: @@ -558,10 +559,8 @@ def asr_factory(args, logfile=sys.stderr): asr = OpenaiApiASR(lan=args.lan) else: if backend == "faster-whisper": - from faster_whisper import FasterWhisperASR asr_cls = FasterWhisperASR else: - from whisper_timestamped import WhisperTimestampedASR asr_cls = WhisperTimestampedASR # Only for FasterWhisperASR and WhisperTimestampedASR From bccbb15177137f5ea7246b2211791f2481c41279 Mon Sep 17 00:00:00 2001 From: Tijs Zwinkels Date: Wed, 20 Mar 2024 16:29:01 +0100 Subject: [PATCH 21/43] Move creation of OnlineASRProcessor inside the factory method Preventing more code duplication between whisper_online.py and whisper_online_server.py --- whisper_online.py | 35 ++++++++++++++++++----------------- whisper_online_server.py | 17 +---------------- 2 files changed, 19 insertions(+), 33 deletions(-) diff --git a/whisper_online.py b/whisper_online.py index a00547e..c4a90e3 100644 --- a/whisper_online.py +++ b/whisper_online.py @@ -551,7 +551,7 @@ def add_shared_args(parser): def asr_factory(args, logfile=sys.stderr): """ - Creates and configures an ASR instance based on the specified backend and arguments. + Creates and configures an ASR and ASR Online instance based on the specified backend and arguments. 
""" backend = args.backend if backend == "openai-api": @@ -576,8 +576,23 @@ def asr_factory(args, logfile=sys.stderr): print("Setting VAD filter", file=logfile) asr.use_vad() - return asr + language = args.lan + if args.task == "translate": + asr.set_translate_task() + tgt_language = "en" # Whisper translates into English + else: + tgt_language = language # Whisper transcribes in this language + # Create the tokenizer + if args.buffer_trimming == "sentence": + tokenizer = create_tokenizer(tgt_language) + else: + tokenizer = None + + # Create the OnlineASRProcessor + online = OnlineASRProcessor(asr,tokenizer,logfile=logfile,buffer_trimming=(args.buffer_trimming, args.buffer_trimming_sec)) + + return asr, online ## main: if __name__ == "__main__": @@ -605,22 +620,8 @@ if __name__ == "__main__": duration = len(load_audio(audio_path))/SAMPLING_RATE print("Audio duration is: %2.2f seconds" % duration, file=logfile) - asr = asr_factory(args, logfile=logfile) - language = args.lan - if args.task == "translate": - asr.set_translate_task() - tgt_language = "en" # Whisper translates into English - else: - tgt_language = language # Whisper transcribes in this language - - + asr, online = asr_factory(args, logfile=logfile) min_chunk = args.min_chunk_size - if args.buffer_trimming == "sentence": - tokenizer = create_tokenizer(tgt_language) - else: - tokenizer = None - online = OnlineASRProcessor(asr,tokenizer,logfile=logfile,buffer_trimming=(args.buffer_trimming, args.buffer_trimming_sec)) - # load the audio into the LRU cache before we start the timer a = load_audio_chunk(audio_path,0,1) diff --git a/whisper_online_server.py b/whisper_online_server.py index 7f81caa..188038a 100644 --- a/whisper_online_server.py +++ b/whisper_online_server.py @@ -23,24 +23,9 @@ SAMPLING_RATE = 16000 size = args.model language = args.lan - -asr = asr_factory(args) -if args.task == "translate": - asr.set_translate_task() - tgt_language = "en" -else: - tgt_language = language - +asr, online = 
asr_factory(args) min_chunk = args.min_chunk_size -if args.buffer_trimming == "sentence": - tokenizer = create_tokenizer(tgt_language) -else: - tokenizer = None -online = OnlineASRProcessor(asr,tokenizer,buffer_trimming=(args.buffer_trimming, args.buffer_trimming_sec)) - - - demo_audio_path = "cs-maji-2.16k.wav" if os.path.exists(demo_audio_path): # load the audio into the LRU cache before we start the timer From b3647da0875f9f4f5d2e57b0a57efdc1cf5cfa10 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dominik=20Mach=C3=A1=C4=8Dek?= Date: Fri, 29 Mar 2024 09:19:59 +0100 Subject: [PATCH 22/43] Update README.md PDF link --- README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 16d3a23..11cc555 100644 --- a/README.md +++ b/README.md @@ -8,7 +8,9 @@ Demonstration paper, by Dominik Macháček, Raj Dabre, Ondřej Bojar, 2023 Abstract: Whisper is one of the recent state-of-the-art multilingual speech recognition and translation models, however, it is not designed for real time transcription. In this paper, we build on top of Whisper and create Whisper-Streaming, an implementation of real-time speech transcription and translation of Whisper-like models. Whisper-Streaming uses local agreement policy with self-adaptive latency to enable streaming transcription. We show that Whisper-Streaming achieves high quality and 3.3 seconds latency on unsegmented long-form speech transcription test set, and we demonstrate its robustness and practical usability as a component in live transcription service at a multilingual conference. 
-Paper in proceedings: http://www.afnlp.org/conferences/ijcnlp2023/proceedings/main-demo/cdrom/pdf/2023.ijcnlp-demo.3.pdf +Paper PDF: +https://aclanthology.org/2023.ijcnlp-demo.3.pdf + Demo video: https://player.vimeo.com/video/840442741 From 8223afee7888607879870ae11acd246e4f7a6ae2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tobias=20G=C3=A5rdhus?= Date: Fri, 29 Mar 2024 19:35:30 +0100 Subject: [PATCH 23/43] Update README.md Add Python syntax highlighting to code chunk --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 11cc555..ae63e19 100644 --- a/README.md +++ b/README.md @@ -159,7 +159,7 @@ The code whisper_online.py is nicely commented, read it as the full documentatio This pseudocode describes the interface that we suggest for your implementation. You can implement any features that you need for your application. -``` +```python from whisper_online import * src_lan = "en" # source language From d497503b5c86b6e4cf21eb9f1f4eaa4e60e1b1f5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dominik=20Mach=C3=A1=C4=8Dek?= Date: Wed, 10 Apr 2024 18:13:07 +0200 Subject: [PATCH 24/43] COntributions at README.md + nicer formatting + #77 --- README.md | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index ae63e19..707e58d 100644 --- a/README.md +++ b/README.md @@ -3,16 +3,12 @@ Whisper realtime streaming for long speech-to-text transcription and translation **Turning Whisper into Real-Time Transcription System** -Demonstration paper, by Dominik Macháček, Raj Dabre, Ondřej Bojar, 2023 +Demonstration paper, by [Dominik Macháček](https://ufal.mff.cuni.cz/dominik-machacek), [Raj Dabre](https://prajdabre.github.io/), [Ondřej Bojar](https://ufal.mff.cuni.cz/ondrej-bojar), 2023 -Abstract: Whisper is one of the recent state-of-the-art multilingual speech recognition and translation models, however, it is not designed for real time transcription. 
In this paper, we build on top of Whisper and create Whisper-Streaming, an implementation of real-time speech transcription and translation of Whisper-like models. Whisper-Streaming uses local agreement policy with self-adaptive latency to enable streaming transcription. We show that Whisper-Streaming achieves high quality and 3.3 seconds latency on unsegmented long-form speech transcription test set, and we demonstrate its robustness and practical usability as a component in live transcription service at a multilingual conference. +Abstract: Whisper is one of the recent state-of-the-art multilingual speech recognition and translation models, however, it is not designed for real-time transcription. In this paper, we build on top of Whisper and create Whisper-Streaming, an implementation of real-time speech transcription and translation of Whisper-like models. Whisper-Streaming uses local agreement policy with self-adaptive latency to enable streaming transcription. We show that Whisper-Streaming achieves high quality and 3.3 seconds latency on unsegmented long-form speech transcription test set, and we demonstrate its robustness and practical usability as a component in live transcription service at a multilingual conference. -Paper PDF: -https://aclanthology.org/2023.ijcnlp-demo.3.pdf - - -Demo video: https://player.vimeo.com/video/840442741 +[Paper PDF](https://aclanthology.org/2023.ijcnlp-demo.3.pdf), [Demo video](https://player.vimeo.com/video/840442741) [Slides](http://ufallab.ms.mff.cuni.cz/~machacek/pre-prints/AACL23-2.11.2023-Turning-Whisper-oral.pdf) -- 15 minutes oral presentation at IJCNLP-AACL 2023 @@ -228,12 +224,20 @@ In more detail: we use the init prompt, we handle the inaccurate timestamps, we re-process confirmed sentence prefixes and skip them, making sure they don't overlap, and we limit the processing buffer window. -Contributions are welcome. 
- ### Performance evaluation [See the paper.](http://www.afnlp.org/conferences/ijcnlp2023/proceedings/main-demo/cdrom/pdf/2023.ijcnlp-demo.3.pdf) +### Contributions + +Contributions are welcome. We acknowledge especially: + +- [The GitHub contributors](https://github.com/ufal/whisper_streaming/graphs/contributors) for their pull requests with new features and bugfixes. +- [The translation of this repo into Chinese.](https://github.com/Gloridust/whisper_streaming_CN) +- [Ondřej Plátek](https://opla.cz/) for the paper pre-review. +- [Peter Polák](https://ufal.mff.cuni.cz/peter-polak) for the original idea. +- The UEDIN team of the [ELITR project](https://elitr.eu) for the original line_packet.py. + ## Contact From 5ebbed3bd70bc51294113a8ff08af074b2d925ea Mon Sep 17 00:00:00 2001 From: Alex Young Date: Sun, 14 Apr 2024 14:24:59 +0100 Subject: [PATCH 25/43] Turn prints into logging.debug calls in whisper_online_server.py --- whisper_online_server.py | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/whisper_online_server.py b/whisper_online_server.py index b2f5120..98022e0 100644 --- a/whisper_online_server.py +++ b/whisper_online_server.py @@ -4,17 +4,27 @@ from whisper_online import * import sys import argparse import os +import logging + parser = argparse.ArgumentParser() # server options parser.add_argument("--host", type=str, default='localhost') parser.add_argument("--port", type=int, default=43007) +parser.add_argument("-l", "--log-level", dest="log_level", + choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'], + help="Set the log level", + default='INFO') + # options from whisper_online add_shared_args(parser) args = parser.parse_args() +if args.log_level: + logging.basicConfig(format='whisper-server-%(levelname)s: %(message)s', + level=getattr(logging, args.log_level)) # setting whisper object by args @@ -24,7 +34,7 @@ size = args.model language = args.lan t = time.time() -print(f"Loading Whisper {size} model 
for {language}...",file=sys.stderr,end=" ",flush=True) +logging.debug(f"Loading Whisper {size} model for {language}...") if args.backend == "faster-whisper": from faster_whisper import WhisperModel @@ -44,10 +54,10 @@ else: tgt_language = language e = time.time() -print(f"done. It took {round(e-t,2)} seconds.",file=sys.stderr) +logging.debug(f"done. It took {round(e-t,2)} seconds.") if args.vad: - print("setting VAD filter",file=sys.stderr) + logging.debug("setting VAD filter") asr.use_vad() @@ -70,9 +80,7 @@ if os.path.exists(demo_audio_path): # warm up the ASR, because the very first transcribe takes much more time than the other asr.transcribe(a) else: - print("Whisper is not warmed up",file=sys.stderr) - - + logging.info("Whisper is not warmed up") ######### Server objects @@ -80,9 +88,6 @@ else: import line_packet import socket -import logging - - class Connection: '''it wraps conn object''' PACKET_SIZE = 65536 @@ -191,11 +196,6 @@ class ServerProcessor: - -# Start logging. -level = logging.INFO -logging.basicConfig(level=level, format='whisper-server-%(levelname)s: %(message)s') - # server loop with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: From 380c30d48d2ff0e445409eb8d86a5d0e9adad392 Mon Sep 17 00:00:00 2001 From: Alex Young Date: Sun, 14 Apr 2024 19:14:56 +0100 Subject: [PATCH 26/43] Further tidying of print output, so by default there's little on the console --- whisper_online.py | 72 +++++++++++++++++++++------------------- whisper_online_server.py | 10 +++--- 2 files changed, 42 insertions(+), 40 deletions(-) diff --git a/whisper_online.py b/whisper_online.py index 36bdbd6..c6e98cf 100644 --- a/whisper_online.py +++ b/whisper_online.py @@ -4,6 +4,7 @@ import numpy as np import librosa from functools import lru_cache import time +import logging @@ -57,7 +58,7 @@ class WhisperTimestampedASR(ASRBase): from whisper_timestamped import transcribe_timestamped self.transcribe_timestamped = transcribe_timestamped if model_dir is not None: - 
print("ignoring model_dir, not implemented",file=self.logfile) + logging.debug("ignoring model_dir, not implemented") return whisper.load_model(modelsize, download_root=cache_dir) def transcribe(self, audio, init_prompt=""): @@ -97,7 +98,7 @@ class FasterWhisperASR(ASRBase): def load_model(self, modelsize=None, cache_dir=None, model_dir=None): from faster_whisper import WhisperModel if model_dir is not None: - print(f"Loading whisper model from model_dir {model_dir}. modelsize and cache_dir parameters are not used.",file=self.logfile) + logging.debug(f"Loading whisper model from model_dir {model_dir}. modelsize and cache_dir parameters are not used.") model_size_or_path = model_dir elif modelsize is not None: model_size_or_path = modelsize @@ -173,9 +174,11 @@ class HypothesisBuffer: c = " ".join([self.commited_in_buffer[-j][2] for j in range(1,i+1)][::-1]) tail = " ".join(self.new[j-1][2] for j in range(1,i+1)) if c == tail: - print("removing last",i,"words:",file=self.logfile) + words = [] for j in range(i): - print("\t",self.new.pop(0),file=self.logfile) + words.append(repr(self.new.pop(0))) + words_msg = "\t".join(words) + logging.debug(f"removing last {i} words: {words_msg}") break def flush(self): @@ -267,9 +270,9 @@ class OnlineASRProcessor: """ prompt, non_prompt = self.prompt() - print("PROMPT:", prompt, file=self.logfile) - print("CONTEXT:", non_prompt, file=self.logfile) - print(f"transcribing {len(self.audio_buffer)/self.SAMPLING_RATE:2.2f} seconds from {self.buffer_time_offset:2.2f}",file=self.logfile) + logging.debug(f"PROMPT: {prompt}") + logging.debug(f"CONTEXT: {non_prompt}") + logging.debug(f"transcribing {len(self.audio_buffer)/self.SAMPLING_RATE:2.2f} seconds from {self.buffer_time_offset:2.2f}") res = self.asr.transcribe(self.audio_buffer, init_prompt=prompt) # transform to [(beg,end,"word1"), ...] 
@@ -278,8 +281,10 @@ class OnlineASRProcessor: self.transcript_buffer.insert(tsw, self.buffer_time_offset) o = self.transcript_buffer.flush() self.commited.extend(o) - print(">>>>COMPLETE NOW:",self.to_flush(o),file=self.logfile,flush=True) - print("INCOMPLETE:",self.to_flush(self.transcript_buffer.complete()),file=self.logfile,flush=True) + completed = self.to_flush(o) + logging.debug(f">>>>COMPLETE NOW: {completed}") + the_rest = self.to_flush(self.transcript_buffer.complete()) + logging.debug(f"INCOMPLETE: {the_rest}") # there is a newly confirmed text @@ -303,18 +308,18 @@ class OnlineASRProcessor: #while k>0 and self.commited[k][1] > l: # k -= 1 #t = self.commited[k][1] - print(f"chunking segment",file=self.logfile) + logging.debug(f"chunking segment") #self.chunk_at(t) - print(f"len of buffer now: {len(self.audio_buffer)/self.SAMPLING_RATE:2.2f}",file=self.logfile) + logging.debug(f"len of buffer now: {len(self.audio_buffer)/self.SAMPLING_RATE:2.2f}") return self.to_flush(o) def chunk_completed_sentence(self): if self.commited == []: return - print(self.commited,file=self.logfile) + logging.debug(self.commited) sents = self.words_to_sentences(self.commited) for s in sents: - print("\t\tSENT:",s,file=self.logfile) + logging.debug(f"\t\tSENT: {s}") if len(sents) < 2: return while len(sents) > 2: @@ -322,7 +327,7 @@ class OnlineASRProcessor: # we will continue with audio processing at this timestamp chunk_at = sents[-2][1] - print(f"--- sentence chunked at {chunk_at:2.2f}",file=self.logfile) + logging.debug(f"--- sentence chunked at {chunk_at:2.2f}") self.chunk_at(chunk_at) def chunk_completed_segment(self, res): @@ -339,12 +344,12 @@ class OnlineASRProcessor: ends.pop(-1) e = ends[-2]+self.buffer_time_offset if e <= t: - print(f"--- segment chunked at {e:2.2f}",file=self.logfile) + logging.debug(f"--- segment chunked at {e:2.2f}") self.chunk_at(e) else: - print(f"--- last segment not within commited area",file=self.logfile) + logging.debug(f"--- last segment 
not within commited area") else: - print(f"--- not enough segments to chunk",file=self.logfile) + logging.debug(f"--- not enough segments to chunk") @@ -391,7 +396,7 @@ class OnlineASRProcessor: """ o = self.transcript_buffer.complete() f = self.to_flush(o) - print("last, noncommited:",f,file=self.logfile) + logging.debug("last, noncommited: {f}") return f @@ -431,7 +436,7 @@ def create_tokenizer(lan): # the following languages are in Whisper, but not in wtpsplit: if lan in "as ba bo br bs fo haw hr ht jw lb ln lo mi nn oc sa sd sn so su sw tk tl tt".split(): - print(f"{lan} code is not supported by wtpsplit. Going to use None lang_code option.", file=sys.stderr) + logging.debug(f"{lan} code is not supported by wtpsplit. Going to use None lang_code option.") lan = None from wtpsplit import WtP @@ -476,20 +481,20 @@ if __name__ == "__main__": logfile = sys.stderr if args.offline and args.comp_unaware: - print("No or one option from --offline and --comp_unaware are available, not both. Exiting.",file=logfile) + logging.error("No or one option from --offline and --comp_unaware are available, not both. Exiting.") sys.exit(1) audio_path = args.audio_path SAMPLING_RATE = 16000 duration = len(load_audio(audio_path))/SAMPLING_RATE - print("Audio duration is: %2.2f seconds" % duration, file=logfile) + logging.info("Audio duration is: %2.2f seconds" % duration) size = args.model language = args.lan t = time.time() - print(f"Loading Whisper {size} model for {language}...",file=logfile,end=" ",flush=True) + logging.info(f"Loading Whisper {size} model for {language}...") if args.backend == "faster-whisper": asr_cls = FasterWhisperASR @@ -506,10 +511,10 @@ if __name__ == "__main__": e = time.time() - print(f"done. It took {round(e-t,2)} seconds.",file=logfile) + logging.info(f"done. 
It took {round(e-t,2)} seconds.") if args.vad: - print("setting VAD filter",file=logfile) + logging.info("setting VAD filter") asr.use_vad() @@ -543,16 +548,15 @@ if __name__ == "__main__": print("%1.4f %1.0f %1.0f %s" % (now*1000, o[0]*1000,o[1]*1000,o[2]),file=logfile,flush=True) print("%1.4f %1.0f %1.0f %s" % (now*1000, o[0]*1000,o[1]*1000,o[2]),flush=True) else: - print(o,file=logfile,flush=True) + print("here?", o,file=logfile,flush=True) if args.offline: ## offline mode processing (for testing/debugging) a = load_audio(audio_path) online.insert_audio_chunk(a) try: o = online.process_iter() - except AssertionError: - print("assertion error",file=logfile) - pass + except AssertionError as e: + log.error(f"assertion error: {repr(e)}") else: output_transcript(o) now = None @@ -563,13 +567,13 @@ if __name__ == "__main__": online.insert_audio_chunk(a) try: o = online.process_iter() - except AssertionError: - print("assertion error",file=logfile) + except AssertionError as e: + logging.error(f"assertion error: {repr(e)}") pass else: output_transcript(o, now=end) - print(f"## last processed {end:.2f}s",file=logfile,flush=True) + logging.debug(f"## last processed {end:.2f}s") if end >= duration: break @@ -595,13 +599,13 @@ if __name__ == "__main__": try: o = online.process_iter() - except AssertionError: - print("assertion error",file=logfile) + except AssertionError as e: + logging.error(f"assertion error: {e}") pass else: output_transcript(o) now = time.time() - start - print(f"## last processed {end:.2f} s, now is {now:.2f}, the latency is {now-end:.2f}",file=logfile,flush=True) + logging.debug(f"## last processed {end:.2f} s, now is {now:.2f}, the latency is {now-end:.2f}") if end >= duration: break diff --git a/whisper_online_server.py b/whisper_online_server.py index 98022e0..9ea8b74 100644 --- a/whisper_online_server.py +++ b/whisper_online_server.py @@ -39,6 +39,7 @@ logging.debug(f"Loading Whisper {size} model for {language}...") if args.backend == 
"faster-whisper": from faster_whisper import WhisperModel asr_cls = FasterWhisperASR + logging.getLogger("faster_whisper").setLevel(logging.WARNING) else: import whisper import whisper_timestamped @@ -80,7 +81,7 @@ if os.path.exists(demo_audio_path): # warm up the ASR, because the very first transcribe takes much more time than the other asr.transcribe(a) else: - logging.info("Whisper is not warmed up") + logging.debug("Whisper is not warmed up") ######### Server objects @@ -135,8 +136,6 @@ class ServerProcessor: out = [] while sum(len(x) for x in out) < self.min_chunk*SAMPLING_RATE: raw_bytes = self.connection.non_blocking_receive_audio() - print(raw_bytes[:10]) - print(len(raw_bytes)) if not raw_bytes: break sf = soundfile.SoundFile(io.BytesIO(raw_bytes), channels=1,endian="LITTLE",samplerate=SAMPLING_RATE, subtype="PCM_16",format="RAW") @@ -167,7 +166,7 @@ class ServerProcessor: print("%1.0f %1.0f %s" % (beg,end,o[2]),flush=True,file=sys.stderr) return "%1.0f %1.0f %s" % (beg,end,o[2]) else: - print(o,file=sys.stderr,flush=True) + # No text, so no output return None def send_result(self, o): @@ -181,14 +180,13 @@ class ServerProcessor: while True: a = self.receive_audio_chunk() if a is None: - print("break here",file=sys.stderr) break self.online_asr_proc.insert_audio_chunk(a) o = online.process_iter() try: self.send_result(o) except BrokenPipeError: - print("broken pipe -- connection closed?",file=sys.stderr) + logging.info("broken pipe -- connection closed?") break # o = online.finish() # this should be working From cc56fdd931a9a662684bf0daf718460808a02fae Mon Sep 17 00:00:00 2001 From: Alex Young Date: Sun, 14 Apr 2024 19:17:53 +0100 Subject: [PATCH 27/43] Add requirements.txt and a cuda_requirements.txt that's up to date --- cuda_requirements.txt | 2 ++ requirements.txt | 3 +++ 2 files changed, 5 insertions(+) create mode 100644 cuda_requirements.txt create mode 100644 requirements.txt diff --git a/cuda_requirements.txt b/cuda_requirements.txt new file mode 
100644 index 0000000..720c2fb --- /dev/null +++ b/cuda_requirements.txt @@ -0,0 +1,2 @@ +nvidia-cublas-cu12 +nvidia-cudnn-cu11==8.9.6.50 diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..11340c8 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,3 @@ +librosa +opus-fast-mosestokenizer +faster-whisper From 70bc57180c172f1829eaccff81e3d90132282cc4 Mon Sep 17 00:00:00 2001 From: Alex Young Date: Sun, 14 Apr 2024 19:29:46 +0100 Subject: [PATCH 28/43] Add a --warmup-file option to pass in a path --- whisper_online_server.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/whisper_online_server.py b/whisper_online_server.py index b2f5120..9a4cc0b 100644 --- a/whisper_online_server.py +++ b/whisper_online_server.py @@ -10,6 +10,8 @@ parser = argparse.ArgumentParser() parser.add_argument("--host", type=str, default='localhost') parser.add_argument("--port", type=int, default=43007) +parser.add_argument("--warmup-file", type=str, dest="warmup_file") + # options from whisper_online add_shared_args(parser) @@ -61,10 +63,9 @@ online = OnlineASRProcessor(asr,tokenizer,buffer_trimming=(args.buffer_trimming, -demo_audio_path = "cs-maji-2.16k.wav" -if os.path.exists(demo_audio_path): +if os.path.exists(args.warmup_file): # load the audio into the LRU cache before we start the timer - a = load_audio_chunk(demo_audio_path,0,1) + a = load_audio_chunk(args.warmup_file,0,1) # TODO: it should be tested whether it's meaningful # warm up the ASR, because the very first transcribe takes much more time than the other From 23a018d341592498f866fa8b195b69850fc5d2e2 Mon Sep 17 00:00:00 2001 From: Alex Young Date: Sun, 14 Apr 2024 19:33:54 +0100 Subject: [PATCH 29/43] Add some logging around warmup --- whisper_online_server.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/whisper_online_server.py b/whisper_online_server.py index 9ea8b74..247f68e 100644 --- a/whisper_online_server.py +++ b/whisper_online_server.py @@ -75,11 +75,13 @@ 
online = OnlineASRProcessor(asr,tokenizer,buffer_trimming=(args.buffer_trimming, demo_audio_path = "cs-maji-2.16k.wav" if os.path.exists(demo_audio_path): # load the audio into the LRU cache before we start the timer + logging.debug(f"Warming up on {demo_audio_path}") a = load_audio_chunk(demo_audio_path,0,1) # TODO: it should be tested whether it's meaningful # warm up the ASR, because the very first transcribe takes much more time than the other asr.transcribe(a) + logging.debug("Whisper is warmed up") else: logging.debug("Whisper is not warmed up") From fc4b3cd5188664e1ca64475594909b8820762448 Mon Sep 17 00:00:00 2001 From: Alex Young Date: Sun, 14 Apr 2024 19:38:41 +0100 Subject: [PATCH 30/43] Check whether we are passed a warmup file before trying to see if it exists --- whisper_online_server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/whisper_online_server.py b/whisper_online_server.py index 9a4cc0b..fdd6a46 100644 --- a/whisper_online_server.py +++ b/whisper_online_server.py @@ -63,7 +63,7 @@ online = OnlineASRProcessor(asr,tokenizer,buffer_trimming=(args.buffer_trimming, -if os.path.exists(args.warmup_file): +if args.warmup_file and os.path.exists(args.warmup_file): # load the audio into the LRU cache before we start the timer a = load_audio_chunk(args.warmup_file,0,1) From 626dedf2f5fdccc4b7ebc586ebecbc00b9396552 Mon Sep 17 00:00:00 2001 From: Alex Young Date: Sun, 14 Apr 2024 19:45:44 +0100 Subject: [PATCH 31/43] Remove 'INFO:' from a few log strings --- whisper_online_server.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/whisper_online_server.py b/whisper_online_server.py index 247f68e..59454cf 100644 --- a/whisper_online_server.py +++ b/whisper_online_server.py @@ -202,13 +202,13 @@ with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.bind((args.host, args.port)) s.listen(1) - logging.info('INFO: Listening on'+str((args.host, 
args.port))) + logging.info('Listening on'+str((args.host, args.port))) while True: conn, addr = s.accept() - logging.info('INFO: Connected to client on {}'.format(addr)) + logging.info('Connected to client on {}'.format(addr)) connection = Connection(conn) proc = ServerProcessor(connection, online, min_chunk) proc.process() conn.close() - logging.info('INFO: Connection to client closed') -logging.info('INFO: Connection closed, terminating.') + logging.info('Connection to client closed') +logging.info('Connection closed, terminating.') From 2afc97db4813a6c947da561b8148c3404a252dfd Mon Sep 17 00:00:00 2001 From: Alex Young Date: Sun, 14 Apr 2024 20:16:28 +0100 Subject: [PATCH 32/43] Set the log level inside faster-whisper again (lost in merge) --- whisper_online.py | 1 + 1 file changed, 1 insertion(+) diff --git a/whisper_online.py b/whisper_online.py index d1a91a5..82b2c7a 100644 --- a/whisper_online.py +++ b/whisper_online.py @@ -104,6 +104,7 @@ class FasterWhisperASR(ASRBase): def load_model(self, modelsize=None, cache_dir=None, model_dir=None): from faster_whisper import WhisperModel + logging.getLogger("faster_whisper").setLevel(logging.WARNING) if model_dir is not None: logging.debug(f"Loading whisper model from model_dir {model_dir}. 
modelsize and cache_dir parameters are not used.") model_size_or_path = model_dir From e0f5d42b134988996f668ad2ac1b30fb6d5d6420 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dominik=20Mach=C3=A1=C4=8Dek?= Date: Wed, 17 Apr 2024 14:49:12 +0200 Subject: [PATCH 33/43] better documentation, help message and logging prints --- README.md | 2 +- whisper_online.py | 2 +- whisper_online_server.py | 27 +++++++++++++-------------- 3 files changed, 15 insertions(+), 16 deletions(-) diff --git a/README.md b/README.md index 707e58d..e3a7a27 100644 --- a/README.md +++ b/README.md @@ -183,7 +183,7 @@ online.init() # refresh if you're going to re-use the object for the next audio ### Server -- real-time from mic -`whisper_online_server.py` has the same model options as `whisper_online.py`, plus `--host` and `--port` of the TCP connection. See help message (`-h` option). +`whisper_online_server.py` has the same model options as `whisper_online.py`, plus `--host` and `--port` of the TCP connection and the `--warmup-file`. See the help message (`-h` option). 
Client example: diff --git a/whisper_online.py b/whisper_online.py index a00547e..c872b23 100644 --- a/whisper_online.py +++ b/whisper_online.py @@ -625,7 +625,7 @@ if __name__ == "__main__": # load the audio into the LRU cache before we start the timer a = load_audio_chunk(audio_path,0,1) - # warm up the ASR, because the very first transcribe takes much more time than the other + # warm up the ASR because the very first transcribe takes much more time than the other asr.transcribe(a) beg = args.start_at diff --git a/whisper_online_server.py b/whisper_online_server.py index e7ad3f2..263ab75 100644 --- a/whisper_online_server.py +++ b/whisper_online_server.py @@ -10,8 +10,8 @@ parser = argparse.ArgumentParser() # server options parser.add_argument("--host", type=str, default='localhost') parser.add_argument("--port", type=int, default=43007) - -parser.add_argument("--warmup-file", type=str, dest="warmup_file") +parser.add_argument("--warmup-file", type=str, dest="warmup_file", + help="The path to a speech audio wav file to warm up Whisper so that the very first chunk processing is fast. It can be e.g. https://github.com/ggerganov/whisper.cpp/raw/master/samples/jfk.wav .") # options from whisper_online @@ -41,19 +41,18 @@ else: tokenizer = None online = OnlineASRProcessor(asr,tokenizer,buffer_trimming=(args.buffer_trimming, args.buffer_trimming_sec)) - - -if args.warmup_file and os.path.exists(args.warmup_file): - # load the audio into the LRU cache before we start the timer - a = load_audio_chunk(args.warmup_file,0,1) - - # TODO: it should be tested whether it's meaningful - # warm up the ASR, because the very first transcribe takes much more time than the other - asr.transcribe(a) +# warm up the ASR because the very first transcribe takes more time than the others. +# Test results in https://github.com/ufal/whisper_streaming/pull/81 +msg = "Whisper is not warmed up. The first chunk processing may take longer." 
+if args.warmup_file: + if os.path.isfile(args.warmup_file): + a = load_audio_chunk(args.warmup_file,0,1) + asr.transcribe(a) + print("INFO: Whisper is warmed up.",file=sys.stderr) + else: + print("WARNING: The warm up file is not available. "+msg,file=sys.stderr) else: - print("Whisper is not warmed up",file=sys.stderr) - - + print("WARNING: " + msg, file=sys.stderr) ######### Server objects From dcddb17de832510562e84f92d29a1d9402c1acfb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dominik=20Mach=C3=A1=C4=8Dek?= Date: Wed, 17 Apr 2024 15:25:19 +0200 Subject: [PATCH 34/43] UEDIN ack in line_packet.py --- line_packet.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/line_packet.py b/line_packet.py index 364ade2..e73845a 100644 --- a/line_packet.py +++ b/line_packet.py @@ -2,8 +2,6 @@ """Functions for sending and receiving individual lines of text over a socket. -Used by marian-server-server.py to communicate with the Marian worker. - A line is transmitted using one or more fixed-size packets of UTF-8 bytes containing: @@ -11,6 +9,7 @@ containing: - Zero or more \0 bytes as required to pad the packet to PACKET_SIZE +Originally from the UEDIN team of the ELITR project. 
""" PACKET_SIZE = 65536 From 97a4ebdf159dfdff281f209ffe4e3833c012fafe Mon Sep 17 00:00:00 2001 From: Alex Young Date: Wed, 17 Apr 2024 21:58:24 +0100 Subject: [PATCH 35/43] Construct an explicit logger rather than using the root logger --- whisper_online.py | 60 +++++++++++++++++++++------------------- whisper_online_server.py | 19 +++++-------- 2 files changed, 38 insertions(+), 41 deletions(-) diff --git a/whisper_online.py b/whisper_online.py index 25ee2dc..7a55957 100644 --- a/whisper_online.py +++ b/whisper_online.py @@ -11,6 +11,8 @@ import io import soundfile as sf import math +logger = logging.getLogger(__name__) + @lru_cache def load_audio(fname): a, _ = librosa.load(fname, sr=16000, dtype=np.float32) @@ -65,7 +67,7 @@ class WhisperTimestampedASR(ASRBase): from whisper_timestamped import transcribe_timestamped self.transcribe_timestamped = transcribe_timestamped if model_dir is not None: - logging.debug("ignoring model_dir, not implemented") + logger.debug("ignoring model_dir, not implemented") return whisper.load_model(modelsize, download_root=cache_dir) def transcribe(self, audio, init_prompt=""): @@ -106,7 +108,7 @@ class FasterWhisperASR(ASRBase): from faster_whisper import WhisperModel logging.getLogger("faster_whisper").setLevel(logging.WARNING) if model_dir is not None: - logging.debug(f"Loading whisper model from model_dir {model_dir}. modelsize and cache_dir parameters are not used.") + logger.debug(f"Loading whisper model from model_dir {model_dir}. 
modelsize and cache_dir parameters are not used.") model_size_or_path = model_dir elif modelsize is not None: model_size_or_path = modelsize @@ -229,7 +231,7 @@ class OpenaiApiASR(ASRBase): # Process transcription/translation transcript = proc.create(**params) - logging.debug(f"OpenAI API processed accumulated {self.transcribed_seconds} seconds") + logger.debug(f"OpenAI API processed accumulated {self.transcribed_seconds} seconds") return transcript @@ -276,7 +278,7 @@ class HypothesisBuffer: for j in range(i): words.append(repr(self.new.pop(0))) words_msg = "\t".join(words) - logging.debug(f"removing last {i} words: {words_msg}") + logger.debug(f"removing last {i} words: {words_msg}") break def flush(self): @@ -365,9 +367,9 @@ class OnlineASRProcessor: """ prompt, non_prompt = self.prompt() - logging.debug(f"PROMPT: {prompt}") - logging.debug(f"CONTEXT: {non_prompt}") - logging.debug(f"transcribing {len(self.audio_buffer)/self.SAMPLING_RATE:2.2f} seconds from {self.buffer_time_offset:2.2f}") + logger.debug(f"PROMPT: {prompt}") + logger.debug(f"CONTEXT: {non_prompt}") + logger.debug(f"transcribing {len(self.audio_buffer)/self.SAMPLING_RATE:2.2f} seconds from {self.buffer_time_offset:2.2f}") res = self.asr.transcribe(self.audio_buffer, init_prompt=prompt) # transform to [(beg,end,"word1"), ...] 
@@ -377,9 +379,9 @@ class OnlineASRProcessor: o = self.transcript_buffer.flush() self.commited.extend(o) completed = self.to_flush(o) - logging.debug(f">>>>COMPLETE NOW: {completed}") + logger.debug(f">>>>COMPLETE NOW: {completed}") the_rest = self.to_flush(self.transcript_buffer.complete()) - logging.debug(f"INCOMPLETE: {the_rest}") + logger.debug(f"INCOMPLETE: {the_rest}") # there is a newly confirmed text @@ -403,18 +405,18 @@ class OnlineASRProcessor: #while k>0 and self.commited[k][1] > l: # k -= 1 #t = self.commited[k][1] - logging.debug(f"chunking segment") + logger.debug(f"chunking segment") #self.chunk_at(t) - logging.debug(f"len of buffer now: {len(self.audio_buffer)/self.SAMPLING_RATE:2.2f}") + logger.debug(f"len of buffer now: {len(self.audio_buffer)/self.SAMPLING_RATE:2.2f}") return self.to_flush(o) def chunk_completed_sentence(self): if self.commited == []: return - logging.debug(self.commited) + logger.debug(self.commited) sents = self.words_to_sentences(self.commited) for s in sents: - logging.debug(f"\t\tSENT: {s}") + logger.debug(f"\t\tSENT: {s}") if len(sents) < 2: return while len(sents) > 2: @@ -422,7 +424,7 @@ class OnlineASRProcessor: # we will continue with audio processing at this timestamp chunk_at = sents[-2][1] - logging.debug(f"--- sentence chunked at {chunk_at:2.2f}") + logger.debug(f"--- sentence chunked at {chunk_at:2.2f}") self.chunk_at(chunk_at) def chunk_completed_segment(self, res): @@ -439,12 +441,12 @@ class OnlineASRProcessor: ends.pop(-1) e = ends[-2]+self.buffer_time_offset if e <= t: - logging.debug(f"--- segment chunked at {e:2.2f}") + logger.debug(f"--- segment chunked at {e:2.2f}") self.chunk_at(e) else: - logging.debug(f"--- last segment not within commited area") + logger.debug(f"--- last segment not within commited area") else: - logging.debug(f"--- not enough segments to chunk") + logger.debug(f"--- not enough segments to chunk") @@ -490,7 +492,7 @@ class OnlineASRProcessor: """ o = self.transcript_buffer.complete() 
f = self.to_flush(o) - logging.debug("last, noncommited: {f}") + logger.debug("last, noncommited: {f}") return f @@ -530,7 +532,7 @@ def create_tokenizer(lan): # the following languages are in Whisper, but not in wtpsplit: if lan in "as ba bo br bs fo haw hr ht jw lb ln lo mi nn oc sa sd sn so su sw tk tl tt".split(): - logging.debug(f"{lan} code is not supported by wtpsplit. Going to use None lang_code option.") + logger.debug(f"{lan} code is not supported by wtpsplit. Going to use None lang_code option.") lan = None from wtpsplit import WtP @@ -563,7 +565,7 @@ def asr_factory(args, logfile=sys.stderr): """ backend = args.backend if backend == "openai-api": - logging.debug("Using OpenAI API.") + logger.debug("Using OpenAI API.") asr = OpenaiApiASR(lan=args.lan) else: if backend == "faster-whisper": @@ -574,14 +576,14 @@ def asr_factory(args, logfile=sys.stderr): # Only for FasterWhisperASR and WhisperTimestampedASR size = args.model t = time.time() - logging.debug(f"Loading Whisper {size} model for {args.lan}...") + logger.debug(f"Loading Whisper {size} model for {args.lan}...") asr = asr_cls(modelsize=size, lan=args.lan, cache_dir=args.model_cache_dir, model_dir=args.model_dir) e = time.time() - logging.debug(f"done. It took {round(e-t,2)} seconds.") + logger.debug(f"done. It took {round(e-t,2)} seconds.") # Apply common configurations if getattr(args, 'vad', False): # Checks if VAD argument is present and True - logging.info("Setting VAD filter") + logger.info("Setting VAD filter") asr.use_vad() language = args.lan @@ -619,14 +621,14 @@ if __name__ == "__main__": logfile = sys.stderr if args.offline and args.comp_unaware: - logging.error("No or one option from --offline and --comp_unaware are available, not both. Exiting.") + logger.error("No or one option from --offline and --comp_unaware are available, not both. 
Exiting.") sys.exit(1) audio_path = args.audio_path SAMPLING_RATE = 16000 duration = len(load_audio(audio_path))/SAMPLING_RATE - logging.info("Audio duration is: %2.2f seconds" % duration) + logger.info("Audio duration is: %2.2f seconds" % duration) asr, online = asr_factory(args, logfile=logfile) min_chunk = args.min_chunk_size @@ -674,12 +676,12 @@ if __name__ == "__main__": try: o = online.process_iter() except AssertionError as e: - logging.error(f"assertion error: {repr(e)}") + logger.error(f"assertion error: {repr(e)}") pass else: output_transcript(o, now=end) - logging.debug(f"## last processed {end:.2f}s") + logger.debug(f"## last processed {end:.2f}s") if end >= duration: break @@ -706,12 +708,12 @@ if __name__ == "__main__": try: o = online.process_iter() except AssertionError as e: - logging.error(f"assertion error: {e}") + logger.error(f"assertion error: {e}") pass else: output_transcript(o) now = time.time() - start - logging.debug(f"## last processed {end:.2f} s, now is {now:.2f}, the latency is {now-end:.2f}") + logger.debug(f"## last processed {end:.2f} s, now is {now:.2f}, the latency is {now-end:.2f}") if end >= duration: break diff --git a/whisper_online_server.py b/whisper_online_server.py index b97c763..6b08f46 100644 --- a/whisper_online_server.py +++ b/whisper_online_server.py @@ -7,6 +7,8 @@ import os import logging import numpy as np +logger = logging.getLogger(__name__) +print(__name__) parser = argparse.ArgumentParser() # server options @@ -38,13 +40,6 @@ language = args.lan asr, online = asr_factory(args) min_chunk = args.min_chunk_size - -if args.buffer_trimming == "sentence": - tokenizer = create_tokenizer(tgt_language) -else: - tokenizer = None -online = OnlineASRProcessor(asr,tokenizer,buffer_trimming=(args.buffer_trimming, args.buffer_trimming_sec)) - # warm up the ASR because the very first transcribe takes more time than the others. 
# Test results in https://github.com/ufal/whisper_streaming/pull/81 msg = "Whisper is not warmed up. The first chunk processing may take longer." @@ -161,7 +156,7 @@ class ServerProcessor: try: self.send_result(o) except BrokenPipeError: - logging.info("broken pipe -- connection closed?") + logger.info("broken pipe -- connection closed?") break # o = online.finish() # this should be working @@ -175,13 +170,13 @@ with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.bind((args.host, args.port)) s.listen(1) - logging.info('Listening on'+str((args.host, args.port))) + logger.info('Listening on'+str((args.host, args.port))) while True: conn, addr = s.accept() - logging.info('Connected to client on {}'.format(addr)) + logger.info('Connected to client on {}'.format(addr)) connection = Connection(conn) proc = ServerProcessor(connection, online, min_chunk) proc.process() conn.close() - logging.info('Connection to client closed') -logging.info('Connection closed, terminating.') + logger.info('Connection to client closed') +logger.info('Connection closed, terminating.') From df64b4e2c31d17087c943aad23fe27595576ac2e Mon Sep 17 00:00:00 2001 From: Alex Young Date: Wed, 17 Apr 2024 22:03:59 +0100 Subject: [PATCH 36/43] Remove requirements.txt files --- cuda_requirements.txt | 2 -- requirements.txt | 3 --- 2 files changed, 5 deletions(-) delete mode 100644 cuda_requirements.txt delete mode 100644 requirements.txt diff --git a/cuda_requirements.txt b/cuda_requirements.txt deleted file mode 100644 index 720c2fb..0000000 --- a/cuda_requirements.txt +++ /dev/null @@ -1,2 +0,0 @@ -nvidia-cublas-cu12 -nvidia-cudnn-cu11==8.9.6.50 diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 11340c8..0000000 --- a/requirements.txt +++ /dev/null @@ -1,3 +0,0 @@ -librosa -opus-fast-mosestokenizer -faster-whisper From 8060d45aea235010a7cf8eb4f2068a446088a2bc Mon Sep 17 00:00:00 2001 From: Alex Young Date: Wed, 
17 Apr 2024 22:21:41 +0100 Subject: [PATCH 37/43] Default log level to DEBUG, faster-whisper to match --- whisper_online.py | 7 ++++++- whisper_online_server.py | 14 ++++---------- 2 files changed, 10 insertions(+), 11 deletions(-) diff --git a/whisper_online.py b/whisper_online.py index 7a55957..c43253e 100644 --- a/whisper_online.py +++ b/whisper_online.py @@ -106,7 +106,7 @@ class FasterWhisperASR(ASRBase): def load_model(self, modelsize=None, cache_dir=None, model_dir=None): from faster_whisper import WhisperModel - logging.getLogger("faster_whisper").setLevel(logging.WARNING) + logging.getLogger("faster_whisper").setLevel(logger.level) if model_dir is not None: logger.debug(f"Loading whisper model from model_dir {model_dir}. modelsize and cache_dir parameters are not used.") model_size_or_path = model_dir @@ -558,6 +558,7 @@ def add_shared_args(parser): parser.add_argument('--vad', action="store_true", default=False, help='Use VAD = voice activity detection, with the default parameters.') parser.add_argument('--buffer_trimming', type=str, default="segment", choices=["sentence", "segment"],help='Buffer trimming strategy -- trim completed sentences marked with punctuation mark and detected by sentence segmenter, or the completed segments returned by Whisper. Sentence segmenter must be installed for "sentence" option.') parser.add_argument('--buffer_trimming_sec', type=float, default=15, help='Buffer trimming length threshold in seconds. If buffer length is longer, trimming sentence/segment is triggered.') + parser.add_argument("-l", "--log-level", dest="log_level", choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'], help="Set the log level", default='DEBUG') def asr_factory(args, logfile=sys.stderr): """ @@ -624,6 +625,10 @@ if __name__ == "__main__": logger.error("No or one option from --offline and --comp_unaware are available, not both. 
Exiting.") sys.exit(1) + if args.log_level: + logging.basicConfig(format='whisper-%(levelname)s:%(name)s: %(message)s', + level=getattr(logging, args.log_level)) + audio_path = args.audio_path SAMPLING_RATE = 16000 diff --git a/whisper_online_server.py b/whisper_online_server.py index 6b08f46..e852192 100644 --- a/whisper_online_server.py +++ b/whisper_online_server.py @@ -8,7 +8,6 @@ import logging import numpy as np logger = logging.getLogger(__name__) -print(__name__) parser = argparse.ArgumentParser() # server options @@ -17,18 +16,13 @@ parser.add_argument("--port", type=int, default=43007) parser.add_argument("--warmup-file", type=str, dest="warmup_file", help="The path to a speech audio wav file to warm up Whisper so that the very first chunk processing is fast. It can be e.g. https://github.com/ggerganov/whisper.cpp/raw/master/samples/jfk.wav .") -parser.add_argument("-l", "--log-level", dest="log_level", - choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'], - help="Set the log level", - default='INFO') - # options from whisper_online add_shared_args(parser) args = parser.parse_args() if args.log_level: - logging.basicConfig(format='whisper-server-%(levelname)s: %(message)s', + logging.basicConfig(format='whisper-server-%(levelname)s:%(name)s: %(message)s', level=getattr(logging, args.log_level)) # setting whisper object by args @@ -47,11 +41,11 @@ if args.warmup_file: if os.path.isfile(args.warmup_file): a = load_audio_chunk(args.warmup_file,0,1) asr.transcribe(a) - print("INFO: Whisper is warmed up.",file=sys.stderr) + logger.info("Whisper is warmed up.") else: - print("WARNING: The warm up file is not available. "+msg,file=sys.stderr) + logger.warning("The warm up file is not available. 
"+msg) else: - print("WARNING: " + msg, file=sys.stderr) + logger.warning(msg) ######### Server objects From 7286dfdfa1dae69d0b6b7b23b8eda0830f2615ed Mon Sep 17 00:00:00 2001 From: Alex Young Date: Wed, 17 Apr 2024 22:40:18 +0100 Subject: [PATCH 38/43] Add a debug log line when no text is detected --- whisper_online_server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/whisper_online_server.py b/whisper_online_server.py index e852192..dfaace3 100644 --- a/whisper_online_server.py +++ b/whisper_online_server.py @@ -130,7 +130,7 @@ class ServerProcessor: print("%1.0f %1.0f %s" % (beg,end,o[2]),flush=True,file=sys.stderr) return "%1.0f %1.0f %s" % (beg,end,o[2]) else: - # No text, so no output + logger.debug("No text in this segment") return None def send_result(self, o): From b50f68749b0dd999a3e2ee6607ed195f600e5896 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dominik=20Mach=C3=A1=C4=8Dek?= Date: Thu, 18 Apr 2024 18:10:25 +0200 Subject: [PATCH 39/43] checks and changes in logging - don't set the level for submodules, it's too verbose - etc. --- whisper_online.py | 28 +++++++++++++++++++--------- whisper_online_server.py | 7 +++---- 2 files changed, 22 insertions(+), 13 deletions(-) diff --git a/whisper_online.py b/whisper_online.py index c43253e..25f5396 100644 --- a/whisper_online.py +++ b/whisper_online.py @@ -106,7 +106,7 @@ class FasterWhisperASR(ASRBase): def load_model(self, modelsize=None, cache_dir=None, model_dir=None): from faster_whisper import WhisperModel - logging.getLogger("faster_whisper").setLevel(logger.level) +# logging.getLogger("faster_whisper").setLevel(logger.level) if model_dir is not None: logger.debug(f"Loading whisper model from model_dir {model_dir}. 
modelsize and cache_dir parameters are not used.") model_size_or_path = model_dir @@ -277,7 +277,7 @@ class HypothesisBuffer: words = [] for j in range(i): words.append(repr(self.new.pop(0))) - words_msg = "\t".join(words) + words_msg = " ".join(words) logger.debug(f"removing last {i} words: {words_msg}") break @@ -405,7 +405,7 @@ class OnlineASRProcessor: #while k>0 and self.commited[k][1] > l: # k -= 1 #t = self.commited[k][1] - logger.debug(f"chunking segment") + logger.debug("chunking segment") #self.chunk_at(t) logger.debug(f"len of buffer now: {len(self.audio_buffer)/self.SAMPLING_RATE:2.2f}") @@ -577,10 +577,10 @@ def asr_factory(args, logfile=sys.stderr): # Only for FasterWhisperASR and WhisperTimestampedASR size = args.model t = time.time() - logger.debug(f"Loading Whisper {size} model for {args.lan}...") + logger.info(f"Loading Whisper {size} model for {args.lan}...") asr = asr_cls(modelsize=size, lan=args.lan, cache_dir=args.model_cache_dir, model_dir=args.model_dir) e = time.time() - logger.debug(f"done. It took {round(e-t,2)} seconds.") + logger.info(f"done. It took {round(e-t,2)} seconds.") # Apply common configurations if getattr(args, 'vad', False): # Checks if VAD argument is present and True @@ -604,7 +604,15 @@ def asr_factory(args, logfile=sys.stderr): online = OnlineASRProcessor(asr,tokenizer,logfile=logfile,buffer_trimming=(args.buffer_trimming, args.buffer_trimming_sec)) return asr, online -## main: + +def set_logging(args,logger,other="_server"): + logging.basicConfig(#format='%(name)s + format='%(levelname)s\t%(message)s') + logger.setLevel(args.log_level) + logging.getLogger("whisper_online"+other).setLevel(args.log_level) +# logging.getLogger("whisper_online_server").setLevel(args.log_level) + + if __name__ == "__main__": @@ -625,9 +633,11 @@ if __name__ == "__main__": logger.error("No or one option from --offline and --comp_unaware are available, not both. 
Exiting.") sys.exit(1) - if args.log_level: - logging.basicConfig(format='whisper-%(levelname)s:%(name)s: %(message)s', - level=getattr(logging, args.log_level)) +# if args.log_level: +# logging.basicConfig(format='whisper-%(levelname)s:%(name)s: %(message)s', +# level=getattr(logging, args.log_level)) + + set_logging(args,logger) audio_path = args.audio_path diff --git a/whisper_online_server.py b/whisper_online_server.py index dfaace3..c8dc335 100644 --- a/whisper_online_server.py +++ b/whisper_online_server.py @@ -21,9 +21,7 @@ parser.add_argument("--warmup-file", type=str, dest="warmup_file", add_shared_args(parser) args = parser.parse_args() -if args.log_level: - logging.basicConfig(format='whisper-server-%(levelname)s:%(name)s: %(message)s', - level=getattr(logging, args.log_level)) +set_logging(args,logger,other="") # setting whisper object by args @@ -43,7 +41,8 @@ if args.warmup_file: asr.transcribe(a) logger.info("Whisper is warmed up.") else: - logger.warning("The warm up file is not available. "+msg) + logger.critical("The warm up file is not available. 
"+msg) + sys.exit(1) else: logger.warning(msg) From 264b8a32c2bc53a69e467f67da2a200ca4e008a8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dominik=20Mach=C3=A1=C4=8Dek?= Date: Thu, 2 May 2024 10:32:53 +0200 Subject: [PATCH 40/43] forgot f in the last debug print --- whisper_online.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/whisper_online.py b/whisper_online.py index 25f5396..d0bd380 100644 --- a/whisper_online.py +++ b/whisper_online.py @@ -492,7 +492,7 @@ class OnlineASRProcessor: """ o = self.transcript_buffer.complete() f = self.to_flush(o) - logger.debug("last, noncommited: {f}") + logger.debug(f"last, noncommited: {f}") return f From 7bca7a2b8e45c7822d17b74aaa505d5e643ffeb6 Mon Sep 17 00:00:00 2001 From: wenli Date: Wed, 15 May 2024 23:49:24 +0800 Subject: [PATCH 41/43] Update whisper_online.py --- whisper_online.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/whisper_online.py b/whisper_online.py index d0bd380..cf94fed 100644 --- a/whisper_online.py +++ b/whisper_online.py @@ -679,7 +679,7 @@ if __name__ == "__main__": try: o = online.process_iter() except AssertionError as e: - log.error(f"assertion error: {repr(e)}") + logger.error(f"assertion error: {repr(e)}") else: output_transcript(o) now = None From 9c15262015847cf9dd403489dae2b1216962c85c Mon Sep 17 00:00:00 2001 From: Doiiars Date: Mon, 20 May 2024 10:50:33 +0800 Subject: [PATCH 42/43] fix re-creation bug fix re-creation bug --- whisper_online_server.py | 1 - 1 file changed, 1 deletion(-) diff --git a/whisper_online_server.py b/whisper_online_server.py index c8dc335..bcb934b 100644 --- a/whisper_online_server.py +++ b/whisper_online_server.py @@ -160,7 +160,6 @@ class ServerProcessor: # server loop with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: - s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.bind((args.host, args.port)) s.listen(1) logger.info('Listening on'+str((args.host, args.port))) From 84a999570a1bd66ca4ea423b8e5f3c62eebb03f4 Mon 
Sep 17 00:00:00 2001 From: =?UTF-8?q?Dominik=20Mach=C3=A1=C4=8Dek?= Date: Tue, 13 Aug 2024 14:53:28 +0200 Subject: [PATCH 43/43] link to a nice video --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index e3a7a27..eb0ec35 100644 --- a/README.md +++ b/README.md @@ -233,6 +233,7 @@ overlap, and we limit the processing buffer window. Contributions are welcome. We acknowledge especially: - [The GitHub contributors](https://github.com/ufal/whisper_streaming/graphs/contributors) for their pull requests with new features and bugfixes. +- [Nice explanation video](https://www.youtube.com/watch?v=_spinzpEeFM) -- published on 31st March 2024, note that newer updates are not included. - [The translation of this repo into Chinese.](https://github.com/Gloridust/whisper_streaming_CN) - [Ondřej Plátek](https://opla.cz/) for the paper pre-review. - [Peter Polák](https://ufal.mff.cuni.cz/peter-polak) for the original idea.