From b3647da0875f9f4f5d2e57b0a57efdc1cf5cfa10 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dominik=20Mach=C3=A1=C4=8Dek?= Date: Fri, 29 Mar 2024 09:19:59 +0100 Subject: [PATCH 1/6] Update README.md PDF link --- README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 16d3a23..11cc555 100644 --- a/README.md +++ b/README.md @@ -8,7 +8,9 @@ Demonstration paper, by Dominik Macháček, Raj Dabre, Ondřej Bojar, 2023 Abstract: Whisper is one of the recent state-of-the-art multilingual speech recognition and translation models, however, it is not designed for real time transcription. In this paper, we build on top of Whisper and create Whisper-Streaming, an implementation of real-time speech transcription and translation of Whisper-like models. Whisper-Streaming uses local agreement policy with self-adaptive latency to enable streaming transcription. We show that Whisper-Streaming achieves high quality and 3.3 seconds latency on unsegmented long-form speech transcription test set, and we demonstrate its robustness and practical usability as a component in live transcription service at a multilingual conference. -Paper in proceedings: http://www.afnlp.org/conferences/ijcnlp2023/proceedings/main-demo/cdrom/pdf/2023.ijcnlp-demo.3.pdf +Paper PDF: +https://aclanthology.org/2023.ijcnlp-demo.3.pdf + Demo video: https://player.vimeo.com/video/840442741 From 8223afee7888607879870ae11acd246e4f7a6ae2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tobias=20G=C3=A5rdhus?= Date: Fri, 29 Mar 2024 19:35:30 +0100 Subject: [PATCH 2/6] Update README.md Add Python syntax highlighting to code chunk --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 11cc555..ae63e19 100644 --- a/README.md +++ b/README.md @@ -159,7 +159,7 @@ The code whisper_online.py is nicely commented, read it as the full documentatio This pseudocode describes the interface that we suggest for your implementation. 
You can implement any features that you need for your application. -``` +```python from whisper_online import * src_lan = "en" # source language From d497503b5c86b6e4cf21eb9f1f4eaa4e60e1b1f5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dominik=20Mach=C3=A1=C4=8Dek?= Date: Wed, 10 Apr 2024 18:13:07 +0200 Subject: [PATCH 3/6] Contributions at README.md + nicer formatting + #77 --- README.md | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index ae63e19..707e58d 100644 --- a/README.md +++ b/README.md @@ -3,16 +3,12 @@ Whisper realtime streaming for long speech-to-text transcription and translation **Turning Whisper into Real-Time Transcription System** -Demonstration paper, by Dominik Macháček, Raj Dabre, Ondřej Bojar, 2023 +Demonstration paper, by [Dominik Macháček](https://ufal.mff.cuni.cz/dominik-machacek), [Raj Dabre](https://prajdabre.github.io/), [Ondřej Bojar](https://ufal.mff.cuni.cz/ondrej-bojar), 2023 -Abstract: Whisper is one of the recent state-of-the-art multilingual speech recognition and translation models, however, it is not designed for real time transcription. In this paper, we build on top of Whisper and create Whisper-Streaming, an implementation of real-time speech transcription and translation of Whisper-like models. Whisper-Streaming uses local agreement policy with self-adaptive latency to enable streaming transcription. We show that Whisper-Streaming achieves high quality and 3.3 seconds latency on unsegmented long-form speech transcription test set, and we demonstrate its robustness and practical usability as a component in live transcription service at a multilingual conference. +Abstract: Whisper is one of the recent state-of-the-art multilingual speech recognition and translation models, however, it is not designed for real-time transcription. 
In this paper, we build on top of Whisper and create Whisper-Streaming, an implementation of real-time speech transcription and translation of Whisper-like models. Whisper-Streaming uses local agreement policy with self-adaptive latency to enable streaming transcription. We show that Whisper-Streaming achieves high quality and 3.3 seconds latency on unsegmented long-form speech transcription test set, and we demonstrate its robustness and practical usability as a component in live transcription service at a multilingual conference. -Paper PDF: -https://aclanthology.org/2023.ijcnlp-demo.3.pdf - - -Demo video: https://player.vimeo.com/video/840442741 +[Paper PDF](https://aclanthology.org/2023.ijcnlp-demo.3.pdf), [Demo video](https://player.vimeo.com/video/840442741) [Slides](http://ufallab.ms.mff.cuni.cz/~machacek/pre-prints/AACL23-2.11.2023-Turning-Whisper-oral.pdf) -- 15 minutes oral presentation at IJCNLP-AACL 2023 @@ -228,12 +224,20 @@ In more detail: we use the init prompt, we handle the inaccurate timestamps, we re-process confirmed sentence prefixes and skip them, making sure they don't overlap, and we limit the processing buffer window. -Contributions are welcome. - ### Performance evaluation [See the paper.](http://www.afnlp.org/conferences/ijcnlp2023/proceedings/main-demo/cdrom/pdf/2023.ijcnlp-demo.3.pdf) +### Contributions + +Contributions are welcome. We acknowledge especially: + +- [The GitHub contributors](https://github.com/ufal/whisper_streaming/graphs/contributors) for their pull requests with new features and bugfixes. +- [The translation of this repo into Chinese.](https://github.com/Gloridust/whisper_streaming_CN) +- [Ondřej Plátek](https://opla.cz/) for the paper pre-review. +- [Peter Polák](https://ufal.mff.cuni.cz/peter-polak) for the original idea. +- The UEDIN team of the [ELITR project](https://elitr.eu) for the original line_packet.py. 
+ ## Contact From 70bc57180c172f1829eaccff81e3d90132282cc4 Mon Sep 17 00:00:00 2001 From: Alex Young Date: Sun, 14 Apr 2024 19:29:46 +0100 Subject: [PATCH 4/6] Add a --warmup-file option to pass in a path --- whisper_online_server.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/whisper_online_server.py b/whisper_online_server.py index b2f5120..9a4cc0b 100644 --- a/whisper_online_server.py +++ b/whisper_online_server.py @@ -10,6 +10,8 @@ parser = argparse.ArgumentParser() parser.add_argument("--host", type=str, default='localhost') parser.add_argument("--port", type=int, default=43007) +parser.add_argument("--warmup-file", type=str, dest="warmup_file") + # options from whisper_online add_shared_args(parser) @@ -61,10 +63,9 @@ online = OnlineASRProcessor(asr,tokenizer,buffer_trimming=(args.buffer_trimming, -demo_audio_path = "cs-maji-2.16k.wav" -if os.path.exists(demo_audio_path): +if os.path.exists(args.warmup_file): # load the audio into the LRU cache before we start the timer - a = load_audio_chunk(demo_audio_path,0,1) + a = load_audio_chunk(args.warmup_file,0,1) # TODO: it should be tested whether it's meaningful # warm up the ASR, because the very first transcribe takes much more time than the other From fc4b3cd5188664e1ca64475594909b8820762448 Mon Sep 17 00:00:00 2001 From: Alex Young Date: Sun, 14 Apr 2024 19:38:41 +0100 Subject: [PATCH 5/6] Check whether we are passed a warmup file before trying to see if it exists --- whisper_online_server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/whisper_online_server.py b/whisper_online_server.py index 9a4cc0b..fdd6a46 100644 --- a/whisper_online_server.py +++ b/whisper_online_server.py @@ -63,7 +63,7 @@ online = OnlineASRProcessor(asr,tokenizer,buffer_trimming=(args.buffer_trimming, -if os.path.exists(args.warmup_file): +if args.warmup_file and os.path.exists(args.warmup_file): # load the audio into the LRU cache before we start the timer a = 
load_audio_chunk(args.warmup_file,0,1) From e0f5d42b134988996f668ad2ac1b30fb6d5d6420 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dominik=20Mach=C3=A1=C4=8Dek?= Date: Wed, 17 Apr 2024 14:49:12 +0200 Subject: [PATCH 6/6] better documentation, help message and logging prints --- README.md | 2 +- whisper_online.py | 2 +- whisper_online_server.py | 27 +++++++++++++-------------- 3 files changed, 15 insertions(+), 16 deletions(-) diff --git a/README.md b/README.md index 707e58d..e3a7a27 100644 --- a/README.md +++ b/README.md @@ -183,7 +183,7 @@ online.init() # refresh if you're going to re-use the object for the next audio ### Server -- real-time from mic -`whisper_online_server.py` has the same model options as `whisper_online.py`, plus `--host` and `--port` of the TCP connection. See help message (`-h` option). +`whisper_online_server.py` has the same model options as `whisper_online.py`, plus `--host` and `--port` of the TCP connection and the `--warmup-file`. See the help message (`-h` option). 
Client example: diff --git a/whisper_online.py b/whisper_online.py index a00547e..c872b23 100644 --- a/whisper_online.py +++ b/whisper_online.py @@ -625,7 +625,7 @@ if __name__ == "__main__": # load the audio into the LRU cache before we start the timer a = load_audio_chunk(audio_path,0,1) - # warm up the ASR, because the very first transcribe takes much more time than the other + # warm up the ASR because the very first transcribe takes much more time than the other asr.transcribe(a) beg = args.start_at diff --git a/whisper_online_server.py b/whisper_online_server.py index e7ad3f2..263ab75 100644 --- a/whisper_online_server.py +++ b/whisper_online_server.py @@ -10,8 +10,8 @@ parser = argparse.ArgumentParser() # server options parser.add_argument("--host", type=str, default='localhost') parser.add_argument("--port", type=int, default=43007) - -parser.add_argument("--warmup-file", type=str, dest="warmup_file") +parser.add_argument("--warmup-file", type=str, dest="warmup_file", + help="The path to a speech audio wav file to warm up Whisper so that the very first chunk processing is fast. It can be e.g. https://github.com/ggerganov/whisper.cpp/raw/master/samples/jfk.wav .") # options from whisper_online @@ -41,19 +41,18 @@ else: tokenizer = None online = OnlineASRProcessor(asr,tokenizer,buffer_trimming=(args.buffer_trimming, args.buffer_trimming_sec)) - - -if args.warmup_file and os.path.exists(args.warmup_file): - # load the audio into the LRU cache before we start the timer - a = load_audio_chunk(args.warmup_file,0,1) - - # TODO: it should be tested whether it's meaningful - # warm up the ASR, because the very first transcribe takes much more time than the other - asr.transcribe(a) +# warm up the ASR because the very first transcribe takes more time than the others. +# Test results in https://github.com/ufal/whisper_streaming/pull/81 +msg = "Whisper is not warmed up. The first chunk processing may take longer." 
+if args.warmup_file: + if os.path.isfile(args.warmup_file): + a = load_audio_chunk(args.warmup_file,0,1) + asr.transcribe(a) + print("INFO: Whisper is warmed up.",file=sys.stderr) + else: + print("WARNING: The warm up file is not available. "+msg,file=sys.stderr) else: - print("Whisper is not warmed up",file=sys.stderr) - - + print("WARNING: " + msg, file=sys.stderr) ######### Server objects