diff --git a/README.md b/README.md
index 707e58d..e3a7a27 100644
--- a/README.md
+++ b/README.md
@@ -183,7 +183,7 @@ online.init() # refresh if you're going to re-use the object for the next audio
 
 ### Server -- real-time from mic
 
-`whisper_online_server.py` has the same model options as `whisper_online.py`, plus `--host` and `--port` of the TCP connection. See help message (`-h` option).
+`whisper_online_server.py` has the same model options as `whisper_online.py`, plus `--host` and `--port` of the TCP connection and the `--warmup-file`. See the help message (`-h` option).
 
 Client example:
 
diff --git a/whisper_online.py b/whisper_online.py
index a00547e..c872b23 100644
--- a/whisper_online.py
+++ b/whisper_online.py
@@ -625,7 +625,7 @@ if __name__ == "__main__":
     # load the audio into the LRU cache before we start the timer
     a = load_audio_chunk(audio_path,0,1)
 
-    # warm up the ASR, because the very first transcribe takes much more time than the other
+    # warm up the ASR because the very first transcribe takes much more time than the other
     asr.transcribe(a)
 
     beg = args.start_at
diff --git a/whisper_online_server.py b/whisper_online_server.py
index e7ad3f2..263ab75 100644
--- a/whisper_online_server.py
+++ b/whisper_online_server.py
@@ -10,8 +10,8 @@ parser = argparse.ArgumentParser()
 # server options
 parser.add_argument("--host", type=str, default='localhost')
 parser.add_argument("--port", type=int, default=43007)
-
-parser.add_argument("--warmup-file", type=str, dest="warmup_file")
+parser.add_argument("--warmup-file", type=str, dest="warmup_file",
+        help="The path to a speech audio wav file to warm up Whisper so that the very first chunk processing is fast. It can be e.g. https://github.com/ggerganov/whisper.cpp/raw/master/samples/jfk.wav .")
 
 # options from whisper_online
 
@@ -41,19 +41,18 @@ else:
     tokenizer = None
 online = OnlineASRProcessor(asr,tokenizer,buffer_trimming=(args.buffer_trimming, args.buffer_trimming_sec))
-
-
-if args.warmup_file and os.path.exists(args.warmup_file):
-    # load the audio into the LRU cache before we start the timer
-    a = load_audio_chunk(args.warmup_file,0,1)
-
-    # TODO: it should be tested whether it's meaningful
-    # warm up the ASR, because the very first transcribe takes much more time than the other
-    asr.transcribe(a)
+# warm up the ASR because the very first transcribe takes more time than the others.
+# Test results in https://github.com/ufal/whisper_streaming/pull/81
+msg = "Whisper is not warmed up. The first chunk processing may take longer."
+if args.warmup_file:
+    if os.path.isfile(args.warmup_file):
+        a = load_audio_chunk(args.warmup_file,0,1)
+        asr.transcribe(a)
+        print("INFO: Whisper is warmed up.",file=sys.stderr)
+    else:
+        print("WARNING: The warm up file is not available. "+msg,file=sys.stderr)
 else:
-    print("Whisper is not warmed up",file=sys.stderr)
-
-
+    print("WARNING: " + msg, file=sys.stderr)
 
 
 ######### Server objects
 