mirror of
https://github.com/moltbot/moltbot.git
synced 2026-03-21 16:41:56 +00:00
fix: add "audio" to openai provider capabilities
The openai provider implements transcribeAudio via transcribeOpenAiCompatibleAudio (Whisper API), but its capabilities array only declared ["image"]. This caused the media-understanding runner to skip the openai provider when processing inbound audio messages, resulting in raw audio files being passed to agents instead of transcribed text. Fix: Add "audio" to the capabilities array so the runner correctly selects the openai provider for audio transcription. Co-authored-by: Cursor <cursoragent@cursor.com>
This commit is contained in:
committed by
Peter Steinberger
parent
6a425d189e
commit
76d6514ff5
@@ -4,7 +4,7 @@ import { transcribeOpenAiCompatibleAudio } from "./audio.js";

 export const openaiProvider: MediaUnderstandingProvider = {
   id: "openai",
-  capabilities: ["image"],
+  capabilities: ["image", "audio"],
   describeImage: describeImageWithModel,
   transcribeAudio: transcribeOpenAiCompatibleAudio,
 };
Reference in New Issue
Block a user