Compare commits
37 Commits
language-d
...
0.2.12
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
12a69205ed | ||
|
|
1f684cdd97 | ||
|
|
290470dd60 | ||
|
|
425ac7b51d | ||
|
|
0382cfbeba | ||
|
|
9b1e061b32 | ||
|
|
b4abc158b9 | ||
|
|
5832d7433d | ||
|
|
3736458503 | ||
|
|
374618e050 | ||
|
|
543972ef38 | ||
|
|
971f8473eb | ||
|
|
8434ef5efc | ||
|
|
73f36cc0ef | ||
|
|
a7db39d999 | ||
|
|
a153e11fe0 | ||
|
|
ca6f9246cc | ||
|
|
d080d675a8 | ||
|
|
40bff38933 | ||
|
|
2fe3ca0188 | ||
|
|
545ea15c9a | ||
|
|
8cbaeecc75 | ||
|
|
70e854b346 | ||
|
|
d55490cd27 | ||
|
|
1fa9e1f656 | ||
|
|
994f30e1ed | ||
|
|
b22478c0b4 | ||
|
|
94c34efd90 | ||
|
|
32099b9275 | ||
|
|
9fc6654a4a | ||
|
|
d24c110d55 | ||
|
|
4dd5d8bf8a | ||
|
|
cd9a32a36b | ||
|
|
6caf3e0485 | ||
|
|
93f002cafb | ||
|
|
c5e30c2c07 | ||
|
|
1c2afb8bd2 |
13
README.md
@@ -54,7 +54,15 @@ pip install whisperlivekit
|
|||||||
> - See [tokenizer.py](https://github.com/QuentinFuxa/WhisperLiveKit/blob/main/whisperlivekit/simul_whisper/whisper/tokenizer.py) for the list of all available languages.
|
> - See [tokenizer.py](https://github.com/QuentinFuxa/WhisperLiveKit/blob/main/whisperlivekit/simul_whisper/whisper/tokenizer.py) for the list of all available languages.
|
||||||
> - For HTTPS requirements, see the **Parameters** section for SSL configuration options.
|
> - For HTTPS requirements, see the **Parameters** section for SSL configuration options.
|
||||||
|
|
||||||
|
#### Use it to capture audio from web pages.
|
||||||
|
|
||||||
|
Go to `chrome-extension` for instructions.
|
||||||
|
|
||||||
|
<p align="center">
|
||||||
|
<img src="https://raw.githubusercontent.com/QuentinFuxa/WhisperLiveKit/refs/heads/main/chrome-extension/demo-extension.png" alt="WhisperLiveKit Demo" width="600">
|
||||||
|
</p>
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
#### Optional Dependencies
|
#### Optional Dependencies
|
||||||
|
|
||||||
@@ -132,6 +140,7 @@ async def websocket_endpoint(websocket: WebSocket):
|
|||||||
| Parameter | Description | Default |
|
| Parameter | Description | Default |
|
||||||
|-----------|-------------|---------|
|
|-----------|-------------|---------|
|
||||||
| `--model` | Whisper model size. List and recommandations [here](https://github.com/QuentinFuxa/WhisperLiveKit/blob/main/available_models.md) | `small` |
|
| `--model` | Whisper model size. List and recommandations [here](https://github.com/QuentinFuxa/WhisperLiveKit/blob/main/available_models.md) | `small` |
|
||||||
|
| `--model-dir` | Directory containing Whisper model.bin and other files. Overrides `--model`. | `None` |
|
||||||
| `--language` | List [here](https://github.com/QuentinFuxa/WhisperLiveKit/blob/main/whisperlivekit/simul_whisper/whisper/tokenizer.py). If you use `auto`, the model attempts to detect the language automatically, but it tends to bias towards English. | `auto` |
|
| `--language` | List [here](https://github.com/QuentinFuxa/WhisperLiveKit/blob/main/whisperlivekit/simul_whisper/whisper/tokenizer.py). If you use `auto`, the model attempts to detect the language automatically, but it tends to bias towards English. | `auto` |
|
||||||
| `--target-language` | If sets, activates translation using NLLB. Ex: `fr`. [118 languages available](https://github.com/QuentinFuxa/WhisperLiveKit/blob/main/whisperlivekit/translation/mapping_languages.py). If you want to translate to english, you should rather use `--task translate`, since Whisper can do it directly. | `None` |
|
| `--target-language` | If sets, activates translation using NLLB. Ex: `fr`. [118 languages available](https://github.com/QuentinFuxa/WhisperLiveKit/blob/main/whisperlivekit/translation/mapping_languages.py). If you want to translate to english, you should rather use `--task translate`, since Whisper can do it directly. | `None` |
|
||||||
| `--task` | Set to `translate` to translate *only* to english, using Whisper translation. | `transcribe` |
|
| `--task` | Set to `translate` to translate *only* to english, using Whisper translation. | `transcribe` |
|
||||||
@@ -144,6 +153,7 @@ async def websocket_endpoint(websocket: WebSocket):
|
|||||||
| `--port` | Server port | `8000` |
|
| `--port` | Server port | `8000` |
|
||||||
| `--ssl-certfile` | Path to the SSL certificate file (for HTTPS support) | `None` |
|
| `--ssl-certfile` | Path to the SSL certificate file (for HTTPS support) | `None` |
|
||||||
| `--ssl-keyfile` | Path to the SSL private key file (for HTTPS support) | `None` |
|
| `--ssl-keyfile` | Path to the SSL private key file (for HTTPS support) | `None` |
|
||||||
|
| `--forwarded-allow-ips` | Ip or Ips allowed to reverse proxy the whisperlivekit-server. Supported types are IP Addresses (e.g. 127.0.0.1), IP Networks (e.g. 10.100.0.0/16), or Literals (e.g. /path/to/socket.sock) | `None` |
|
||||||
| `--pcm-input` | raw PCM (s16le) data is expected as input and FFmpeg will be bypassed. Frontend will use AudioWorklet instead of MediaRecorder | `False` |
|
| `--pcm-input` | raw PCM (s16le) data is expected as input and FFmpeg will be bypassed. Frontend will use AudioWorklet instead of MediaRecorder | `False` |
|
||||||
|
|
||||||
| Translation options | Description | Default |
|
| Translation options | Description | Default |
|
||||||
@@ -161,6 +171,7 @@ async def websocket_endpoint(websocket: WebSocket):
|
|||||||
| SimulStreaming backend options | Description | Default |
|
| SimulStreaming backend options | Description | Default |
|
||||||
|-----------|-------------|---------|
|
|-----------|-------------|---------|
|
||||||
| `--disable-fast-encoder` | Disable Faster Whisper or MLX Whisper backends for the encoder (if installed). Inference can be slower but helpful when GPU memory is limited | `False` |
|
| `--disable-fast-encoder` | Disable Faster Whisper or MLX Whisper backends for the encoder (if installed). Inference can be slower but helpful when GPU memory is limited | `False` |
|
||||||
|
| `--custom-alignment-heads` | Use your own alignment heads, useful when `--model-dir` is used | `None` |
|
||||||
| `--frame-threshold` | AlignAtt frame threshold (lower = faster, higher = more accurate) | `25` |
|
| `--frame-threshold` | AlignAtt frame threshold (lower = faster, higher = more accurate) | `25` |
|
||||||
| `--beams` | Number of beams for beam search (1 = greedy decoding) | `1` |
|
| `--beams` | Number of beams for beam search (1 = greedy decoding) | `1` |
|
||||||
| `--decoder` | Force decoder type (`beam` or `greedy`) | `auto` |
|
| `--decoder` | Force decoder type (`beam` or `greedy`) | `auto` |
|
||||||
|
|||||||
BIN
architecture.png
|
Before Width: | Height: | Size: 368 KiB After Width: | Height: | Size: 406 KiB |
@@ -1,11 +1,13 @@
|
|||||||
## WhisperLiveKit Chrome Extension v0.1.0
|
## WhisperLiveKit Chrome Extension v0.1.1
|
||||||
Capture the audio of your current tab, transcribe or translate it using WhisperliveKit. **Still unstable**
|
Capture the audio of your current tab, transcribe diarize and translate it using WhisperliveKit, in Chrome and other Chromium-based browsers.
|
||||||
|
|
||||||
|
> Currently, only the tab audio is captured; your microphone audio is not recorded.
|
||||||
|
|
||||||
<img src="https://raw.githubusercontent.com/QuentinFuxa/WhisperLiveKit/refs/heads/main/chrome-extension/demo-extension.png" alt="WhisperLiveKit Demo" width="730">
|
<img src="https://raw.githubusercontent.com/QuentinFuxa/WhisperLiveKit/refs/heads/main/chrome-extension/demo-extension.png" alt="WhisperLiveKit Demo" width="730">
|
||||||
|
|
||||||
## Running this extension
|
## Running this extension
|
||||||
1. Clone this repository.
|
1. Run `python sync_extension.py` to copy frontend files to the `chrome-extension` directory.
|
||||||
2. Load this directory in Chrome as an unpacked extension.
|
2. Load the `chrome-extension` directory in Chrome as an unpacked extension.
|
||||||
|
|
||||||
|
|
||||||
## Devs:
|
## Devs:
|
||||||
|
|||||||
|
Before Width: | Height: | Size: 1.2 MiB After Width: | Height: | Size: 5.8 MiB |
@@ -1,669 +0,0 @@
|
|||||||
/* Theme, WebSocket, recording, rendering logic extracted from inline script and adapted for segmented theme control and WS caption */
|
|
||||||
let isRecording = false;
|
|
||||||
let websocket = null;
|
|
||||||
let recorder = null;
|
|
||||||
let chunkDuration = 100;
|
|
||||||
let websocketUrl = "ws://localhost:8000/asr";
|
|
||||||
let userClosing = false;
|
|
||||||
let wakeLock = null;
|
|
||||||
let startTime = null;
|
|
||||||
let timerInterval = null;
|
|
||||||
let audioContext = null;
|
|
||||||
let analyser = null;
|
|
||||||
let microphone = null;
|
|
||||||
let waveCanvas = document.getElementById("waveCanvas");
|
|
||||||
let waveCtx = waveCanvas.getContext("2d");
|
|
||||||
let animationFrame = null;
|
|
||||||
let waitingForStop = false;
|
|
||||||
let lastReceivedData = null;
|
|
||||||
let lastSignature = null;
|
|
||||||
let availableMicrophones = [];
|
|
||||||
let selectedMicrophoneId = null;
|
|
||||||
|
|
||||||
waveCanvas.width = 60 * (window.devicePixelRatio || 1);
|
|
||||||
waveCanvas.height = 30 * (window.devicePixelRatio || 1);
|
|
||||||
waveCtx.scale(window.devicePixelRatio || 1, window.devicePixelRatio || 1);
|
|
||||||
|
|
||||||
const statusText = document.getElementById("status");
|
|
||||||
const recordButton = document.getElementById("recordButton");
|
|
||||||
const chunkSelector = document.getElementById("chunkSelector");
|
|
||||||
const websocketInput = document.getElementById("websocketInput");
|
|
||||||
const websocketDefaultSpan = document.getElementById("wsDefaultUrl");
|
|
||||||
const linesTranscriptDiv = document.getElementById("linesTranscript");
|
|
||||||
const timerElement = document.querySelector(".timer");
|
|
||||||
const themeRadios = document.querySelectorAll('input[name="theme"]');
|
|
||||||
const microphoneSelect = document.getElementById("microphoneSelect");
|
|
||||||
const settingsToggle = document.getElementById("settingsToggle");
|
|
||||||
const settingsDiv = document.querySelector(".settings");
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
chrome.runtime.onInstalled.addListener((details) => {
|
|
||||||
if (details.reason.search(/install/g) === -1) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
chrome.tabs.create({
|
|
||||||
url: chrome.runtime.getURL("welcome.html"),
|
|
||||||
active: true
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
function getWaveStroke() {
|
|
||||||
const styles = getComputedStyle(document.documentElement);
|
|
||||||
const v = styles.getPropertyValue("--wave-stroke").trim();
|
|
||||||
return v || "#000";
|
|
||||||
}
|
|
||||||
|
|
||||||
let waveStroke = getWaveStroke();
|
|
||||||
function updateWaveStroke() {
|
|
||||||
waveStroke = getWaveStroke();
|
|
||||||
}
|
|
||||||
|
|
||||||
function applyTheme(pref) {
|
|
||||||
if (pref === "light") {
|
|
||||||
document.documentElement.setAttribute("data-theme", "light");
|
|
||||||
} else if (pref === "dark") {
|
|
||||||
document.documentElement.setAttribute("data-theme", "dark");
|
|
||||||
} else {
|
|
||||||
document.documentElement.removeAttribute("data-theme");
|
|
||||||
}
|
|
||||||
updateWaveStroke();
|
|
||||||
}
|
|
||||||
|
|
||||||
// Persisted theme preference
|
|
||||||
const savedThemePref = localStorage.getItem("themePreference") || "system";
|
|
||||||
applyTheme(savedThemePref);
|
|
||||||
if (themeRadios.length) {
|
|
||||||
themeRadios.forEach((r) => {
|
|
||||||
r.checked = r.value === savedThemePref;
|
|
||||||
r.addEventListener("change", () => {
|
|
||||||
if (r.checked) {
|
|
||||||
localStorage.setItem("themePreference", r.value);
|
|
||||||
applyTheme(r.value);
|
|
||||||
}
|
|
||||||
});
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
// React to OS theme changes when in "system" mode
|
|
||||||
const darkMq = window.matchMedia && window.matchMedia("(prefers-color-scheme: dark)");
|
|
||||||
const handleOsThemeChange = () => {
|
|
||||||
const pref = localStorage.getItem("themePreference") || "system";
|
|
||||||
if (pref === "system") updateWaveStroke();
|
|
||||||
};
|
|
||||||
if (darkMq && darkMq.addEventListener) {
|
|
||||||
darkMq.addEventListener("change", handleOsThemeChange);
|
|
||||||
} else if (darkMq && darkMq.addListener) {
|
|
||||||
// deprecated, but included for Safari compatibility
|
|
||||||
darkMq.addListener(handleOsThemeChange);
|
|
||||||
}
|
|
||||||
|
|
||||||
async function enumerateMicrophones() {
|
|
||||||
try {
|
|
||||||
const micPermission = await navigator.permissions.query({
|
|
||||||
name: "microphone",
|
|
||||||
});
|
|
||||||
|
|
||||||
const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
|
|
||||||
stream.getTracks().forEach(track => track.stop());
|
|
||||||
|
|
||||||
const devices = await navigator.mediaDevices.enumerateDevices();
|
|
||||||
availableMicrophones = devices.filter(device => device.kind === 'audioinput');
|
|
||||||
|
|
||||||
populateMicrophoneSelect();
|
|
||||||
console.log(`Found ${availableMicrophones.length} microphone(s)`);
|
|
||||||
} catch (error) {
|
|
||||||
console.error('Error enumerating microphones:', error);
|
|
||||||
statusText.textContent = "Error accessing microphones. Please grant permission.";
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
function populateMicrophoneSelect() {
|
|
||||||
if (!microphoneSelect) return;
|
|
||||||
|
|
||||||
microphoneSelect.innerHTML = '<option value="">Default Microphone</option>';
|
|
||||||
|
|
||||||
availableMicrophones.forEach((device, index) => {
|
|
||||||
const option = document.createElement('option');
|
|
||||||
option.value = device.deviceId;
|
|
||||||
option.textContent = device.label || `Microphone ${index + 1}`;
|
|
||||||
microphoneSelect.appendChild(option);
|
|
||||||
});
|
|
||||||
|
|
||||||
const savedMicId = localStorage.getItem('selectedMicrophone');
|
|
||||||
if (savedMicId && availableMicrophones.some(mic => mic.deviceId === savedMicId)) {
|
|
||||||
microphoneSelect.value = savedMicId;
|
|
||||||
selectedMicrophoneId = savedMicId;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
function handleMicrophoneChange() {
|
|
||||||
selectedMicrophoneId = microphoneSelect.value || null;
|
|
||||||
localStorage.setItem('selectedMicrophone', selectedMicrophoneId || '');
|
|
||||||
|
|
||||||
const selectedDevice = availableMicrophones.find(mic => mic.deviceId === selectedMicrophoneId);
|
|
||||||
const deviceName = selectedDevice ? selectedDevice.label : 'Default Microphone';
|
|
||||||
|
|
||||||
console.log(`Selected microphone: ${deviceName}`);
|
|
||||||
statusText.textContent = `Microphone changed to: ${deviceName}`;
|
|
||||||
|
|
||||||
if (isRecording) {
|
|
||||||
statusText.textContent = "Switching microphone... Please wait.";
|
|
||||||
stopRecording().then(() => {
|
|
||||||
setTimeout(() => {
|
|
||||||
toggleRecording();
|
|
||||||
}, 1000);
|
|
||||||
});
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Helpers
|
|
||||||
function fmt1(x) {
|
|
||||||
const n = Number(x);
|
|
||||||
return Number.isFinite(n) ? n.toFixed(1) : x;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Default WebSocket URL computation
|
|
||||||
const host = window.location.hostname || "localhost";
|
|
||||||
const port = window.location.port;
|
|
||||||
const protocol = window.location.protocol === "https:" ? "wss" : "ws";
|
|
||||||
const defaultWebSocketUrl = websocketUrl;
|
|
||||||
|
|
||||||
// Populate default caption and input
|
|
||||||
if (websocketDefaultSpan) websocketDefaultSpan.textContent = defaultWebSocketUrl;
|
|
||||||
websocketInput.value = defaultWebSocketUrl;
|
|
||||||
websocketUrl = defaultWebSocketUrl;
|
|
||||||
|
|
||||||
// Optional chunk selector (guard for presence)
|
|
||||||
if (chunkSelector) {
|
|
||||||
chunkSelector.addEventListener("change", () => {
|
|
||||||
chunkDuration = parseInt(chunkSelector.value);
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
// WebSocket input change handling
|
|
||||||
websocketInput.addEventListener("change", () => {
|
|
||||||
const urlValue = websocketInput.value.trim();
|
|
||||||
if (!urlValue.startsWith("ws://") && !urlValue.startsWith("wss://")) {
|
|
||||||
statusText.textContent = "Invalid WebSocket URL (must start with ws:// or wss://)";
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
websocketUrl = urlValue;
|
|
||||||
statusText.textContent = "WebSocket URL updated. Ready to connect.";
|
|
||||||
});
|
|
||||||
|
|
||||||
function setupWebSocket() {
|
|
||||||
return new Promise((resolve, reject) => {
|
|
||||||
try {
|
|
||||||
websocket = new WebSocket(websocketUrl);
|
|
||||||
} catch (error) {
|
|
||||||
statusText.textContent = "Invalid WebSocket URL. Please check and try again.";
|
|
||||||
reject(error);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
websocket.onopen = () => {
|
|
||||||
statusText.textContent = "Connected to server.";
|
|
||||||
resolve();
|
|
||||||
};
|
|
||||||
|
|
||||||
websocket.onclose = () => {
|
|
||||||
if (userClosing) {
|
|
||||||
if (waitingForStop) {
|
|
||||||
statusText.textContent = "Processing finalized or connection closed.";
|
|
||||||
if (lastReceivedData) {
|
|
||||||
renderLinesWithBuffer(
|
|
||||||
lastReceivedData.lines || [],
|
|
||||||
lastReceivedData.buffer_diarization || "",
|
|
||||||
lastReceivedData.buffer_transcription || "",
|
|
||||||
0,
|
|
||||||
0,
|
|
||||||
true
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
statusText.textContent = "Disconnected from the WebSocket server. (Check logs if model is loading.)";
|
|
||||||
if (isRecording) {
|
|
||||||
stopRecording();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
isRecording = false;
|
|
||||||
waitingForStop = false;
|
|
||||||
userClosing = false;
|
|
||||||
lastReceivedData = null;
|
|
||||||
websocket = null;
|
|
||||||
updateUI();
|
|
||||||
};
|
|
||||||
|
|
||||||
websocket.onerror = () => {
|
|
||||||
statusText.textContent = "Error connecting to WebSocket.";
|
|
||||||
reject(new Error("Error connecting to WebSocket"));
|
|
||||||
};
|
|
||||||
|
|
||||||
websocket.onmessage = (event) => {
|
|
||||||
const data = JSON.parse(event.data);
|
|
||||||
|
|
||||||
if (data.type === "ready_to_stop") {
|
|
||||||
console.log("Ready to stop received, finalizing display and closing WebSocket.");
|
|
||||||
waitingForStop = false;
|
|
||||||
|
|
||||||
if (lastReceivedData) {
|
|
||||||
renderLinesWithBuffer(
|
|
||||||
lastReceivedData.lines || [],
|
|
||||||
lastReceivedData.buffer_diarization || "",
|
|
||||||
lastReceivedData.buffer_transcription || "",
|
|
||||||
0,
|
|
||||||
0,
|
|
||||||
true
|
|
||||||
);
|
|
||||||
}
|
|
||||||
statusText.textContent = "Finished processing audio! Ready to record again.";
|
|
||||||
recordButton.disabled = false;
|
|
||||||
|
|
||||||
if (websocket) {
|
|
||||||
websocket.close();
|
|
||||||
}
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
lastReceivedData = data;
|
|
||||||
|
|
||||||
const {
|
|
||||||
lines = [],
|
|
||||||
buffer_transcription = "",
|
|
||||||
buffer_diarization = "",
|
|
||||||
remaining_time_transcription = 0,
|
|
||||||
remaining_time_diarization = 0,
|
|
||||||
status = "active_transcription",
|
|
||||||
} = data;
|
|
||||||
|
|
||||||
renderLinesWithBuffer(
|
|
||||||
lines,
|
|
||||||
buffer_diarization,
|
|
||||||
buffer_transcription,
|
|
||||||
remaining_time_diarization,
|
|
||||||
remaining_time_transcription,
|
|
||||||
false,
|
|
||||||
status
|
|
||||||
);
|
|
||||||
};
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
function renderLinesWithBuffer(
|
|
||||||
lines,
|
|
||||||
buffer_diarization,
|
|
||||||
buffer_transcription,
|
|
||||||
remaining_time_diarization,
|
|
||||||
remaining_time_transcription,
|
|
||||||
isFinalizing = false,
|
|
||||||
current_status = "active_transcription"
|
|
||||||
) {
|
|
||||||
if (current_status === "no_audio_detected") {
|
|
||||||
linesTranscriptDiv.innerHTML =
|
|
||||||
"<p style='text-align: center; color: var(--muted); margin-top: 20px;'><em>No audio detected...</em></p>";
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
const showLoading = !isFinalizing && (lines || []).some((it) => it.speaker == 0);
|
|
||||||
const showTransLag = !isFinalizing && remaining_time_transcription > 0;
|
|
||||||
const showDiaLag = !isFinalizing && !!buffer_diarization && remaining_time_diarization > 0;
|
|
||||||
const signature = JSON.stringify({
|
|
||||||
lines: (lines || []).map((it) => ({ speaker: it.speaker, text: it.text, start: it.start, end: it.end })),
|
|
||||||
buffer_transcription: buffer_transcription || "",
|
|
||||||
buffer_diarization: buffer_diarization || "",
|
|
||||||
status: current_status,
|
|
||||||
showLoading,
|
|
||||||
showTransLag,
|
|
||||||
showDiaLag,
|
|
||||||
isFinalizing: !!isFinalizing,
|
|
||||||
});
|
|
||||||
if (lastSignature === signature) {
|
|
||||||
const t = document.querySelector(".lag-transcription-value");
|
|
||||||
if (t) t.textContent = fmt1(remaining_time_transcription);
|
|
||||||
const d = document.querySelector(".lag-diarization-value");
|
|
||||||
if (d) d.textContent = fmt1(remaining_time_diarization);
|
|
||||||
const ld = document.querySelector(".loading-diarization-value");
|
|
||||||
if (ld) ld.textContent = fmt1(remaining_time_diarization);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
lastSignature = signature;
|
|
||||||
|
|
||||||
const linesHtml = (lines || [])
|
|
||||||
.map((item, idx) => {
|
|
||||||
let timeInfo = "";
|
|
||||||
if (item.start !== undefined && item.end !== undefined) {
|
|
||||||
timeInfo = ` ${item.start} - ${item.end}`;
|
|
||||||
}
|
|
||||||
|
|
||||||
let speakerLabel = "";
|
|
||||||
if (item.speaker === -2) {
|
|
||||||
speakerLabel = `<span class="silence">Silence<span id='timeInfo'>${timeInfo}</span></span>`;
|
|
||||||
} else if (item.speaker == 0 && !isFinalizing) {
|
|
||||||
speakerLabel = `<span class='loading'><span class="spinner"></span><span id='timeInfo'><span class="loading-diarization-value">${fmt1(
|
|
||||||
remaining_time_diarization
|
|
||||||
)}</span> second(s) of audio are undergoing diarization</span></span>`;
|
|
||||||
} else if (item.speaker !== 0) {
|
|
||||||
speakerLabel = `<span id="speaker">Speaker ${item.speaker}<span id='timeInfo'>${timeInfo}</span></span>`;
|
|
||||||
}
|
|
||||||
|
|
||||||
let currentLineText = item.text || "";
|
|
||||||
|
|
||||||
if (idx === lines.length - 1) {
|
|
||||||
if (!isFinalizing && item.speaker !== -2) {
|
|
||||||
if (remaining_time_transcription > 0) {
|
|
||||||
speakerLabel += `<span class="label_transcription"><span class="spinner"></span>Lag <span id='timeInfo'><span class="lag-transcription-value">${fmt1(
|
|
||||||
remaining_time_transcription
|
|
||||||
)}</span>s</span></span>`;
|
|
||||||
}
|
|
||||||
if (buffer_diarization && remaining_time_diarization > 0) {
|
|
||||||
speakerLabel += `<span class="label_diarization"><span class="spinner"></span>Lag<span id='timeInfo'><span class="lag-diarization-value">${fmt1(
|
|
||||||
remaining_time_diarization
|
|
||||||
)}</span>s</span></span>`;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (buffer_diarization) {
|
|
||||||
if (isFinalizing) {
|
|
||||||
currentLineText +=
|
|
||||||
(currentLineText.length > 0 && buffer_diarization.trim().length > 0 ? " " : "") + buffer_diarization.trim();
|
|
||||||
} else {
|
|
||||||
currentLineText += `<span class="buffer_diarization">${buffer_diarization}</span>`;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if (buffer_transcription) {
|
|
||||||
if (isFinalizing) {
|
|
||||||
currentLineText +=
|
|
||||||
(currentLineText.length > 0 && buffer_transcription.trim().length > 0 ? " " : "") +
|
|
||||||
buffer_transcription.trim();
|
|
||||||
} else {
|
|
||||||
currentLineText += `<span class="buffer_transcription">${buffer_transcription}</span>`;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return currentLineText.trim().length > 0 || speakerLabel.length > 0
|
|
||||||
? `<p>${speakerLabel}<br/><div class='textcontent'>${currentLineText}</div></p>`
|
|
||||||
: `<p>${speakerLabel}<br/></p>`;
|
|
||||||
})
|
|
||||||
.join("");
|
|
||||||
|
|
||||||
linesTranscriptDiv.innerHTML = linesHtml;
|
|
||||||
window.scrollTo({ top: document.body.scrollHeight, behavior: "smooth" });
|
|
||||||
}
|
|
||||||
|
|
||||||
function updateTimer() {
|
|
||||||
if (!startTime) return;
|
|
||||||
|
|
||||||
const elapsed = Math.floor((Date.now() - startTime) / 1000);
|
|
||||||
const minutes = Math.floor(elapsed / 60).toString().padStart(2, "0");
|
|
||||||
const seconds = (elapsed % 60).toString().padStart(2, "0");
|
|
||||||
timerElement.textContent = `${minutes}:${seconds}`;
|
|
||||||
}
|
|
||||||
|
|
||||||
function drawWaveform() {
|
|
||||||
if (!analyser) return;
|
|
||||||
|
|
||||||
const bufferLength = analyser.frequencyBinCount;
|
|
||||||
const dataArray = new Uint8Array(bufferLength);
|
|
||||||
analyser.getByteTimeDomainData(dataArray);
|
|
||||||
|
|
||||||
waveCtx.clearRect(
|
|
||||||
0,
|
|
||||||
0,
|
|
||||||
waveCanvas.width / (window.devicePixelRatio || 1),
|
|
||||||
waveCanvas.height / (window.devicePixelRatio || 1)
|
|
||||||
);
|
|
||||||
waveCtx.lineWidth = 1;
|
|
||||||
waveCtx.strokeStyle = waveStroke;
|
|
||||||
waveCtx.beginPath();
|
|
||||||
|
|
||||||
const sliceWidth = (waveCanvas.width / (window.devicePixelRatio || 1)) / bufferLength;
|
|
||||||
let x = 0;
|
|
||||||
|
|
||||||
for (let i = 0; i < bufferLength; i++) {
|
|
||||||
const v = dataArray[i] / 128.0;
|
|
||||||
const y = (v * (waveCanvas.height / (window.devicePixelRatio || 1))) / 2;
|
|
||||||
|
|
||||||
if (i === 0) {
|
|
||||||
waveCtx.moveTo(x, y);
|
|
||||||
} else {
|
|
||||||
waveCtx.lineTo(x, y);
|
|
||||||
}
|
|
||||||
|
|
||||||
x += sliceWidth;
|
|
||||||
}
|
|
||||||
|
|
||||||
waveCtx.lineTo(
|
|
||||||
waveCanvas.width / (window.devicePixelRatio || 1),
|
|
||||||
(waveCanvas.height / (window.devicePixelRatio || 1)) / 2
|
|
||||||
);
|
|
||||||
waveCtx.stroke();
|
|
||||||
|
|
||||||
animationFrame = requestAnimationFrame(drawWaveform);
|
|
||||||
}
|
|
||||||
|
|
||||||
async function startRecording() {
|
|
||||||
try {
|
|
||||||
try {
|
|
||||||
wakeLock = await navigator.wakeLock.request("screen");
|
|
||||||
} catch (err) {
|
|
||||||
console.log("Error acquiring wake lock.");
|
|
||||||
}
|
|
||||||
|
|
||||||
let stream;
|
|
||||||
try {
|
|
||||||
// Try tab capture first
|
|
||||||
stream = await new Promise((resolve, reject) => {
|
|
||||||
chrome.tabCapture.capture({audio: true}, (s) => {
|
|
||||||
if (s) {
|
|
||||||
resolve(s);
|
|
||||||
} else {
|
|
||||||
reject(new Error('Tab capture failed or not available'));
|
|
||||||
}
|
|
||||||
});
|
|
||||||
});
|
|
||||||
statusText.textContent = "Using tab audio capture.";
|
|
||||||
} catch (tabError) {
|
|
||||||
console.log('Tab capture not available, falling back to microphone', tabError);
|
|
||||||
// Fallback to microphone
|
|
||||||
const audioConstraints = selectedMicrophoneId
|
|
||||||
? { audio: { deviceId: { exact: selectedMicrophoneId } } }
|
|
||||||
: { audio: true };
|
|
||||||
stream = await navigator.mediaDevices.getUserMedia(audioConstraints);
|
|
||||||
statusText.textContent = "Using microphone audio.";
|
|
||||||
}
|
|
||||||
|
|
||||||
audioContext = new (window.AudioContext || window.webkitAudioContext)();
|
|
||||||
analyser = audioContext.createAnalyser();
|
|
||||||
analyser.fftSize = 256;
|
|
||||||
microphone = audioContext.createMediaStreamSource(stream);
|
|
||||||
microphone.connect(analyser);
|
|
||||||
|
|
||||||
recorder = new MediaRecorder(stream, { mimeType: "audio/webm" });
|
|
||||||
recorder.ondataavailable = (e) => {
|
|
||||||
if (websocket && websocket.readyState === WebSocket.OPEN) {
|
|
||||||
websocket.send(e.data);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
recorder.start(chunkDuration);
|
|
||||||
|
|
||||||
startTime = Date.now();
|
|
||||||
timerInterval = setInterval(updateTimer, 1000);
|
|
||||||
drawWaveform();
|
|
||||||
|
|
||||||
isRecording = true;
|
|
||||||
updateUI();
|
|
||||||
} catch (err) {
|
|
||||||
if (window.location.hostname === "0.0.0.0") {
|
|
||||||
statusText.textContent =
|
|
||||||
"Error accessing audio input. Browsers may block audio access on 0.0.0.0. Try using localhost:8000 instead.";
|
|
||||||
} else {
|
|
||||||
statusText.textContent = "Error accessing audio input. Please check permissions.";
|
|
||||||
}
|
|
||||||
console.error(err);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
async function stopRecording() {
|
|
||||||
if (wakeLock) {
|
|
||||||
try {
|
|
||||||
await wakeLock.release();
|
|
||||||
} catch (e) {
|
|
||||||
// ignore
|
|
||||||
}
|
|
||||||
wakeLock = null;
|
|
||||||
}
|
|
||||||
|
|
||||||
userClosing = true;
|
|
||||||
waitingForStop = true;
|
|
||||||
|
|
||||||
if (websocket && websocket.readyState === WebSocket.OPEN) {
|
|
||||||
const emptyBlob = new Blob([], { type: "audio/webm" });
|
|
||||||
websocket.send(emptyBlob);
|
|
||||||
statusText.textContent = "Recording stopped. Processing final audio...";
|
|
||||||
}
|
|
||||||
|
|
||||||
if (recorder) {
|
|
||||||
recorder.stop();
|
|
||||||
recorder = null;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (microphone) {
|
|
||||||
microphone.disconnect();
|
|
||||||
microphone = null;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (analyser) {
|
|
||||||
analyser = null;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (audioContext && audioContext.state !== "closed") {
|
|
||||||
try {
|
|
||||||
await audioContext.close();
|
|
||||||
} catch (e) {
|
|
||||||
console.warn("Could not close audio context:", e);
|
|
||||||
}
|
|
||||||
audioContext = null;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (animationFrame) {
|
|
||||||
cancelAnimationFrame(animationFrame);
|
|
||||||
animationFrame = null;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (timerInterval) {
|
|
||||||
clearInterval(timerInterval);
|
|
||||||
timerInterval = null;
|
|
||||||
}
|
|
||||||
timerElement.textContent = "00:00";
|
|
||||||
startTime = null;
|
|
||||||
|
|
||||||
isRecording = false;
|
|
||||||
updateUI();
|
|
||||||
}
|
|
||||||
|
|
||||||
async function toggleRecording() {
|
|
||||||
if (!isRecording) {
|
|
||||||
if (waitingForStop) {
|
|
||||||
console.log("Waiting for stop, early return");
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
console.log("Connecting to WebSocket");
|
|
||||||
try {
|
|
||||||
if (websocket && websocket.readyState === WebSocket.OPEN) {
|
|
||||||
await startRecording();
|
|
||||||
} else {
|
|
||||||
await setupWebSocket();
|
|
||||||
await startRecording();
|
|
||||||
}
|
|
||||||
} catch (err) {
|
|
||||||
statusText.textContent = "Could not connect to WebSocket or access mic. Aborted.";
|
|
||||||
console.error(err);
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
console.log("Stopping recording");
|
|
||||||
stopRecording();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
function updateUI() {
|
|
||||||
recordButton.classList.toggle("recording", isRecording);
|
|
||||||
recordButton.disabled = waitingForStop;
|
|
||||||
|
|
||||||
if (waitingForStop) {
|
|
||||||
if (statusText.textContent !== "Recording stopped. Processing final audio...") {
|
|
||||||
statusText.textContent = "Please wait for processing to complete...";
|
|
||||||
}
|
|
||||||
} else if (isRecording) {
|
|
||||||
statusText.textContent = "Recording...";
|
|
||||||
} else {
|
|
||||||
if (
|
|
||||||
statusText.textContent !== "Finished processing audio! Ready to record again." &&
|
|
||||||
statusText.textContent !== "Processing finalized or connection closed."
|
|
||||||
) {
|
|
||||||
statusText.textContent = "Click to start transcription";
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if (!waitingForStop) {
|
|
||||||
recordButton.disabled = false;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// --- Event wiring ---------------------------------------------------------

recordButton.addEventListener("click", toggleRecording);

if (microphoneSelect) {
  microphoneSelect.addEventListener("change", handleMicrophoneChange);
}

// Show/hide the settings panel and animate the gear icon.
settingsToggle.addEventListener("click", function () {
  settingsDiv.classList.toggle("visible");
  settingsToggle.classList.toggle("active");
});

// Populate the microphone list once the popup's DOM is ready.
document.addEventListener('DOMContentLoaded', async () => {
  try {
    await enumerateMicrophones();
  } catch (err) {
    console.log("Could not enumerate microphones on load:", err);
  }
});

// Refresh the list whenever audio devices are plugged in or removed.
navigator.mediaDevices.addEventListener('devicechange', async () => {
  console.log('Device change detected, re-enumerating microphones');
  try {
    await enumerateMicrophones();
  } catch (err) {
    console.log("Error re-enumerating microphones:", err);
  }
});
|
|
||||||
|
|
||||||
|
|
||||||
/**
 * Display the current microphone permission in the popup and, when it has
 * not been granted yet, open the onboarding page that can request it.
 * Then polls every 100 ms until the permission flips to "granted" and
 * refreshes the label once before stopping the poll.
 */
async function run() {
  const queryMicPermission = () =>
    navigator.permissions.query({ name: "microphone" });
  const showPermissionState = (state) => {
    document.getElementById("audioPermission").innerText = `MICROPHONE: ${state}`;
  };

  const initial = await queryMicPermission();
  showPermissionState(initial.state);

  if (initial.state !== "granted") {
    // Chrome blocks permission prompts inside extension popups, so a normal
    // tab (welcome.html) is opened to perform the request instead.
    chrome.tabs.create({ url: "welcome.html" });
  }

  const pollId = setInterval(async () => {
    const current = await queryMicPermission();
    if (current.state === "granted") {
      showPermissionState(current.state);
      clearInterval(pollId);
    }
  }, 100);
}

void run();
|
|
||||||
@@ -3,9 +3,6 @@
|
|||||||
"name": "WhisperLiveKit Tab Capture",
|
"name": "WhisperLiveKit Tab Capture",
|
||||||
"version": "1.0",
|
"version": "1.0",
|
||||||
"description": "Capture and transcribe audio from browser tabs using WhisperLiveKit.",
|
"description": "Capture and transcribe audio from browser tabs using WhisperLiveKit.",
|
||||||
"background": {
|
|
||||||
"service_worker": "background.js"
|
|
||||||
},
|
|
||||||
"icons": {
|
"icons": {
|
||||||
"16": "icons/icon16.png",
|
"16": "icons/icon16.png",
|
||||||
"32": "icons/icon32.png",
|
"32": "icons/icon32.png",
|
||||||
@@ -14,7 +11,7 @@
|
|||||||
},
|
},
|
||||||
"action": {
|
"action": {
|
||||||
"default_title": "WhisperLiveKit Tab Capture",
|
"default_title": "WhisperLiveKit Tab Capture",
|
||||||
"default_popup": "popup.html"
|
"default_popup": "live_transcription.html"
|
||||||
},
|
},
|
||||||
"permissions": [
|
"permissions": [
|
||||||
"scripting",
|
"scripting",
|
||||||
@@ -22,16 +19,5 @@
|
|||||||
"offscreen",
|
"offscreen",
|
||||||
"activeTab",
|
"activeTab",
|
||||||
"storage"
|
"storage"
|
||||||
],
|
|
||||||
"web_accessible_resources": [
|
|
||||||
{
|
|
||||||
"resources": [
|
|
||||||
"requestPermissions.html",
|
|
||||||
"requestPermissions.js"
|
|
||||||
],
|
|
||||||
"matches": [
|
|
||||||
"<all_urls>"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
@@ -1,78 +0,0 @@
|
|||||||
<!DOCTYPE html>
|
|
||||||
<html lang="en">
|
|
||||||
|
|
||||||
<head>
|
|
||||||
<meta charset="UTF-8" />
|
|
||||||
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
|
|
||||||
<title>WhisperLiveKit</title>
|
|
||||||
<link rel="stylesheet" href="/web/live_transcription.css" />
|
|
||||||
</head>
|
|
||||||
|
|
||||||
<body>
|
|
||||||
<div class="settings-container">
|
|
||||||
<button id="recordButton">
|
|
||||||
<div class="shape-container">
|
|
||||||
<div class="shape"></div>
|
|
||||||
</div>
|
|
||||||
<div class="recording-info">
|
|
||||||
<div class="wave-container">
|
|
||||||
<canvas id="waveCanvas"></canvas>
|
|
||||||
</div>
|
|
||||||
<div class="timer">00:00</div>
|
|
||||||
</div>
|
|
||||||
</button>
|
|
||||||
|
|
||||||
<button id="settingsToggle" class="settings-toggle" title="Show/hide settings">
|
|
||||||
<img src="/web/src/settings.svg" alt="Settings" />
|
|
||||||
</button>
|
|
||||||
|
|
||||||
<div class="settings">
|
|
||||||
<div class="field">
|
|
||||||
<label for="websocketInput">Websocket URL</label>
|
|
||||||
<input id="websocketInput" type="text" placeholder="ws://host:port/asr" />
|
|
||||||
</div>
|
|
||||||
|
|
||||||
<div class="field">
|
|
||||||
<label id="microphoneSelectLabel" for="microphoneSelect">Select Microphone</label>
|
|
||||||
<select id="microphoneSelect">
|
|
||||||
<option value="">Default Microphone</option>
|
|
||||||
</select>
|
|
||||||
<div id="audioPermission"></div>
|
|
||||||
|
|
||||||
</div>
|
|
||||||
|
|
||||||
<div class="theme-selector-container">
|
|
||||||
<div class="segmented" role="radiogroup" aria-label="Theme selector">
|
|
||||||
<input type="radio" id="theme-system" name="theme" value="system" />
|
|
||||||
<label for="theme-system" title="System">
|
|
||||||
<img src="/web/src/system_mode.svg" alt="" />
|
|
||||||
<!-- <span>System</span> -->
|
|
||||||
</label>
|
|
||||||
|
|
||||||
<input type="radio" id="theme-light" name="theme" value="light" />
|
|
||||||
<label for="theme-light" title="Light">
|
|
||||||
<img src="/web/src/light_mode.svg" alt="" />
|
|
||||||
<!-- <span>Light</span> -->
|
|
||||||
</label>
|
|
||||||
|
|
||||||
<input type="radio" id="theme-dark" name="theme" value="dark" />
|
|
||||||
<label for="theme-dark" title="Dark">
|
|
||||||
<img src="/web/src/dark_mode.svg" alt="" />
|
|
||||||
<!-- <span>Dark</span> -->
|
|
||||||
</label>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
<p id="status"></p>
|
|
||||||
|
|
||||||
<div id="linesTranscript"></div>
|
|
||||||
|
|
||||||
<script src="live_transcription.js"></script>
|
|
||||||
</body>
|
|
||||||
|
|
||||||
</html>
|
|
||||||
@@ -1,539 +0,0 @@
|
|||||||
:root {
|
|
||||||
--bg: #ffffff;
|
|
||||||
--text: #111111;
|
|
||||||
--muted: #666666;
|
|
||||||
--border: #e5e5e5;
|
|
||||||
--chip-bg: rgba(0, 0, 0, 0.04);
|
|
||||||
--chip-text: #000000;
|
|
||||||
--spinner-border: #8d8d8d5c;
|
|
||||||
--spinner-top: #b0b0b0;
|
|
||||||
--silence-bg: #f3f3f3;
|
|
||||||
--loading-bg: rgba(255, 77, 77, 0.06);
|
|
||||||
--button-bg: #ffffff;
|
|
||||||
--button-border: #e9e9e9;
|
|
||||||
--wave-stroke: #000000;
|
|
||||||
--label-dia-text: #868686;
|
|
||||||
--label-trans-text: #111111;
|
|
||||||
}
|
|
||||||
|
|
||||||
@media (prefers-color-scheme: dark) {
|
|
||||||
:root:not([data-theme="light"]) {
|
|
||||||
--bg: #0b0b0b;
|
|
||||||
--text: #e6e6e6;
|
|
||||||
--muted: #9aa0a6;
|
|
||||||
--border: #333333;
|
|
||||||
--chip-bg: rgba(255, 255, 255, 0.08);
|
|
||||||
--chip-text: #e6e6e6;
|
|
||||||
--spinner-border: #555555;
|
|
||||||
--spinner-top: #dddddd;
|
|
||||||
--silence-bg: #1a1a1a;
|
|
||||||
--loading-bg: rgba(255, 77, 77, 0.12);
|
|
||||||
--button-bg: #111111;
|
|
||||||
--button-border: #333333;
|
|
||||||
--wave-stroke: #e6e6e6;
|
|
||||||
--label-dia-text: #b3b3b3;
|
|
||||||
--label-trans-text: #ffffff;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
:root[data-theme="dark"] {
|
|
||||||
--bg: #0b0b0b;
|
|
||||||
--text: #e6e6e6;
|
|
||||||
--muted: #9aa0a6;
|
|
||||||
--border: #333333;
|
|
||||||
--chip-bg: rgba(255, 255, 255, 0.08);
|
|
||||||
--chip-text: #e6e6e6;
|
|
||||||
--spinner-border: #555555;
|
|
||||||
--spinner-top: #dddddd;
|
|
||||||
--silence-bg: #1a1a1a;
|
|
||||||
--loading-bg: rgba(255, 77, 77, 0.12);
|
|
||||||
--button-bg: #111111;
|
|
||||||
--button-border: #333333;
|
|
||||||
--wave-stroke: #e6e6e6;
|
|
||||||
--label-dia-text: #b3b3b3;
|
|
||||||
--label-trans-text: #ffffff;
|
|
||||||
}
|
|
||||||
|
|
||||||
:root[data-theme="light"] {
|
|
||||||
--bg: #ffffff;
|
|
||||||
--text: #111111;
|
|
||||||
--muted: #666666;
|
|
||||||
--border: #e5e5e5;
|
|
||||||
--chip-bg: rgba(0, 0, 0, 0.04);
|
|
||||||
--chip-text: #000000;
|
|
||||||
--spinner-border: #8d8d8d5c;
|
|
||||||
--spinner-top: #b0b0b0;
|
|
||||||
--silence-bg: #f3f3f3;
|
|
||||||
--loading-bg: rgba(255, 77, 77, 0.06);
|
|
||||||
--button-bg: #ffffff;
|
|
||||||
--button-border: #e9e9e9;
|
|
||||||
--wave-stroke: #000000;
|
|
||||||
--label-dia-text: #868686;
|
|
||||||
--label-trans-text: #111111;
|
|
||||||
}
|
|
||||||
|
|
||||||
body {
|
|
||||||
font-family: ui-sans-serif, system-ui, sans-serif, 'Apple Color Emoji', 'Segoe UI Emoji', 'Segoe UI Symbol', 'Noto Color Emoji';
|
|
||||||
margin: 20px;
|
|
||||||
text-align: center;
|
|
||||||
background-color: var(--bg);
|
|
||||||
color: var(--text);
|
|
||||||
}
|
|
||||||
|
|
||||||
.settings-toggle {
|
|
||||||
margin-top: 4px;
|
|
||||||
width: 40px;
|
|
||||||
height: 40px;
|
|
||||||
border: none;
|
|
||||||
border-radius: 50%;
|
|
||||||
background-color: var(--button-bg);
|
|
||||||
cursor: pointer;
|
|
||||||
transition: all 0.3s ease;
|
|
||||||
/* border: 1px solid var(--button-border); */
|
|
||||||
display: flex;
|
|
||||||
align-items: center;
|
|
||||||
justify-content: center;
|
|
||||||
position: relative;
|
|
||||||
}
|
|
||||||
|
|
||||||
.settings-toggle:hover {
|
|
||||||
background-color: var(--chip-bg);
|
|
||||||
}
|
|
||||||
|
|
||||||
.settings-toggle img {
|
|
||||||
width: 24px;
|
|
||||||
height: 24px;
|
|
||||||
opacity: 0.7;
|
|
||||||
transition: opacity 0.2s ease, transform 0.3s ease;
|
|
||||||
}
|
|
||||||
|
|
||||||
.settings-toggle:hover img {
|
|
||||||
opacity: 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
.settings-toggle.active img {
|
|
||||||
transform: rotate(80deg);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Record button */
|
|
||||||
#recordButton {
|
|
||||||
width: 50px;
|
|
||||||
height: 50px;
|
|
||||||
border: none;
|
|
||||||
border-radius: 50%;
|
|
||||||
background-color: var(--button-bg);
|
|
||||||
cursor: pointer;
|
|
||||||
transition: all 0.3s ease;
|
|
||||||
border: 1px solid var(--button-border);
|
|
||||||
display: flex;
|
|
||||||
align-items: center;
|
|
||||||
justify-content: center;
|
|
||||||
position: relative;
|
|
||||||
}
|
|
||||||
|
|
||||||
#recordButton.recording {
|
|
||||||
width: 180px;
|
|
||||||
border-radius: 40px;
|
|
||||||
justify-content: flex-start;
|
|
||||||
padding-left: 20px;
|
|
||||||
}
|
|
||||||
|
|
||||||
#recordButton:active {
|
|
||||||
transform: scale(0.95);
|
|
||||||
}
|
|
||||||
|
|
||||||
.shape-container {
|
|
||||||
width: 25px;
|
|
||||||
height: 25px;
|
|
||||||
display: flex;
|
|
||||||
align-items: center;
|
|
||||||
justify-content: center;
|
|
||||||
flex-shrink: 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
.shape {
|
|
||||||
width: 25px;
|
|
||||||
height: 25px;
|
|
||||||
background-color: rgb(209, 61, 53);
|
|
||||||
border-radius: 50%;
|
|
||||||
transition: all 0.3s ease;
|
|
||||||
}
|
|
||||||
|
|
||||||
#recordButton:disabled .shape {
|
|
||||||
background-color: #6e6d6d;
|
|
||||||
}
|
|
||||||
|
|
||||||
#recordButton.recording .shape {
|
|
||||||
border-radius: 5px;
|
|
||||||
width: 25px;
|
|
||||||
height: 25px;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Recording elements */
|
|
||||||
.recording-info {
|
|
||||||
display: none;
|
|
||||||
align-items: center;
|
|
||||||
margin-left: 15px;
|
|
||||||
flex-grow: 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
#recordButton.recording .recording-info {
|
|
||||||
display: flex;
|
|
||||||
}
|
|
||||||
|
|
||||||
.wave-container {
|
|
||||||
width: 60px;
|
|
||||||
height: 30px;
|
|
||||||
position: relative;
|
|
||||||
display: flex;
|
|
||||||
align-items: center;
|
|
||||||
justify-content: center;
|
|
||||||
}
|
|
||||||
|
|
||||||
#waveCanvas {
|
|
||||||
width: 100%;
|
|
||||||
height: 100%;
|
|
||||||
}
|
|
||||||
|
|
||||||
.timer {
|
|
||||||
font-size: 14px;
|
|
||||||
font-weight: 500;
|
|
||||||
color: var(--text);
|
|
||||||
margin-left: 10px;
|
|
||||||
}
|
|
||||||
|
|
||||||
#status {
|
|
||||||
margin-top: 20px;
|
|
||||||
font-size: 16px;
|
|
||||||
color: var(--text);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Settings */
|
|
||||||
.settings-container {
|
|
||||||
display: flex;
|
|
||||||
justify-content: center;
|
|
||||||
align-items: flex-start;
|
|
||||||
gap: 15px;
|
|
||||||
margin-top: 20px;
|
|
||||||
flex-wrap: wrap;
|
|
||||||
}
|
|
||||||
|
|
||||||
.settings {
|
|
||||||
display: none;
|
|
||||||
flex-wrap: wrap;
|
|
||||||
align-items: flex-start;
|
|
||||||
gap: 12px;
|
|
||||||
transition: opacity 0.3s ease;
|
|
||||||
}
|
|
||||||
|
|
||||||
.settings.visible {
|
|
||||||
display: flex;
|
|
||||||
}
|
|
||||||
|
|
||||||
.field {
|
|
||||||
display: flex;
|
|
||||||
flex-direction: column;
|
|
||||||
align-items: flex-start;
|
|
||||||
gap: 3px;
|
|
||||||
}
|
|
||||||
|
|
||||||
#chunkSelector,
|
|
||||||
#websocketInput,
|
|
||||||
#themeSelector,
|
|
||||||
#microphoneSelect {
|
|
||||||
font-size: 16px;
|
|
||||||
padding: 5px 8px;
|
|
||||||
border-radius: 8px;
|
|
||||||
border: 1px solid var(--border);
|
|
||||||
background-color: var(--button-bg);
|
|
||||||
color: var(--text);
|
|
||||||
max-height: 30px;
|
|
||||||
}
|
|
||||||
|
|
||||||
#microphoneSelect {
|
|
||||||
width: 100%;
|
|
||||||
max-width: 190px;
|
|
||||||
min-width: 120px;
|
|
||||||
}
|
|
||||||
|
|
||||||
#chunkSelector:focus,
|
|
||||||
#websocketInput:focus,
|
|
||||||
#themeSelector:focus,
|
|
||||||
#microphoneSelect:focus {
|
|
||||||
outline: none;
|
|
||||||
border-color: #007bff;
|
|
||||||
box-shadow: 0 0 0 3px rgba(0, 123, 255, 0.15);
|
|
||||||
}
|
|
||||||
|
|
||||||
label {
|
|
||||||
font-size: 13px;
|
|
||||||
color: var(--muted);
|
|
||||||
}
|
|
||||||
|
|
||||||
.ws-default {
|
|
||||||
font-size: 12px;
|
|
||||||
color: var(--muted);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Segmented pill control for Theme */
|
|
||||||
.segmented {
|
|
||||||
display: inline-flex;
|
|
||||||
align-items: stretch;
|
|
||||||
border: 1px solid var(--button-border);
|
|
||||||
background-color: var(--button-bg);
|
|
||||||
border-radius: 999px;
|
|
||||||
overflow: hidden;
|
|
||||||
}
|
|
||||||
|
|
||||||
.segmented input[type="radio"] {
|
|
||||||
position: absolute;
|
|
||||||
opacity: 0;
|
|
||||||
pointer-events: none;
|
|
||||||
}
|
|
||||||
|
|
||||||
.theme-selector-container {
|
|
||||||
display: flex;
|
|
||||||
align-items: center;
|
|
||||||
margin-top: 17px;
|
|
||||||
}
|
|
||||||
|
|
||||||
.segmented label {
|
|
||||||
display: inline-flex;
|
|
||||||
align-items: center;
|
|
||||||
gap: 6px;
|
|
||||||
padding: 6px 12px;
|
|
||||||
font-size: 14px;
|
|
||||||
color: var(--muted);
|
|
||||||
cursor: pointer;
|
|
||||||
user-select: none;
|
|
||||||
transition: background-color 0.2s ease, color 0.2s ease;
|
|
||||||
}
|
|
||||||
|
|
||||||
.segmented label span {
|
|
||||||
display: none;
|
|
||||||
}
|
|
||||||
|
|
||||||
.segmented label:hover span {
|
|
||||||
display: inline;
|
|
||||||
}
|
|
||||||
|
|
||||||
.segmented label:hover {
|
|
||||||
background-color: var(--chip-bg);
|
|
||||||
}
|
|
||||||
|
|
||||||
.segmented img {
|
|
||||||
width: 16px;
|
|
||||||
height: 16px;
|
|
||||||
}
|
|
||||||
|
|
||||||
.segmented input[type="radio"]:checked + label {
|
|
||||||
background-color: var(--chip-bg);
|
|
||||||
color: var(--text);
|
|
||||||
}
|
|
||||||
|
|
||||||
.segmented input[type="radio"]:focus-visible + label,
|
|
||||||
.segmented input[type="radio"]:focus + label {
|
|
||||||
outline: 2px solid #007bff;
|
|
||||||
outline-offset: 2px;
|
|
||||||
border-radius: 999px;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Transcript area */
|
|
||||||
#linesTranscript {
|
|
||||||
margin: 20px auto;
|
|
||||||
max-width: 700px;
|
|
||||||
text-align: left;
|
|
||||||
font-size: 16px;
|
|
||||||
}
|
|
||||||
|
|
||||||
#linesTranscript p {
|
|
||||||
margin: 0px 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
#linesTranscript strong {
|
|
||||||
color: var(--text);
|
|
||||||
}
|
|
||||||
|
|
||||||
#speaker {
|
|
||||||
border: 1px solid var(--border);
|
|
||||||
border-radius: 100px;
|
|
||||||
padding: 2px 10px;
|
|
||||||
font-size: 14px;
|
|
||||||
margin-bottom: 0px;
|
|
||||||
}
|
|
||||||
|
|
||||||
.label_diarization {
|
|
||||||
background-color: var(--chip-bg);
|
|
||||||
border-radius: 8px 8px 8px 8px;
|
|
||||||
padding: 2px 10px;
|
|
||||||
margin-left: 10px;
|
|
||||||
display: inline-block;
|
|
||||||
white-space: nowrap;
|
|
||||||
font-size: 14px;
|
|
||||||
margin-bottom: 0px;
|
|
||||||
color: var(--label-dia-text);
|
|
||||||
}
|
|
||||||
|
|
||||||
.label_transcription {
|
|
||||||
background-color: var(--chip-bg);
|
|
||||||
border-radius: 8px 8px 8px 8px;
|
|
||||||
padding: 2px 10px;
|
|
||||||
display: inline-block;
|
|
||||||
white-space: nowrap;
|
|
||||||
margin-left: 10px;
|
|
||||||
font-size: 14px;
|
|
||||||
margin-bottom: 0px;
|
|
||||||
color: var(--label-trans-text);
|
|
||||||
}
|
|
||||||
|
|
||||||
#timeInfo {
|
|
||||||
color: var(--muted);
|
|
||||||
margin-left: 10px;
|
|
||||||
}
|
|
||||||
|
|
||||||
.textcontent {
|
|
||||||
font-size: 16px;
|
|
||||||
padding-left: 10px;
|
|
||||||
margin-bottom: 10px;
|
|
||||||
margin-top: 1px;
|
|
||||||
padding-top: 5px;
|
|
||||||
border-radius: 0px 0px 0px 10px;
|
|
||||||
}
|
|
||||||
|
|
||||||
.buffer_diarization {
|
|
||||||
color: var(--label-dia-text);
|
|
||||||
margin-left: 4px;
|
|
||||||
}
|
|
||||||
|
|
||||||
.buffer_transcription {
|
|
||||||
color: #7474748c;
|
|
||||||
margin-left: 4px;
|
|
||||||
}
|
|
||||||
|
|
||||||
.spinner {
|
|
||||||
display: inline-block;
|
|
||||||
width: 8px;
|
|
||||||
height: 8px;
|
|
||||||
border: 2px solid var(--spinner-border);
|
|
||||||
border-top: 2px solid var(--spinner-top);
|
|
||||||
border-radius: 50%;
|
|
||||||
animation: spin 0.7s linear infinite;
|
|
||||||
vertical-align: middle;
|
|
||||||
margin-bottom: 2px;
|
|
||||||
margin-right: 5px;
|
|
||||||
}
|
|
||||||
|
|
||||||
@keyframes spin {
|
|
||||||
to {
|
|
||||||
transform: rotate(360deg);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
.silence {
|
|
||||||
color: var(--muted);
|
|
||||||
background-color: var(--silence-bg);
|
|
||||||
font-size: 13px;
|
|
||||||
border-radius: 30px;
|
|
||||||
padding: 2px 10px;
|
|
||||||
}
|
|
||||||
|
|
||||||
.loading {
|
|
||||||
color: var(--muted);
|
|
||||||
background-color: var(--loading-bg);
|
|
||||||
border-radius: 8px 8px 8px 0px;
|
|
||||||
padding: 2px 10px;
|
|
||||||
font-size: 14px;
|
|
||||||
margin-bottom: 0px;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* for smaller screens */
|
|
||||||
/* @media (max-width: 450px) {
|
|
||||||
.settings-container {
|
|
||||||
flex-direction: column;
|
|
||||||
gap: 10px;
|
|
||||||
align-items: center;
|
|
||||||
}
|
|
||||||
|
|
||||||
.settings {
|
|
||||||
justify-content: center;
|
|
||||||
gap: 8px;
|
|
||||||
width: 100%;
|
|
||||||
}
|
|
||||||
|
|
||||||
.field {
|
|
||||||
align-items: center;
|
|
||||||
width: 100%;
|
|
||||||
}
|
|
||||||
|
|
||||||
#websocketInput,
|
|
||||||
#microphoneSelect {
|
|
||||||
min-width: 200px;
|
|
||||||
max-width: 100%;
|
|
||||||
}
|
|
||||||
|
|
||||||
.theme-selector-container {
|
|
||||||
margin-top: 10px;
|
|
||||||
}
|
|
||||||
} */
|
|
||||||
|
|
||||||
/* @media (max-width: 768px) and (min-width: 451px) {
|
|
||||||
.settings-container {
|
|
||||||
gap: 10px;
|
|
||||||
}
|
|
||||||
|
|
||||||
.settings {
|
|
||||||
gap: 8px;
|
|
||||||
}
|
|
||||||
|
|
||||||
#websocketInput,
|
|
||||||
#microphoneSelect {
|
|
||||||
min-width: 150px;
|
|
||||||
max-width: 300px;
|
|
||||||
}
|
|
||||||
} */
|
|
||||||
|
|
||||||
/* @media (max-width: 480px) {
|
|
||||||
body {
|
|
||||||
margin: 10px;
|
|
||||||
}
|
|
||||||
|
|
||||||
.settings-toggle {
|
|
||||||
width: 35px;
|
|
||||||
height: 35px;
|
|
||||||
}
|
|
||||||
|
|
||||||
.settings-toggle img {
|
|
||||||
width: 20px;
|
|
||||||
height: 20px;
|
|
||||||
}
|
|
||||||
|
|
||||||
.settings {
|
|
||||||
flex-direction: column;
|
|
||||||
align-items: center;
|
|
||||||
gap: 6px;
|
|
||||||
}
|
|
||||||
|
|
||||||
#websocketInput,
|
|
||||||
#microphoneSelect {
|
|
||||||
max-width: 400px;
|
|
||||||
}
|
|
||||||
|
|
||||||
.segmented label {
|
|
||||||
padding: 4px 8px;
|
|
||||||
font-size: 12px;
|
|
||||||
}
|
|
||||||
|
|
||||||
.segmented img {
|
|
||||||
width: 14px;
|
|
||||||
height: 14px;
|
|
||||||
}
|
|
||||||
} */
|
|
||||||
|
|
||||||
|
|
||||||
html
|
|
||||||
{
|
|
||||||
width: 400px; /* max: 800px */
|
|
||||||
height: 600px; /* max: 600px */
|
|
||||||
border-radius: 10px;
|
|
||||||
|
|
||||||
}
|
|
||||||
@@ -1 +0,0 @@
|
|||||||
<svg xmlns="http://www.w3.org/2000/svg" height="24px" viewBox="0 -960 960 960" width="24px" fill="#5f6368"><path d="M480-120q-151 0-255.5-104.5T120-480q0-138 90-239.5T440-838q13-2 23 3.5t16 14.5q6 9 6.5 21t-7.5 23q-17 26-25.5 55t-8.5 61q0 90 63 153t153 63q31 0 61.5-9t54.5-25q11-7 22.5-6.5T819-479q10 5 15.5 15t3.5 24q-14 138-117.5 229T480-120Zm0-80q88 0 158-48.5T740-375q-20 5-40 8t-40 3q-123 0-209.5-86.5T364-660q0-20 3-40t8-40q-78 32-126.5 102T200-480q0 116 82 198t198 82Zm-10-270Z"/></svg>
|
|
||||||
|
Before Width: | Height: | Size: 493 B |
@@ -1 +0,0 @@
|
|||||||
<svg xmlns="http://www.w3.org/2000/svg" height="24px" viewBox="0 -960 960 960" width="24px" fill="#5f6368"><path d="M480-360q50 0 85-35t35-85q0-50-35-85t-85-35q-50 0-85 35t-35 85q0 50 35 85t85 35Zm0 80q-83 0-141.5-58.5T280-480q0-83 58.5-141.5T480-680q83 0 141.5 58.5T680-480q0 83-58.5 141.5T480-280ZM80-440q-17 0-28.5-11.5T40-480q0-17 11.5-28.5T80-520h80q17 0 28.5 11.5T200-480q0 17-11.5 28.5T160-440H80Zm720 0q-17 0-28.5-11.5T760-480q0-17 11.5-28.5T800-520h80q17 0 28.5 11.5T920-480q0 17-11.5 28.5T880-440h-80ZM480-760q-17 0-28.5-11.5T440-800v-80q0-17 11.5-28.5T480-920q17 0 28.5 11.5T520-880v80q0 17-11.5 28.5T480-760Zm0 720q-17 0-28.5-11.5T440-80v-80q0-17 11.5-28.5T480-200q17 0 28.5 11.5T520-160v80q0 17-11.5 28.5T480-40ZM226-678l-43-42q-12-11-11.5-28t11.5-29q12-12 29-12t28 12l42 43q11 12 11 28t-11 28q-11 12-27.5 11.5T226-678Zm494 495-42-43q-11-12-11-28.5t11-27.5q11-12 27.5-11.5T734-282l43 42q12 11 11.5 28T777-183q-12 12-29 12t-28-12Zm-42-495q-12-11-11.5-27.5T678-734l42-43q11-12 28-11.5t29 11.5q12 12 12 29t-12 28l-43 42q-12 11-28 11t-28-11ZM183-183q-12-12-12-29t12-28l43-42q12-11 28.5-11t27.5 11q12 11 11.5 27.5T282-226l-42 43q-11 12-28 11.5T183-183Zm297-297Z"/></svg>
|
|
||||||
|
Before Width: | Height: | Size: 1.2 KiB |
@@ -1 +0,0 @@
|
|||||||
<svg xmlns="http://www.w3.org/2000/svg" height="24px" viewBox="0 -960 960 960" width="24px" fill="#5f6368"><path d="M433-80q-27 0-46.5-18T363-142l-9-66q-13-5-24.5-12T307-235l-62 26q-25 11-50 2t-39-32l-47-82q-14-23-8-49t27-43l53-40q-1-7-1-13.5v-27q0-6.5 1-13.5l-53-40q-21-17-27-43t8-49l47-82q14-23 39-32t50 2l62 26q11-8 23-15t24-12l9-66q4-26 23.5-44t46.5-18h94q27 0 46.5 18t23.5 44l9 66q13 5 24.5 12t22.5 15l62-26q25-11 50-2t39 32l47 82q14 23 8 49t-27 43l-53 40q1 7 1 13.5v27q0 6.5-2 13.5l53 40q21 17 27 43t-8 49l-48 82q-14 23-39 32t-50-2l-60-26q-11 8-23 15t-24 12l-9 66q-4 26-23.5 44T527-80h-94Zm7-80h79l14-106q31-8 57.5-23.5T639-327l99 41 39-68-86-65q5-14 7-29.5t2-31.5q0-16-2-31.5t-7-29.5l86-65-39-68-99 42q-22-23-48.5-38.5T533-694l-13-106h-79l-14 106q-31 8-57.5 23.5T321-633l-99-41-39 68 86 64q-5 15-7 30t-2 32q0 16 2 31t7 30l-86 65 39 68 99-42q22 23 48.5 38.5T427-266l13 106Zm42-180q58 0 99-41t41-99q0-58-41-99t-99-41q-59 0-99.5 41T342-480q0 58 40.5 99t99.5 41Zm-2-140Z"/></svg>
|
|
||||||
|
Before Width: | Height: | Size: 982 B |
@@ -1 +0,0 @@
|
|||||||
<svg xmlns="http://www.w3.org/2000/svg" height="24px" viewBox="0 -960 960 960" width="24px" fill="#5f6368"><path d="M396-396q-32-32-58.5-67T289-537q-5 14-6.5 28.5T281-480q0 83 58 141t141 58q14 0 28.5-2t28.5-6q-39-22-74-48.5T396-396Zm85 196q-56 0-107-21t-91-61q-40-40-61-91t-21-107q0-51 17-97.5t50-84.5q13-14 32-9.5t27 24.5q21 55 52.5 104t73.5 91q42 42 91 73.5T648-326q20 8 24.5 27t-9.5 32q-38 33-84.5 50T481-200Zm223-192q-16-5-23-20.5t-4-32.5q9-48-6-94.5T621-621q-35-35-80.5-49.5T448-677q-17 3-32-4t-21-23q-6-16 1.5-31t23.5-19q69-15 138 4.5T679-678q51 51 71 120t5 138q-4 17-19 25t-32 3ZM480-840q-17 0-28.5-11.5T440-880v-40q0-17 11.5-28.5T480-960q17 0 28.5 11.5T520-920v40q0 17-11.5 28.5T480-840Zm0 840q-17 0-28.5-11.5T440-40v-40q0-17 11.5-28.5T480-120q17 0 28.5 11.5T520-80v40q0 17-11.5 28.5T480 0Zm255-734q-12-12-12-28.5t12-28.5l28-28q11-11 27.5-11t28.5 11q12 12 12 28.5T819-762l-28 28q-12 12-28 12t-28-12ZM141-141q-12-12-12-28.5t12-28.5l28-28q12-12 28-12t28 12q12 12 12 28.5T225-169l-28 28q-11 11-27.5 11T141-141Zm739-299q-17 0-28.5-11.5T840-480q0-17 11.5-28.5T880-520h40q17 0 28.5 11.5T960-480q0 17-11.5 28.5T920-440h-40Zm-840 0q-17 0-28.5-11.5T0-480q0-17 11.5-28.5T40-520h40q17 0 28.5 11.5T120-480q0 17-11.5 28.5T80-440H40Zm779 299q-12 12-28.5 12T762-141l-28-28q-12-12-12-28t12-28q12-12 28.5-12t28.5 12l28 28q11 11 11 27.5T819-141ZM226-735q-12 12-28.5 12T169-735l-28-28q-11-11-11-27.5t11-28.5q12-12 28.5-12t28.5 12l28 28q12 12 12 28t-12 28Zm170 339Z"/></svg>
|
|
||||||
|
Before Width: | Height: | Size: 1.4 KiB |
@@ -1,12 +0,0 @@
|
|||||||
<!DOCTYPE html>
|
|
||||||
<html>
|
|
||||||
<head>
|
|
||||||
<title>Welcome</title>
|
|
||||||
<script src="welcome.js"></script>
|
|
||||||
</head>
|
|
||||||
<body>
|
|
||||||
This page exists to workaround an issue with Chrome that blocks permission
|
|
||||||
requests from chrome extensions
|
|
||||||
<!-- <button id="requestMicrophone">Request Microphone</button> -->
|
|
||||||
</body>
|
|
||||||
</html>
|
|
||||||
264
docs/API.md
Normal file
@@ -0,0 +1,264 @@
|
|||||||
|
# WhisperLiveKit WebSocket API Documentation
|
||||||
|
|
||||||
|
> ⚠️ **Note**: The new API structure described in this document is currently being deployed.
|
||||||
|
This documentation is intended for devs who want to build custom frontends.
|
||||||
|
|
||||||
|
WLK provides real-time speech transcription, speaker diarization, and translation through a WebSocket API. The server sends incremental updates as audio is processed, allowing clients to display live transcription results with minimal latency.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Legacy API (Current)
|
||||||
|
|
||||||
|
### Message Structure
|
||||||
|
|
||||||
|
The current API sends complete state snapshots on each update (several times per second).
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
{
|
||||||
|
"type": str,
|
||||||
|
"status": str,
|
||||||
|
"lines": [
|
||||||
|
{
|
||||||
|
"speaker": int,
|
||||||
|
"text": str,
|
||||||
|
"start": float,
|
||||||
|
"end": float,
|
||||||
|
"translation": str | null,
|
||||||
|
"detected_language": str
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"buffer_transcription": str,
|
||||||
|
"buffer_diarization": str,
|
||||||
|
"remaining_time_transcription": float,
|
||||||
|
"remaining_time_diarization": float
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## New API (Under Development)
|
||||||
|
|
||||||
|
### Philosophy
|
||||||
|
|
||||||
|
Principles:
|
||||||
|
|
||||||
|
- **Incremental Updates**: Only updates and new segments are sent
|
||||||
|
- **Ephemeral Buffers**: Temporary, unvalidated data displayed in real-time but overwritten on next update, at speaker level
|
||||||
|
|
||||||
|
|
||||||
|
## Message Format
|
||||||
|
|
||||||
|
|
||||||
|
```typescript
|
||||||
|
{
|
||||||
|
"type": "transcript_update",
|
||||||
|
"status": "active_transcription" | "no_audio_detected",
|
||||||
|
"segments": [
|
||||||
|
{
|
||||||
|
"id": number,
|
||||||
|
"speaker": number,
|
||||||
|
"text": string,
|
||||||
|
"start_speaker": float,
|
||||||
|
"start": float,
|
||||||
|
"end": float,
|
||||||
|
"language": string | null,
|
||||||
|
"translation": string,
|
||||||
|
"words": [
|
||||||
|
{
|
||||||
|
"text": string,
|
||||||
|
"start": float,
|
||||||
|
"end": float,
|
||||||
|
"validated": {
|
||||||
|
"text": boolean,
|
||||||
|
"speaker": boolean,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"buffer": {
|
||||||
|
"transcription": string,
|
||||||
|
"diarization": string,
|
||||||
|
"translation": string
|
||||||
|
}
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"remaining_time_transcription": float,
|
||||||
|
"remaining_time_diarization": float
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Other Message Types
|
||||||
|
|
||||||
|
#### Config Message (sent on connection)
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"type": "config",
|
||||||
|
"useAudioWorklet": true / false
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Ready to Stop Message (sent after processing complete)
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"type": "ready_to_stop"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Field Descriptions
|
||||||
|
|
||||||
|
### Segment Fields
|
||||||
|
|
||||||
|
| Field | Type | Description |
|
||||||
|
|-------|------|-------------|
|
||||||
|
| `id` | `number` | Unique identifier for this segment. Used by clients to update specific segments efficiently. |
|
||||||
|
| `speaker` | `number` | Speaker ID (1, 2, 3...). Special value `-2` indicates silence. |
|
||||||
|
| `text` | `string` | Validated transcription text for this update. Should be **appended** to the segment's text on the client side. |
|
||||||
|
| `start_speaker` | `float` | Timestamp (seconds) when this speaker segment began. |
|
||||||
|
| `start` | `float` | Timestamp (seconds) of the first word in this update. |
|
||||||
|
| `end` | `float` | Timestamp (seconds) of the last word in this update. |
|
||||||
|
| `language` | `string \| null` | ISO language code (e.g., "en", "fr"). `null` until language is detected. |
|
||||||
|
| `translation` | `string` | Validated translation text for this update. Should be **appended** to the segment's translation on the client side. |
|
||||||
|
| `words` | `Array` | Array of word-level objects with timing and validation information. |
|
||||||
|
| `buffer` | `Object` | Per-segment temporary buffers, see below |
|
||||||
|
|
||||||
|
### Word Object
|
||||||
|
|
||||||
|
| Field | Type | Description |
|
||||||
|
|-------|------|-------------|
|
||||||
|
| `text` | `string` | The word text. |
|
||||||
|
| `start` | `number` | Start timestamp (seconds) of this word. |
|
||||||
|
| `end` | `number` | End timestamp (seconds) of this word. |
|
||||||
|
| `validated.text` | `boolean` | Whether the transcription text has been validated. if false, word is also in buffer: transcription |
|
||||||
|
| `validated.speaker` | `boolean` | Whether the speaker assignment has been validated. if false, word is also in buffer: diarization |
|
||||||
|
| `validated.language` | `boolean` | Whether the language detection has been validated. if false, word is also in buffer: translation |
|
||||||
|
|
||||||
|
### Buffer Object (Per-Segment)
|
||||||
|
|
||||||
|
Buffers are **ephemeral**. They should be displayed to the user but not stored permanently in the frontend. Each update may contain a completely different buffer value, and the previous buffer's content will typically appear in a later update's validated text.
|
||||||
|
|
||||||
|
| Field | Type | Description |
|
||||||
|
|-------|------|-------------|
|
||||||
|
| `transcription` | `string` | Pending transcription text. Displayed immediately but **overwritten** on next update. |
|
||||||
|
| `diarization` | `string` | Pending diarization text (text waiting for speaker assignment). Displayed immediately but **overwritten** on next update. |
|
||||||
|
| `translation` | `string` | Pending translation text. Displayed immediately but **overwritten** on next update. |
|
||||||
|
|
||||||
|
|
||||||
|
### Metadata Fields
|
||||||
|
|
||||||
|
| Field | Type | Description |
|
||||||
|
|-------|------|-------------|
|
||||||
|
| `remaining_time_transcription` | `float` | Seconds of audio waiting for transcription processing. |
|
||||||
|
| `remaining_time_diarization` | `float` | Seconds of audio waiting for speaker diarization. |
|
||||||
|
|
||||||
|
### Status Values
|
||||||
|
|
||||||
|
| Status | Description |
|
||||||
|
|--------|-------------|
|
||||||
|
| `active_transcription` | Normal operation, transcription is active. |
|
||||||
|
| `no_audio_detected` | No audio has been detected yet. |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Update Behavior
|
||||||
|
|
||||||
|
### Incremental Updates
|
||||||
|
|
||||||
|
The API sends **only changed or new segments**. Clients should:
|
||||||
|
|
||||||
|
1. Maintain a local map of segments by ID
|
||||||
|
2. When receiving an update, merge/update segments by ID
|
||||||
|
3. Render only the changed segments
|
||||||
|
|
||||||
|
### Language Detection
|
||||||
|
|
||||||
|
When language is detected for a segment:
|
||||||
|
|
||||||
|
```jsonc
|
||||||
|
// Update 1: No language yet
|
||||||
|
{
|
||||||
|
"segments": [
|
||||||
|
{"id": 1, "speaker": 1, "text": "May see", "language": null}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update 2: Same segment ID, language now detected
|
||||||
|
{
|
||||||
|
"segments": [
|
||||||
|
{"id": 1, "speaker": 1, "text": "Merci", "language": "fr"}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Client behavior**: **Replace** the existing segment with the same ID.
|
||||||
|
|
||||||
|
### Buffer Behavior
|
||||||
|
|
||||||
|
Buffers are **per-segment** to handle multi-speaker scenarios correctly.
|
||||||
|
|
||||||
|
#### Example: Translation with diarization and translation
|
||||||
|
|
||||||
|
```jsonc
|
||||||
|
// Update 1
|
||||||
|
{
|
||||||
|
"segments": [
|
||||||
|
{
|
||||||
|
"id": 1,
|
||||||
|
"speaker": 1,
|
||||||
|
"text": "Hello world, how are",
|
||||||
|
"translation": "",
|
||||||
|
"buffer": {
|
||||||
|
"transcription": "",
|
||||||
|
"diarization": " you on",
|
||||||
|
"translation": "Bonjour le monde"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
// ==== Frontend ====
|
||||||
|
// <SPEAKER>1</SPEAKER>
|
||||||
|
// <TRANSCRIPTION>Hello world, how are <DIARIZATION BUFFER> you on</DIARIZATION BUFFER></TRANSCRIPTION>
|
||||||
|
// <TRANSLATION><TRANSLATION BUFFER>Bonjour le monde</TRANSLATION BUFFER></TRANSLATION>
|
||||||
|
|
||||||
|
|
||||||
|
// Update 2
|
||||||
|
{
|
||||||
|
"segments": [
|
||||||
|
{
|
||||||
|
"id": 1,
|
||||||
|
"speaker": 1,
|
||||||
|
"text": " you on this",
|
||||||
|
"translation": "Bonjour tout le monde",
|
||||||
|
"buffer": {
|
||||||
|
"transcription": "",
|
||||||
|
"diarization": " beautiful day",
|
||||||
|
"translation": ",comment"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
]
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
// ==== Frontend ====
|
||||||
|
// <SPEAKER>1</SPEAKER>
|
||||||
|
// <TRANSCRIPTION>Hello world, how are you on this<DIARIZATION BUFFER> beautiful day</DIARIZATION BUFFER></TRANSCRIPTION>
|
||||||
|
// <TRANSLATION>Bonjour tout le monde<TRANSLATION BUFFER>, comment</TRANSLATION BUFFER><TRANSLATION>
|
||||||
|
```
|
||||||
|
|
||||||
|
### Silence Segments
|
||||||
|
|
||||||
|
Silence is represented with the speaker id = `-2`:
|
||||||
|
|
||||||
|
```jsonc
|
||||||
|
{
|
||||||
|
"id": 5,
|
||||||
|
"speaker": -2,
|
||||||
|
"text": "",
|
||||||
|
"start": 10.5,
|
||||||
|
"end": 12.3
|
||||||
|
}
|
||||||
|
```
|
||||||
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
|
|||||||
|
|
||||||
[project]
|
[project]
|
||||||
name = "whisperlivekit"
|
name = "whisperlivekit"
|
||||||
version = "0.2.10"
|
version = "0.2.12"
|
||||||
description = "Real-time speech-to-text with speaker diarization using Whisper"
|
description = "Real-time speech-to-text with speaker diarization using Whisper"
|
||||||
readme = "README.md"
|
readme = "README.md"
|
||||||
authors = [
|
authors = [
|
||||||
@@ -50,7 +50,7 @@ Homepage = "https://github.com/QuentinFuxa/WhisperLiveKit"
|
|||||||
whisperlivekit-server = "whisperlivekit.basic_server:main"
|
whisperlivekit-server = "whisperlivekit.basic_server:main"
|
||||||
|
|
||||||
[tool.setuptools]
|
[tool.setuptools]
|
||||||
packages = ["whisperlivekit", "whisperlivekit.diarization", "whisperlivekit.simul_whisper", "whisperlivekit.simul_whisper.whisper", "whisperlivekit.simul_whisper.whisper.assets", "whisperlivekit.simul_whisper.whisper.normalizers", "whisperlivekit.web", "whisperlivekit.whisper_streaming_custom"]
|
packages = ["whisperlivekit", "whisperlivekit.diarization", "whisperlivekit.simul_whisper", "whisperlivekit.simul_whisper.whisper", "whisperlivekit.simul_whisper.whisper.assets", "whisperlivekit.simul_whisper.whisper.normalizers", "whisperlivekit.web", "whisperlivekit.whisper_streaming_custom", "whisperlivekit.translation"]
|
||||||
|
|
||||||
[tool.setuptools.package-data]
|
[tool.setuptools.package-data]
|
||||||
whisperlivekit = ["web/*.html", "web/*.css", "web/*.js", "web/src/*.svg"]
|
whisperlivekit = ["web/*.html", "web/*.css", "web/*.js", "web/src/*.svg"]
|
||||||
|
|||||||
38
sync_extension.py
Normal file
@@ -0,0 +1,38 @@
|
|||||||
|
import shutil
|
||||||
|
import os
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
def sync_extension_files():
|
||||||
|
"""Copy core files from web directory to Chrome extension directory."""
|
||||||
|
|
||||||
|
web_dir = Path("whisperlivekit/web")
|
||||||
|
extension_dir = Path("chrome-extension")
|
||||||
|
|
||||||
|
files_to_sync = [
|
||||||
|
"live_transcription.html", "live_transcription.js", "live_transcription.css"
|
||||||
|
]
|
||||||
|
|
||||||
|
svg_files = [
|
||||||
|
"system_mode.svg",
|
||||||
|
"light_mode.svg",
|
||||||
|
"dark_mode.svg",
|
||||||
|
"settings.svg"
|
||||||
|
]
|
||||||
|
|
||||||
|
for file in files_to_sync:
|
||||||
|
src_path = web_dir / file
|
||||||
|
dest_path = extension_dir / file
|
||||||
|
|
||||||
|
dest_path.parent.mkdir(parents=True, exist_ok=True)
|
||||||
|
shutil.copy2(src_path, dest_path)
|
||||||
|
|
||||||
|
for svg_file in svg_files:
|
||||||
|
src_path = web_dir / "src" / svg_file
|
||||||
|
dest_path = extension_dir / "web" / "src" / svg_file
|
||||||
|
dest_path.parent.mkdir(parents=True, exist_ok=True)
|
||||||
|
shutil.copy2(src_path, dest_path)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
|
||||||
|
sync_extension_files()
|
||||||
@@ -4,18 +4,30 @@ from time import time, sleep
|
|||||||
import math
|
import math
|
||||||
import logging
|
import logging
|
||||||
import traceback
|
import traceback
|
||||||
from whisperlivekit.timed_objects import ASRToken, Silence, Line, FrontData, State, Transcript
|
from whisperlivekit.timed_objects import ASRToken, Silence, Line, FrontData, State, Transcript, ChangeSpeaker
|
||||||
from whisperlivekit.core import TranscriptionEngine, online_factory, online_diarization_factory, online_translation_factory
|
from whisperlivekit.core import TranscriptionEngine, online_factory, online_diarization_factory, online_translation_factory
|
||||||
from whisperlivekit.silero_vad_iterator import FixedVADIterator
|
from whisperlivekit.silero_vad_iterator import FixedVADIterator
|
||||||
from whisperlivekit.results_formater import format_output
|
from whisperlivekit.results_formater import format_output
|
||||||
from whisperlivekit.ffmpeg_manager import FFmpegManager, FFmpegState
|
from whisperlivekit.ffmpeg_manager import FFmpegManager, FFmpegState
|
||||||
# Set up logging once
|
|
||||||
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
|
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
logger.setLevel(logging.DEBUG)
|
logger.setLevel(logging.DEBUG)
|
||||||
|
|
||||||
SENTINEL = object() # unique sentinel object for end of stream marker
|
SENTINEL = object() # unique sentinel object for end of stream marker
|
||||||
|
|
||||||
|
def cut_at(cumulative_pcm, cut_sec):
|
||||||
|
cumulative_len = 0
|
||||||
|
cut_sample = int(cut_sec * 16000)
|
||||||
|
|
||||||
|
for ind, pcm_array in enumerate(cumulative_pcm):
|
||||||
|
if (cumulative_len + len(pcm_array)) >= cut_sample:
|
||||||
|
cut_chunk = cut_sample - cumulative_len
|
||||||
|
before = np.concatenate(cumulative_pcm[:ind] + [cumulative_pcm[ind][:cut_chunk]])
|
||||||
|
after = [cumulative_pcm[ind][cut_chunk:]] + cumulative_pcm[ind+1:]
|
||||||
|
return before, after
|
||||||
|
cumulative_len += len(pcm_array)
|
||||||
|
return np.concatenate(cumulative_pcm), []
|
||||||
|
|
||||||
async def get_all_from_queue(queue):
|
async def get_all_from_queue(queue):
|
||||||
items = []
|
items = []
|
||||||
@@ -50,28 +62,32 @@ class AudioProcessor:
|
|||||||
self.bytes_per_sec = self.samples_per_sec * self.bytes_per_sample
|
self.bytes_per_sec = self.samples_per_sec * self.bytes_per_sample
|
||||||
self.max_bytes_per_sec = 32000 * 5 # 5 seconds of audio at 32 kHz
|
self.max_bytes_per_sec = 32000 * 5 # 5 seconds of audio at 32 kHz
|
||||||
self.is_pcm_input = self.args.pcm_input
|
self.is_pcm_input = self.args.pcm_input
|
||||||
self.debug = False
|
|
||||||
|
|
||||||
# State management
|
# State management
|
||||||
self.is_stopping = False
|
self.is_stopping = False
|
||||||
self.silence = False
|
self.silence = False
|
||||||
self.silence_duration = 0.0
|
self.silence_duration = 0.0
|
||||||
self.tokens = []
|
self.tokens = []
|
||||||
|
self.last_validated_token = 0
|
||||||
self.translated_segments = []
|
self.translated_segments = []
|
||||||
self.buffer_transcription = Transcript()
|
self.buffer_transcription = Transcript()
|
||||||
self.buffer_diarization = ""
|
|
||||||
self.end_buffer = 0
|
self.end_buffer = 0
|
||||||
self.end_attributed_speaker = 0
|
self.end_attributed_speaker = 0
|
||||||
self.lock = asyncio.Lock()
|
self.lock = asyncio.Lock()
|
||||||
self.beg_loop = None #to deal with a potential little lag at the websocket initialization, this is now set in process_audio
|
self.beg_loop = 0.0 #to deal with a potential little lag at the websocket initialization, this is now set in process_audio
|
||||||
self.sep = " " # Default separator
|
self.sep = " " # Default separator
|
||||||
self.last_response_content = FrontData()
|
self.last_response_content = FrontData()
|
||||||
self.last_detected_speaker = None
|
self.last_detected_speaker = None
|
||||||
self.speaker_languages = {}
|
self.speaker_languages = {}
|
||||||
|
self.diarization_before_transcription = False
|
||||||
|
|
||||||
|
if self.diarization_before_transcription:
|
||||||
|
self.cumulative_pcm = []
|
||||||
|
self.last_start = 0.0
|
||||||
|
self.last_end = 0.0
|
||||||
|
|
||||||
# Models and processing
|
# Models and processing
|
||||||
self.asr = models.asr
|
self.asr = models.asr
|
||||||
self.tokenizer = models.tokenizer
|
|
||||||
self.vac_model = models.vac_model
|
self.vac_model = models.vac_model
|
||||||
if self.args.vac:
|
if self.args.vac:
|
||||||
self.vac = FixedVADIterator(models.vac_model)
|
self.vac = FixedVADIterator(models.vac_model)
|
||||||
@@ -99,16 +115,21 @@ class AudioProcessor:
|
|||||||
|
|
||||||
self.transcription_task = None
|
self.transcription_task = None
|
||||||
self.diarization_task = None
|
self.diarization_task = None
|
||||||
|
self.translation_task = None
|
||||||
self.watchdog_task = None
|
self.watchdog_task = None
|
||||||
self.all_tasks_for_cleanup = []
|
self.all_tasks_for_cleanup = []
|
||||||
|
|
||||||
|
self.transcription = None
|
||||||
|
self.translation = None
|
||||||
|
self.diarization = None
|
||||||
|
|
||||||
if self.args.transcription:
|
if self.args.transcription:
|
||||||
self.online = online_factory(self.args, models.asr, models.tokenizer)
|
self.transcription = online_factory(self.args, models.asr)
|
||||||
self.sep = self.online.asr.sep
|
self.sep = self.transcription.asr.sep
|
||||||
if self.args.diarization:
|
if self.args.diarization:
|
||||||
self.diarization = online_diarization_factory(self.args, models.diarization_model)
|
self.diarization = online_diarization_factory(self.args, models.diarization_model)
|
||||||
if self.args.target_language:
|
if models.translation_model:
|
||||||
self.online_translation = online_translation_factory(self.args, models.translation_model)
|
self.translation = online_translation_factory(self.args, models.translation_model)
|
||||||
|
|
||||||
def convert_pcm_to_float(self, pcm_buffer):
|
def convert_pcm_to_float(self, pcm_buffer):
|
||||||
"""Convert PCM buffer in s16le format to normalized NumPy array."""
|
"""Convert PCM buffer in s16le format to normalized NumPy array."""
|
||||||
@@ -117,7 +138,7 @@ class AudioProcessor:
|
|||||||
async def add_dummy_token(self):
|
async def add_dummy_token(self):
|
||||||
"""Placeholder token when no transcription is available."""
|
"""Placeholder token when no transcription is available."""
|
||||||
async with self.lock:
|
async with self.lock:
|
||||||
current_time = time() - self.beg_loop if self.beg_loop else 0
|
current_time = time() - self.beg_loop
|
||||||
self.tokens.append(ASRToken(
|
self.tokens.append(ASRToken(
|
||||||
start=current_time, end=current_time + 1,
|
start=current_time, end=current_time + 1,
|
||||||
text=".", speaker=-1, is_dummy=True
|
text=".", speaker=-1, is_dummy=True
|
||||||
@@ -140,9 +161,9 @@ class AudioProcessor:
|
|||||||
|
|
||||||
return State(
|
return State(
|
||||||
tokens=self.tokens.copy(),
|
tokens=self.tokens.copy(),
|
||||||
|
last_validated_token=self.last_validated_token,
|
||||||
translated_segments=self.translated_segments.copy(),
|
translated_segments=self.translated_segments.copy(),
|
||||||
buffer_transcription=self.buffer_transcription,
|
buffer_transcription=self.buffer_transcription,
|
||||||
buffer_diarization=self.buffer_diarization,
|
|
||||||
end_buffer=self.end_buffer,
|
end_buffer=self.end_buffer,
|
||||||
end_attributed_speaker=self.end_attributed_speaker,
|
end_attributed_speaker=self.end_attributed_speaker,
|
||||||
remaining_time_transcription=remaining_transcription,
|
remaining_time_transcription=remaining_transcription,
|
||||||
@@ -154,7 +175,7 @@ class AudioProcessor:
|
|||||||
async with self.lock:
|
async with self.lock:
|
||||||
self.tokens = []
|
self.tokens = []
|
||||||
self.translated_segments = []
|
self.translated_segments = []
|
||||||
self.buffer_transcription = self.buffer_diarization = Transcript()
|
self.buffer_transcription = Transcript()
|
||||||
self.end_buffer = self.end_attributed_speaker = 0
|
self.end_buffer = self.end_attributed_speaker = 0
|
||||||
self.beg_loop = time()
|
self.beg_loop = time()
|
||||||
|
|
||||||
@@ -201,11 +222,11 @@ class AudioProcessor:
|
|||||||
await asyncio.sleep(0.2)
|
await asyncio.sleep(0.2)
|
||||||
|
|
||||||
logger.info("FFmpeg stdout processing finished. Signaling downstream processors if needed.")
|
logger.info("FFmpeg stdout processing finished. Signaling downstream processors if needed.")
|
||||||
if self.args.transcription and self.transcription_queue:
|
if not self.diarization_before_transcription and self.transcription_queue:
|
||||||
await self.transcription_queue.put(SENTINEL)
|
await self.transcription_queue.put(SENTINEL)
|
||||||
if self.args.diarization and self.diarization_queue:
|
if self.diarization:
|
||||||
await self.diarization_queue.put(SENTINEL)
|
await self.diarization_queue.put(SENTINEL)
|
||||||
if self.args.target_language and self.translation_queue:
|
if self.translation:
|
||||||
await self.translation_queue.put(SENTINEL)
|
await self.translation_queue.put(SENTINEL)
|
||||||
|
|
||||||
async def transcription_processor(self):
|
async def transcription_processor(self):
|
||||||
@@ -219,13 +240,8 @@ class AudioProcessor:
|
|||||||
logger.debug("Transcription processor received sentinel. Finishing.")
|
logger.debug("Transcription processor received sentinel. Finishing.")
|
||||||
self.transcription_queue.task_done()
|
self.transcription_queue.task_done()
|
||||||
break
|
break
|
||||||
|
|
||||||
if not self.online:
|
|
||||||
logger.warning("Transcription processor: self.online not initialized.")
|
|
||||||
self.transcription_queue.task_done()
|
|
||||||
continue
|
|
||||||
|
|
||||||
asr_internal_buffer_duration_s = len(getattr(self.online, 'audio_buffer', [])) / self.online.SAMPLING_RATE
|
asr_internal_buffer_duration_s = len(getattr(self.transcription, 'audio_buffer', [])) / self.transcription.SAMPLING_RATE
|
||||||
transcription_lag_s = max(0.0, time() - self.beg_loop - self.end_buffer)
|
transcription_lag_s = max(0.0, time() - self.beg_loop - self.end_buffer)
|
||||||
asr_processing_logs = f"internal_buffer={asr_internal_buffer_duration_s:.2f}s | lag={transcription_lag_s:.2f}s |"
|
asr_processing_logs = f"internal_buffer={asr_internal_buffer_duration_s:.2f}s | lag={transcription_lag_s:.2f}s |"
|
||||||
if type(item) is Silence:
|
if type(item) is Silence:
|
||||||
@@ -234,23 +250,23 @@ class AudioProcessor:
|
|||||||
asr_processing_logs += f" | last_end = {self.tokens[-1].end} |"
|
asr_processing_logs += f" | last_end = {self.tokens[-1].end} |"
|
||||||
logger.info(asr_processing_logs)
|
logger.info(asr_processing_logs)
|
||||||
cumulative_pcm_duration_stream_time += item.duration
|
cumulative_pcm_duration_stream_time += item.duration
|
||||||
self.online.insert_silence(item.duration, self.tokens[-1].end if self.tokens else 0)
|
self.transcription.insert_silence(item.duration, self.tokens[-1].end if self.tokens else 0)
|
||||||
continue
|
continue
|
||||||
logger.info(asr_processing_logs)
|
elif isinstance(item, ChangeSpeaker):
|
||||||
|
self.transcription.new_speaker(item)
|
||||||
if isinstance(item, np.ndarray):
|
elif isinstance(item, np.ndarray):
|
||||||
pcm_array = item
|
pcm_array = item
|
||||||
else:
|
|
||||||
raise Exception('item should be pcm_array')
|
logger.info(asr_processing_logs)
|
||||||
|
|
||||||
duration_this_chunk = len(pcm_array) / self.sample_rate
|
duration_this_chunk = len(pcm_array) / self.sample_rate
|
||||||
cumulative_pcm_duration_stream_time += duration_this_chunk
|
cumulative_pcm_duration_stream_time += duration_this_chunk
|
||||||
stream_time_end_of_current_pcm = cumulative_pcm_duration_stream_time
|
stream_time_end_of_current_pcm = cumulative_pcm_duration_stream_time
|
||||||
|
|
||||||
self.online.insert_audio_chunk(pcm_array, stream_time_end_of_current_pcm)
|
self.transcription.insert_audio_chunk(pcm_array, stream_time_end_of_current_pcm)
|
||||||
new_tokens, current_audio_processed_upto = await asyncio.to_thread(self.online.process_iter)
|
new_tokens, current_audio_processed_upto = await asyncio.to_thread(self.transcription.process_iter)
|
||||||
|
|
||||||
_buffer_transcript = self.online.get_buffer()
|
_buffer_transcript = self.transcription.get_buffer()
|
||||||
buffer_text = _buffer_transcript.text
|
buffer_text = _buffer_transcript.text
|
||||||
|
|
||||||
if new_tokens:
|
if new_tokens:
|
||||||
@@ -297,8 +313,9 @@ class AudioProcessor:
|
|||||||
|
|
||||||
async def diarization_processor(self, diarization_obj):
|
async def diarization_processor(self, diarization_obj):
|
||||||
"""Process audio chunks for speaker diarization."""
|
"""Process audio chunks for speaker diarization."""
|
||||||
buffer_diarization = ""
|
if self.diarization_before_transcription:
|
||||||
cumulative_pcm_duration_stream_time = 0.0
|
self.current_speaker = 0
|
||||||
|
await self.transcription_queue.put(ChangeSpeaker(speaker=self.current_speaker, start=0.0))
|
||||||
while True:
|
while True:
|
||||||
try:
|
try:
|
||||||
item = await self.diarization_queue.get()
|
item = await self.diarization_queue.get()
|
||||||
@@ -307,7 +324,6 @@ class AudioProcessor:
|
|||||||
self.diarization_queue.task_done()
|
self.diarization_queue.task_done()
|
||||||
break
|
break
|
||||||
elif type(item) is Silence:
|
elif type(item) is Silence:
|
||||||
cumulative_pcm_duration_stream_time += item.duration
|
|
||||||
diarization_obj.insert_silence(item.duration)
|
diarization_obj.insert_silence(item.duration)
|
||||||
continue
|
continue
|
||||||
elif isinstance(item, np.ndarray):
|
elif isinstance(item, np.ndarray):
|
||||||
@@ -315,24 +331,41 @@ class AudioProcessor:
|
|||||||
else:
|
else:
|
||||||
raise Exception('item should be pcm_array')
|
raise Exception('item should be pcm_array')
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
# Process diarization
|
# Process diarization
|
||||||
await diarization_obj.diarize(pcm_array)
|
await diarization_obj.diarize(pcm_array)
|
||||||
|
if self.diarization_before_transcription:
|
||||||
async with self.lock:
|
segments = diarization_obj.get_segments()
|
||||||
self.tokens, last_segment = diarization_obj.assign_speakers_to_tokens(
|
self.cumulative_pcm.append(pcm_array)
|
||||||
self.tokens,
|
if segments:
|
||||||
use_punctuation_split=self.args.punctuation_split
|
last_segment = segments[-1]
|
||||||
)
|
if last_segment.speaker != self.current_speaker:
|
||||||
if len(self.tokens) > 0:
|
cut_sec = last_segment.start - self.last_end
|
||||||
self.end_attributed_speaker = max(self.tokens[-1].end, self.end_attributed_speaker)
|
to_transcript, self.cumulative_pcm = cut_at(self.cumulative_pcm, cut_sec)
|
||||||
if buffer_diarization:
|
await self.transcription_queue.put(to_transcript)
|
||||||
self.buffer_diarization = buffer_diarization
|
|
||||||
|
self.current_speaker = last_segment.speaker
|
||||||
# if last_segment is not None and last_segment.speaker != self.last_detected_speaker:
|
await self.transcription_queue.put(ChangeSpeaker(speaker=self.current_speaker, start=last_segment.start))
|
||||||
# if not self.speaker_languages.get(last_segment.speaker, None):
|
|
||||||
# self.last_detected_speaker = last_segment.speaker
|
cut_sec = last_segment.end - last_segment.start
|
||||||
# self.online.on_new_speaker(last_segment)
|
to_transcript, self.cumulative_pcm = cut_at(self.cumulative_pcm, cut_sec)
|
||||||
|
await self.transcription_queue.put(to_transcript)
|
||||||
|
self.last_start = last_segment.start
|
||||||
|
self.last_end = last_segment.end
|
||||||
|
else:
|
||||||
|
cut_sec = last_segment.end - self.last_end
|
||||||
|
to_transcript, self.cumulative_pcm = cut_at(self.cumulative_pcm, cut_sec)
|
||||||
|
await self.transcription_queue.put(to_transcript)
|
||||||
|
self.last_end = last_segment.end
|
||||||
|
elif not self.diarization_before_transcription:
|
||||||
|
async with self.lock:
|
||||||
|
self.tokens = diarization_obj.assign_speakers_to_tokens(
|
||||||
|
self.tokens,
|
||||||
|
use_punctuation_split=self.args.punctuation_split
|
||||||
|
)
|
||||||
|
if len(self.tokens) > 0:
|
||||||
|
self.end_attributed_speaker = max(self.tokens[-1].end, self.end_attributed_speaker)
|
||||||
self.diarization_queue.task_done()
|
self.diarization_queue.task_done()
|
||||||
|
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
@@ -342,7 +375,7 @@ class AudioProcessor:
|
|||||||
self.diarization_queue.task_done()
|
self.diarization_queue.task_done()
|
||||||
logger.info("Diarization processor task finished.")
|
logger.info("Diarization processor task finished.")
|
||||||
|
|
||||||
async def translation_processor(self, online_translation):
|
async def translation_processor(self):
|
||||||
# the idea is to ignore diarization for the moment. We use only transcription tokens.
|
# the idea is to ignore diarization for the moment. We use only transcription tokens.
|
||||||
# And the speaker is attributed given the segments used for the translation
|
# And the speaker is attributed given the segments used for the translation
|
||||||
# in the future we want to have different languages for each speaker etc, so it will be more complex.
|
# in the future we want to have different languages for each speaker etc, so it will be more complex.
|
||||||
@@ -354,7 +387,7 @@ class AudioProcessor:
|
|||||||
self.translation_queue.task_done()
|
self.translation_queue.task_done()
|
||||||
break
|
break
|
||||||
elif type(item) is Silence:
|
elif type(item) is Silence:
|
||||||
online_translation.insert_silence(item.duration)
|
self.translation.insert_silence(item.duration)
|
||||||
continue
|
continue
|
||||||
|
|
||||||
# get all the available tokens for translation. The more words, the more precise
|
# get all the available tokens for translation. The more words, the more precise
|
||||||
@@ -366,11 +399,14 @@ class AudioProcessor:
|
|||||||
if additional_token is SENTINEL:
|
if additional_token is SENTINEL:
|
||||||
sentinel_found = True
|
sentinel_found = True
|
||||||
break
|
break
|
||||||
tokens_to_process.append(additional_token)
|
elif type(additional_token) is Silence:
|
||||||
|
self.translation.insert_silence(additional_token.duration)
|
||||||
|
continue
|
||||||
|
else:
|
||||||
|
tokens_to_process.append(additional_token)
|
||||||
if tokens_to_process:
|
if tokens_to_process:
|
||||||
online_translation.insert_tokens(tokens_to_process)
|
self.translation.insert_tokens(tokens_to_process)
|
||||||
self.translated_segments = await asyncio.to_thread(online_translation.process)
|
self.translated_segments = await asyncio.to_thread(self.translation.process)
|
||||||
|
|
||||||
self.translation_queue.task_done()
|
self.translation_queue.task_done()
|
||||||
for _ in additional_tokens:
|
for _ in additional_tokens:
|
||||||
self.translation_queue.task_done()
|
self.translation_queue.task_done()
|
||||||
@@ -393,53 +429,33 @@ class AudioProcessor:
|
|||||||
"""Format processing results for output."""
|
"""Format processing results for output."""
|
||||||
while True:
|
while True:
|
||||||
try:
|
try:
|
||||||
# If FFmpeg error occurred, notify front-end
|
|
||||||
if self._ffmpeg_error:
|
if self._ffmpeg_error:
|
||||||
yield FrontData(
|
yield FrontData(status="error", error=f"FFmpeg error: {self._ffmpeg_error}")
|
||||||
status="error",
|
|
||||||
error=f"FFmpeg error: {self._ffmpeg_error}"
|
|
||||||
)
|
|
||||||
self._ffmpeg_error = None
|
self._ffmpeg_error = None
|
||||||
await asyncio.sleep(1)
|
await asyncio.sleep(1)
|
||||||
continue
|
continue
|
||||||
|
|
||||||
# Get current state
|
|
||||||
state = await self.get_current_state()
|
state = await self.get_current_state()
|
||||||
|
|
||||||
# Add dummy tokens if needed
|
|
||||||
if (not state.tokens or state.tokens[-1].is_dummy) and not self.args.transcription and self.args.diarization:
|
|
||||||
await self.add_dummy_token()
|
|
||||||
sleep(0.5)
|
|
||||||
state = await self.get_current_state()
|
|
||||||
|
|
||||||
# Format output
|
|
||||||
lines, undiarized_text, end_w_silence = format_output(
|
lines, undiarized_text = format_output(
|
||||||
state,
|
state,
|
||||||
self.silence,
|
self.silence,
|
||||||
current_time = time() - self.beg_loop if self.beg_loop else None,
|
current_time = time() - self.beg_loop,
|
||||||
args = self.args,
|
args = self.args,
|
||||||
debug = self.debug,
|
|
||||||
sep=self.sep
|
sep=self.sep
|
||||||
)
|
)
|
||||||
if end_w_silence:
|
if lines and lines[-1].speaker == -2:
|
||||||
buffer_transcription = Transcript()
|
buffer_transcription = Transcript()
|
||||||
buffer_diarization = Transcript()
|
|
||||||
else:
|
else:
|
||||||
buffer_transcription = state.buffer_transcription
|
buffer_transcription = state.buffer_transcription
|
||||||
buffer_diarization = state.buffer_diarization
|
|
||||||
|
|
||||||
# Handle undiarized text
|
buffer_diarization = ''
|
||||||
if undiarized_text:
|
if undiarized_text:
|
||||||
combined = self.sep.join(undiarized_text)
|
buffer_diarization = self.sep.join(undiarized_text)
|
||||||
if buffer_transcription:
|
|
||||||
combined += self.sep
|
|
||||||
|
|
||||||
async with self.lock:
|
async with self.lock:
|
||||||
self.end_attributed_speaker = state.end_attributed_speaker
|
self.end_attributed_speaker = state.end_attributed_speaker
|
||||||
if buffer_diarization:
|
|
||||||
self.buffer_diarization = buffer_diarization
|
|
||||||
|
|
||||||
buffer_diarization.text = combined
|
|
||||||
|
|
||||||
response_status = "active_transcription"
|
response_status = "active_transcription"
|
||||||
if not state.tokens and not buffer_transcription and not buffer_diarization:
|
if not state.tokens and not buffer_transcription and not buffer_diarization:
|
||||||
@@ -455,8 +471,8 @@ class AudioProcessor:
|
|||||||
response = FrontData(
|
response = FrontData(
|
||||||
status=response_status,
|
status=response_status,
|
||||||
lines=lines,
|
lines=lines,
|
||||||
buffer_transcription=buffer_transcription.text,
|
buffer_transcription=buffer_transcription.text.strip(),
|
||||||
buffer_diarization=buffer_transcription.text,
|
buffer_diarization=buffer_diarization,
|
||||||
remaining_time_transcription=state.remaining_time_transcription,
|
remaining_time_transcription=state.remaining_time_transcription,
|
||||||
remaining_time_diarization=state.remaining_time_diarization if self.args.diarization else 0
|
remaining_time_diarization=state.remaining_time_diarization if self.args.diarization else 0
|
||||||
)
|
)
|
||||||
@@ -505,18 +521,18 @@ class AudioProcessor:
|
|||||||
self.all_tasks_for_cleanup.append(self.ffmpeg_reader_task)
|
self.all_tasks_for_cleanup.append(self.ffmpeg_reader_task)
|
||||||
processing_tasks_for_watchdog.append(self.ffmpeg_reader_task)
|
processing_tasks_for_watchdog.append(self.ffmpeg_reader_task)
|
||||||
|
|
||||||
if self.args.transcription and self.online:
|
if self.transcription:
|
||||||
self.transcription_task = asyncio.create_task(self.transcription_processor())
|
self.transcription_task = asyncio.create_task(self.transcription_processor())
|
||||||
self.all_tasks_for_cleanup.append(self.transcription_task)
|
self.all_tasks_for_cleanup.append(self.transcription_task)
|
||||||
processing_tasks_for_watchdog.append(self.transcription_task)
|
processing_tasks_for_watchdog.append(self.transcription_task)
|
||||||
|
|
||||||
if self.args.diarization and self.diarization:
|
if self.diarization:
|
||||||
self.diarization_task = asyncio.create_task(self.diarization_processor(self.diarization))
|
self.diarization_task = asyncio.create_task(self.diarization_processor(self.diarization))
|
||||||
self.all_tasks_for_cleanup.append(self.diarization_task)
|
self.all_tasks_for_cleanup.append(self.diarization_task)
|
||||||
processing_tasks_for_watchdog.append(self.diarization_task)
|
processing_tasks_for_watchdog.append(self.diarization_task)
|
||||||
|
|
||||||
if self.args.target_language and self.args.lan != 'auto':
|
if self.translation:
|
||||||
self.translation_task = asyncio.create_task(self.translation_processor(self.online_translation))
|
self.translation_task = asyncio.create_task(self.translation_processor())
|
||||||
self.all_tasks_for_cleanup.append(self.translation_task)
|
self.all_tasks_for_cleanup.append(self.translation_task)
|
||||||
processing_tasks_for_watchdog.append(self.translation_task)
|
processing_tasks_for_watchdog.append(self.translation_task)
|
||||||
|
|
||||||
@@ -566,7 +582,7 @@ class AudioProcessor:
|
|||||||
logger.info("FFmpeg manager stopped.")
|
logger.info("FFmpeg manager stopped.")
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logger.warning(f"Error stopping FFmpeg manager: {e}")
|
logger.warning(f"Error stopping FFmpeg manager: {e}")
|
||||||
if self.args.diarization and hasattr(self, 'dianization') and hasattr(self.diarization, 'close'):
|
if self.diarization:
|
||||||
self.diarization.close()
|
self.diarization.close()
|
||||||
logger.info("AudioProcessor cleanup complete.")
|
logger.info("AudioProcessor cleanup complete.")
|
||||||
|
|
||||||
@@ -619,9 +635,13 @@ class AudioProcessor:
|
|||||||
f"Consider using a smaller model."
|
f"Consider using a smaller model."
|
||||||
)
|
)
|
||||||
|
|
||||||
# Process audio chunk
|
chunk_size = min(len(self.pcm_buffer), self.max_bytes_per_sec)
|
||||||
pcm_array = self.convert_pcm_to_float(self.pcm_buffer[:self.max_bytes_per_sec])
|
aligned_chunk_size = (chunk_size // self.bytes_per_sample) * self.bytes_per_sample
|
||||||
self.pcm_buffer = self.pcm_buffer[self.max_bytes_per_sec:]
|
|
||||||
|
if aligned_chunk_size == 0:
|
||||||
|
return
|
||||||
|
pcm_array = self.convert_pcm_to_float(self.pcm_buffer[:aligned_chunk_size])
|
||||||
|
self.pcm_buffer = self.pcm_buffer[aligned_chunk_size:]
|
||||||
|
|
||||||
res = None
|
res = None
|
||||||
end_of_audio = False
|
end_of_audio = False
|
||||||
@@ -638,7 +658,7 @@ class AudioProcessor:
|
|||||||
silence_buffer = Silence(duration=time() - self.start_silence)
|
silence_buffer = Silence(duration=time() - self.start_silence)
|
||||||
|
|
||||||
if silence_buffer:
|
if silence_buffer:
|
||||||
if self.args.transcription and self.transcription_queue:
|
if not self.diarization_before_transcription and self.transcription_queue:
|
||||||
await self.transcription_queue.put(silence_buffer)
|
await self.transcription_queue.put(silence_buffer)
|
||||||
if self.args.diarization and self.diarization_queue:
|
if self.args.diarization and self.diarization_queue:
|
||||||
await self.diarization_queue.put(silence_buffer)
|
await self.diarization_queue.put(silence_buffer)
|
||||||
@@ -646,7 +666,7 @@ class AudioProcessor:
|
|||||||
await self.translation_queue.put(silence_buffer)
|
await self.translation_queue.put(silence_buffer)
|
||||||
|
|
||||||
if not self.silence:
|
if not self.silence:
|
||||||
if self.args.transcription and self.transcription_queue:
|
if not self.diarization_before_transcription and self.transcription_queue:
|
||||||
await self.transcription_queue.put(pcm_array.copy())
|
await self.transcription_queue.put(pcm_array.copy())
|
||||||
|
|
||||||
if self.args.diarization and self.diarization_queue:
|
if self.args.diarization and self.diarization_queue:
|
||||||
|
|||||||
@@ -5,9 +5,6 @@ from fastapi.middleware.cors import CORSMiddleware
|
|||||||
from whisperlivekit import TranscriptionEngine, AudioProcessor, get_inline_ui_html, parse_args
|
from whisperlivekit import TranscriptionEngine, AudioProcessor, get_inline_ui_html, parse_args
|
||||||
import asyncio
|
import asyncio
|
||||||
import logging
|
import logging
|
||||||
from starlette.staticfiles import StaticFiles
|
|
||||||
import pathlib
|
|
||||||
import whisperlivekit.web as webpkg
|
|
||||||
|
|
||||||
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
|
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
|
||||||
logging.getLogger().setLevel(logging.WARNING)
|
logging.getLogger().setLevel(logging.WARNING)
|
||||||
@@ -33,8 +30,6 @@ app.add_middleware(
|
|||||||
allow_methods=["*"],
|
allow_methods=["*"],
|
||||||
allow_headers=["*"],
|
allow_headers=["*"],
|
||||||
)
|
)
|
||||||
web_dir = pathlib.Path(webpkg.__file__).parent
|
|
||||||
app.mount("/web", StaticFiles(directory=str(web_dir)), name="web")
|
|
||||||
|
|
||||||
@app.get("/")
|
@app.get("/")
|
||||||
async def get():
|
async def get():
|
||||||
@@ -123,6 +118,8 @@ def main():
|
|||||||
|
|
||||||
if ssl_kwargs:
|
if ssl_kwargs:
|
||||||
uvicorn_kwargs = {**uvicorn_kwargs, **ssl_kwargs}
|
uvicorn_kwargs = {**uvicorn_kwargs, **ssl_kwargs}
|
||||||
|
if args.forwarded_allow_ips:
|
||||||
|
uvicorn_kwargs = { **uvicorn_kwargs, "forwarded_allow_ips" : args.forwarded_allow_ips }
|
||||||
|
|
||||||
uvicorn.run(**uvicorn_kwargs)
|
uvicorn.run(**uvicorn_kwargs)
|
||||||
|
|
||||||
|
|||||||
@@ -4,10 +4,15 @@ try:
|
|||||||
except ImportError:
|
except ImportError:
|
||||||
from .whisper_streaming_custom.whisper_online import backend_factory
|
from .whisper_streaming_custom.whisper_online import backend_factory
|
||||||
from .whisper_streaming_custom.online_asr import OnlineASRProcessor
|
from .whisper_streaming_custom.online_asr import OnlineASRProcessor
|
||||||
from whisperlivekit.warmup import warmup_asr
|
|
||||||
from argparse import Namespace
|
from argparse import Namespace
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
|
def update_with_kwargs(_dict, kwargs):
|
||||||
|
_dict.update({
|
||||||
|
k: v for k, v in kwargs.items() if k in _dict
|
||||||
|
})
|
||||||
|
return _dict
|
||||||
|
|
||||||
class TranscriptionEngine:
|
class TranscriptionEngine:
|
||||||
_instance = None
|
_instance = None
|
||||||
_initialized = False
|
_initialized = False
|
||||||
@@ -21,76 +26,48 @@ class TranscriptionEngine:
|
|||||||
if TranscriptionEngine._initialized:
|
if TranscriptionEngine._initialized:
|
||||||
return
|
return
|
||||||
|
|
||||||
defaults = {
|
global_params = {
|
||||||
"host": "localhost",
|
"host": "localhost",
|
||||||
"port": 8000,
|
"port": 8000,
|
||||||
"warmup_file": None,
|
|
||||||
"diarization": False,
|
"diarization": False,
|
||||||
"punctuation_split": False,
|
"punctuation_split": False,
|
||||||
"min_chunk_size": 0.5,
|
|
||||||
"model": "tiny",
|
|
||||||
"model_cache_dir": None,
|
|
||||||
"model_dir": None,
|
|
||||||
"lan": "auto",
|
|
||||||
"task": "transcribe",
|
|
||||||
"target_language": "",
|
"target_language": "",
|
||||||
"backend": "faster-whisper",
|
|
||||||
"vac": True,
|
"vac": True,
|
||||||
"vac_chunk_size": 0.04,
|
"vac_chunk_size": 0.04,
|
||||||
"log_level": "DEBUG",
|
"log_level": "DEBUG",
|
||||||
"ssl_certfile": None,
|
"ssl_certfile": None,
|
||||||
"ssl_keyfile": None,
|
"ssl_keyfile": None,
|
||||||
|
"forwarded_allow_ips": None,
|
||||||
"transcription": True,
|
"transcription": True,
|
||||||
"vad": True,
|
"vad": True,
|
||||||
"pcm_input": False,
|
"pcm_input": False,
|
||||||
|
|
||||||
# whisperstreaming params:
|
|
||||||
"buffer_trimming": "segment",
|
|
||||||
"confidence_validation": False,
|
|
||||||
"buffer_trimming_sec": 15,
|
|
||||||
|
|
||||||
# simulstreaming params:
|
|
||||||
"disable_fast_encoder": False,
|
|
||||||
"frame_threshold": 25,
|
|
||||||
"beams": 1,
|
|
||||||
"decoder_type": None,
|
|
||||||
"audio_max_len": 20.0,
|
|
||||||
"audio_min_len": 0.0,
|
|
||||||
"cif_ckpt_path": None,
|
|
||||||
"never_fire": False,
|
|
||||||
"init_prompt": None,
|
|
||||||
"static_init_prompt": None,
|
|
||||||
"max_context_tokens": None,
|
|
||||||
"model_path": './base.pt',
|
|
||||||
"diarization_backend": "sortformer",
|
|
||||||
|
|
||||||
# diarization params:
|
|
||||||
"disable_punctuation_split" : False,
|
"disable_punctuation_split" : False,
|
||||||
"segmentation_model": "pyannote/segmentation-3.0",
|
"diarization_backend": "sortformer",
|
||||||
"embedding_model": "pyannote/embedding",
|
|
||||||
|
|
||||||
# translation params:
|
|
||||||
"nllb_backend": "ctranslate2",
|
|
||||||
"nllb_size": "600M"
|
|
||||||
}
|
}
|
||||||
|
global_params = update_with_kwargs(global_params, kwargs)
|
||||||
|
|
||||||
config_dict = {**defaults, **kwargs}
|
transcription_common_params = {
|
||||||
|
"backend": "simulstreaming",
|
||||||
|
"warmup_file": None,
|
||||||
|
"min_chunk_size": 0.5,
|
||||||
|
"model_size": "tiny",
|
||||||
|
"model_cache_dir": None,
|
||||||
|
"model_dir": None,
|
||||||
|
"lan": "auto",
|
||||||
|
"task": "transcribe",
|
||||||
|
}
|
||||||
|
transcription_common_params = update_with_kwargs(transcription_common_params, kwargs)
|
||||||
|
|
||||||
|
if transcription_common_params['model_size'].endswith(".en"):
|
||||||
|
transcription_common_params["lan"] = "en"
|
||||||
if 'no_transcription' in kwargs:
|
if 'no_transcription' in kwargs:
|
||||||
config_dict['transcription'] = not kwargs['no_transcription']
|
global_params['transcription'] = not global_params['no_transcription']
|
||||||
if 'no_vad' in kwargs:
|
if 'no_vad' in kwargs:
|
||||||
config_dict['vad'] = not kwargs['no_vad']
|
global_params['vad'] = not kwargs['no_vad']
|
||||||
if 'no_vac' in kwargs:
|
if 'no_vac' in kwargs:
|
||||||
config_dict['vac'] = not kwargs['no_vac']
|
global_params['vac'] = not kwargs['no_vac']
|
||||||
|
|
||||||
config_dict.pop('no_transcription', None)
|
|
||||||
config_dict.pop('no_vad', None)
|
|
||||||
|
|
||||||
if 'language' in kwargs:
|
self.args = Namespace(**{**global_params, **transcription_common_params})
|
||||||
config_dict['lan'] = kwargs['language']
|
|
||||||
config_dict.pop('language', None)
|
|
||||||
|
|
||||||
self.args = Namespace(**config_dict)
|
|
||||||
|
|
||||||
self.asr = None
|
self.asr = None
|
||||||
self.tokenizer = None
|
self.tokenizer = None
|
||||||
@@ -104,71 +81,79 @@ class TranscriptionEngine:
|
|||||||
if self.args.transcription:
|
if self.args.transcription:
|
||||||
if self.args.backend == "simulstreaming":
|
if self.args.backend == "simulstreaming":
|
||||||
from whisperlivekit.simul_whisper import SimulStreamingASR
|
from whisperlivekit.simul_whisper import SimulStreamingASR
|
||||||
self.tokenizer = None
|
|
||||||
simulstreaming_kwargs = {}
|
|
||||||
for attr in ['frame_threshold', 'beams', 'decoder_type', 'audio_max_len', 'audio_min_len',
|
|
||||||
'cif_ckpt_path', 'never_fire', 'init_prompt', 'static_init_prompt',
|
|
||||||
'max_context_tokens', 'model_path', 'warmup_file', 'preload_model_count', 'disable_fast_encoder']:
|
|
||||||
if hasattr(self.args, attr):
|
|
||||||
simulstreaming_kwargs[attr] = getattr(self.args, attr)
|
|
||||||
|
|
||||||
# Add segment_length from min_chunk_size
|
|
||||||
simulstreaming_kwargs['segment_length'] = getattr(self.args, 'min_chunk_size', 0.5)
|
|
||||||
simulstreaming_kwargs['task'] = self.args.task
|
|
||||||
|
|
||||||
size = self.args.model
|
simulstreaming_params = {
|
||||||
|
"disable_fast_encoder": False,
|
||||||
|
"custom_alignment_heads": None,
|
||||||
|
"frame_threshold": 25,
|
||||||
|
"beams": 1,
|
||||||
|
"decoder_type": None,
|
||||||
|
"audio_max_len": 20.0,
|
||||||
|
"audio_min_len": 0.0,
|
||||||
|
"cif_ckpt_path": None,
|
||||||
|
"never_fire": False,
|
||||||
|
"init_prompt": None,
|
||||||
|
"static_init_prompt": None,
|
||||||
|
"max_context_tokens": None,
|
||||||
|
"model_path": './base.pt',
|
||||||
|
"preload_model_count": 1,
|
||||||
|
}
|
||||||
|
simulstreaming_params = update_with_kwargs(simulstreaming_params, kwargs)
|
||||||
|
|
||||||
|
self.tokenizer = None
|
||||||
self.asr = SimulStreamingASR(
|
self.asr = SimulStreamingASR(
|
||||||
modelsize=size,
|
**transcription_common_params, **simulstreaming_params
|
||||||
lan=self.args.lan,
|
|
||||||
cache_dir=getattr(self.args, 'model_cache_dir', None),
|
|
||||||
model_dir=getattr(self.args, 'model_dir', None),
|
|
||||||
**simulstreaming_kwargs
|
|
||||||
)
|
)
|
||||||
|
|
||||||
else:
|
else:
|
||||||
self.asr, self.tokenizer = backend_factory(self.args)
|
|
||||||
warmup_asr(self.asr, self.args.warmup_file) #for simulstreaming, warmup should be done in the online class not here
|
whisperstreaming_params = {
|
||||||
|
"buffer_trimming": "segment",
|
||||||
|
"confidence_validation": False,
|
||||||
|
"buffer_trimming_sec": 15,
|
||||||
|
}
|
||||||
|
whisperstreaming_params = update_with_kwargs(whisperstreaming_params, kwargs)
|
||||||
|
|
||||||
|
self.asr = backend_factory(
|
||||||
|
**transcription_common_params, **whisperstreaming_params
|
||||||
|
)
|
||||||
|
|
||||||
if self.args.diarization:
|
if self.args.diarization:
|
||||||
if self.args.diarization_backend == "diart":
|
if self.args.diarization_backend == "diart":
|
||||||
from whisperlivekit.diarization.diart_backend import DiartDiarization
|
from whisperlivekit.diarization.diart_backend import DiartDiarization
|
||||||
|
diart_params = {
|
||||||
|
"segmentation_model": "pyannote/segmentation-3.0",
|
||||||
|
"embedding_model": "pyannote/embedding",
|
||||||
|
}
|
||||||
|
diart_params = update_with_kwargs(diart_params, kwargs)
|
||||||
self.diarization_model = DiartDiarization(
|
self.diarization_model = DiartDiarization(
|
||||||
block_duration=self.args.min_chunk_size,
|
block_duration=self.args.min_chunk_size,
|
||||||
segmentation_model_name=self.args.segmentation_model,
|
**diart_params
|
||||||
embedding_model_name=self.args.embedding_model
|
|
||||||
)
|
)
|
||||||
elif self.args.diarization_backend == "sortformer":
|
elif self.args.diarization_backend == "sortformer":
|
||||||
from whisperlivekit.diarization.sortformer_backend import SortformerDiarization
|
from whisperlivekit.diarization.sortformer_backend import SortformerDiarization
|
||||||
self.diarization_model = SortformerDiarization()
|
self.diarization_model = SortformerDiarization()
|
||||||
else:
|
|
||||||
raise ValueError(f"Unknown diarization backend: {self.args.diarization_backend}")
|
|
||||||
|
|
||||||
self.translation_model = None
|
self.translation_model = None
|
||||||
if self.args.target_language:
|
if self.args.target_language:
|
||||||
if self.args.lan == 'auto':
|
if self.args.lan == 'auto' and self.args.backend != "simulstreaming":
|
||||||
raise Exception('Translation cannot be set with language auto')
|
raise Exception('Translation cannot be set with language auto when transcription backend is not simulstreaming')
|
||||||
else:
|
else:
|
||||||
from whisperlivekit.translation.translation import load_model
|
from whisperlivekit.translation.translation import load_model
|
||||||
self.translation_model = load_model([self.args.lan], backend=self.args.nllb_backend, model_size=self.args.nllb_size) #in the future we want to handle different languages for different speakers
|
translation_params = {
|
||||||
|
"nllb_backend": "ctranslate2",
|
||||||
|
"nllb_size": "600M"
|
||||||
|
}
|
||||||
|
translation_params = update_with_kwargs(translation_params, kwargs)
|
||||||
|
self.translation_model = load_model([self.args.lan], **translation_params) #in the future we want to handle different languages for different speakers
|
||||||
TranscriptionEngine._initialized = True
|
TranscriptionEngine._initialized = True
|
||||||
|
|
||||||
|
|
||||||
|
def online_factory(args, asr):
|
||||||
def online_factory(args, asr, tokenizer, logfile=sys.stderr):
|
|
||||||
if args.backend == "simulstreaming":
|
if args.backend == "simulstreaming":
|
||||||
from whisperlivekit.simul_whisper import SimulStreamingOnlineProcessor
|
from whisperlivekit.simul_whisper import SimulStreamingOnlineProcessor
|
||||||
online = SimulStreamingOnlineProcessor(
|
online = SimulStreamingOnlineProcessor(asr)
|
||||||
asr,
|
|
||||||
logfile=logfile,
|
|
||||||
)
|
|
||||||
else:
|
else:
|
||||||
online = OnlineASRProcessor(
|
online = OnlineASRProcessor(asr)
|
||||||
asr,
|
|
||||||
tokenizer,
|
|
||||||
logfile=logfile,
|
|
||||||
buffer_trimming=(args.buffer_trimming, args.buffer_trimming_sec),
|
|
||||||
confidence_validation = args.confidence_validation
|
|
||||||
)
|
|
||||||
return online
|
return online
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -242,7 +242,7 @@ class DiartDiarization:
|
|||||||
token.speaker = extract_number(segment.speaker) + 1
|
token.speaker = extract_number(segment.speaker) + 1
|
||||||
else:
|
else:
|
||||||
tokens = add_speaker_to_tokens(segments, tokens)
|
tokens = add_speaker_to_tokens(segments, tokens)
|
||||||
return tokens, segments[-1]
|
return tokens
|
||||||
|
|
||||||
def concatenate_speakers(segments):
|
def concatenate_speakers(segments):
|
||||||
segments_concatenated = [{"speaker": 1, "begin": 0.0, "end": 0.0}]
|
segments_concatenated = [{"speaker": 1, "begin": 0.0, "end": 0.0}]
|
||||||
|
|||||||
@@ -296,7 +296,7 @@ class SortformerDiarizationOnline:
|
|||||||
|
|
||||||
if not segments or not tokens:
|
if not segments or not tokens:
|
||||||
logger.debug("No segments or tokens available for speaker assignment")
|
logger.debug("No segments or tokens available for speaker assignment")
|
||||||
return tokens, None
|
return tokens
|
||||||
|
|
||||||
logger.debug(f"Assigning speakers to {len(tokens)} tokens using {len(segments)} segments")
|
logger.debug(f"Assigning speakers to {len(tokens)} tokens using {len(segments)} segments")
|
||||||
use_punctuation_split = False
|
use_punctuation_split = False
|
||||||
@@ -313,7 +313,7 @@ class SortformerDiarizationOnline:
|
|||||||
# Use punctuation-aware assignment (similar to diart_backend)
|
# Use punctuation-aware assignment (similar to diart_backend)
|
||||||
tokens = self._add_speaker_to_tokens_with_punctuation(segments, tokens)
|
tokens = self._add_speaker_to_tokens_with_punctuation(segments, tokens)
|
||||||
|
|
||||||
return tokens, segments[-1]
|
return tokens
|
||||||
|
|
||||||
def _add_speaker_to_tokens_with_punctuation(self, segments: List[SpeakerSegment], tokens: list) -> list:
|
def _add_speaker_to_tokens_with_punctuation(self, segments: List[SpeakerSegment], tokens: list) -> list:
|
||||||
"""
|
"""
|
||||||
|
|||||||
@@ -89,6 +89,7 @@ def parse_args():
|
|||||||
"--model",
|
"--model",
|
||||||
type=str,
|
type=str,
|
||||||
default="small",
|
default="small",
|
||||||
|
dest='model_size',
|
||||||
help="Name size of the Whisper model to use (default: tiny). Suggested values: tiny.en,tiny,base.en,base,small.en,small,medium.en,medium,large-v1,large-v2,large-v3,large,large-v3-turbo. The model is automatically downloaded from the model hub if not present in model cache dir.",
|
help="Name size of the Whisper model to use (default: tiny). Suggested values: tiny.en,tiny,base.en,base,small.en,small,medium.en,medium,large-v1,large-v2,large-v3,large,large-v3-turbo. The model is automatically downloaded from the model hub if not present in model cache dir.",
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -109,6 +110,7 @@ def parse_args():
|
|||||||
"--language",
|
"--language",
|
||||||
type=str,
|
type=str,
|
||||||
default="auto",
|
default="auto",
|
||||||
|
dest='lan',
|
||||||
help="Source language code, e.g. en,de,cs, or 'auto' for language detection.",
|
help="Source language code, e.g. en,de,cs, or 'auto' for language detection.",
|
||||||
)
|
)
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
@@ -173,6 +175,7 @@ def parse_args():
|
|||||||
)
|
)
|
||||||
parser.add_argument("--ssl-certfile", type=str, help="Path to the SSL certificate file.", default=None)
|
parser.add_argument("--ssl-certfile", type=str, help="Path to the SSL certificate file.", default=None)
|
||||||
parser.add_argument("--ssl-keyfile", type=str, help="Path to the SSL private key file.", default=None)
|
parser.add_argument("--ssl-keyfile", type=str, help="Path to the SSL private key file.", default=None)
|
||||||
|
parser.add_argument("--forwarded-allow-ips", type=str, help="Allowed ips for reverse proxying.", default=None)
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
"--pcm-input",
|
"--pcm-input",
|
||||||
action="store_true",
|
action="store_true",
|
||||||
@@ -189,6 +192,13 @@ def parse_args():
|
|||||||
dest="disable_fast_encoder",
|
dest="disable_fast_encoder",
|
||||||
help="Disable Faster Whisper or MLX Whisper backends for encoding (if installed). Slower but helpful when GPU memory is limited",
|
help="Disable Faster Whisper or MLX Whisper backends for encoding (if installed). Slower but helpful when GPU memory is limited",
|
||||||
)
|
)
|
||||||
|
|
||||||
|
simulstreaming_group.add_argument(
|
||||||
|
"--custom-alignment-heads",
|
||||||
|
type=str,
|
||||||
|
default=None,
|
||||||
|
help="Use your own alignment heads, useful when `--model-dir` is used",
|
||||||
|
)
|
||||||
|
|
||||||
simulstreaming_group.add_argument(
|
simulstreaming_group.add_argument(
|
||||||
"--frame-threshold",
|
"--frame-threshold",
|
||||||
|
|||||||
@@ -78,16 +78,8 @@ def no_token_to_silence(tokens):
|
|||||||
return new_tokens
|
return new_tokens
|
||||||
|
|
||||||
def ends_with_silence(tokens, current_time, vac_detected_silence):
|
def ends_with_silence(tokens, current_time, vac_detected_silence):
|
||||||
end_w_silence = False
|
|
||||||
if not tokens:
|
|
||||||
return [], end_w_silence
|
|
||||||
last_token = tokens[-1]
|
last_token = tokens[-1]
|
||||||
if tokens and current_time and (
|
if vac_detected_silence or (current_time - last_token.end >= END_SILENCE_DURATION):
|
||||||
current_time - last_token.end >= END_SILENCE_DURATION
|
|
||||||
or
|
|
||||||
(current_time - last_token.end >= 3 and vac_detected_silence)
|
|
||||||
):
|
|
||||||
end_w_silence = True
|
|
||||||
if last_token.speaker == -2:
|
if last_token.speaker == -2:
|
||||||
last_token.end = current_time
|
last_token.end = current_time
|
||||||
else:
|
else:
|
||||||
@@ -99,12 +91,14 @@ def ends_with_silence(tokens, current_time, vac_detected_silence):
|
|||||||
probability=0.95
|
probability=0.95
|
||||||
)
|
)
|
||||||
)
|
)
|
||||||
return tokens, end_w_silence
|
return tokens
|
||||||
|
|
||||||
|
|
||||||
def handle_silences(tokens, current_time, vac_detected_silence):
|
def handle_silences(tokens, current_time, vac_detected_silence):
|
||||||
|
if not tokens:
|
||||||
|
return []
|
||||||
tokens = blank_to_silence(tokens) #useful for simulstreaming backend which tends to generate [BLANK_AUDIO] text
|
tokens = blank_to_silence(tokens) #useful for simulstreaming backend which tends to generate [BLANK_AUDIO] text
|
||||||
tokens = no_token_to_silence(tokens)
|
tokens = no_token_to_silence(tokens)
|
||||||
tokens, end_w_silence = ends_with_silence(tokens, current_time, vac_detected_silence)
|
tokens = ends_with_silence(tokens, current_time, vac_detected_silence)
|
||||||
return tokens, end_w_silence
|
return tokens
|
||||||
|
|
||||||
@@ -7,6 +7,8 @@ logger = logging.getLogger(__name__)
|
|||||||
logger.setLevel(logging.DEBUG)
|
logger.setLevel(logging.DEBUG)
|
||||||
|
|
||||||
CHECK_AROUND = 4
|
CHECK_AROUND = 4
|
||||||
|
DEBUG = False
|
||||||
|
|
||||||
|
|
||||||
def is_punctuation(token):
|
def is_punctuation(token):
|
||||||
if token.is_punctuation():
|
if token.is_punctuation():
|
||||||
@@ -30,95 +32,96 @@ def next_speaker_change(i, tokens, speaker):
|
|||||||
|
|
||||||
def new_line(
|
def new_line(
|
||||||
token,
|
token,
|
||||||
speaker,
|
|
||||||
debug_info = ""
|
|
||||||
):
|
):
|
||||||
return Line(
|
return Line(
|
||||||
speaker = speaker,
|
speaker = token.corrected_speaker,
|
||||||
text = token.text + debug_info,
|
text = token.text + (f"[{format_time(token.start)} : {format_time(token.end)}]" if DEBUG else ""),
|
||||||
start = token.start,
|
start = token.start,
|
||||||
end = token.end,
|
end = token.end,
|
||||||
|
detected_language=token.detected_language
|
||||||
)
|
)
|
||||||
|
|
||||||
def append_token_to_last_line(lines, sep, token, debug_info):
|
def append_token_to_last_line(lines, sep, token):
|
||||||
if token.text:
|
if not lines:
|
||||||
lines[-1].text += sep + token.text + debug_info
|
lines.append(new_line(token))
|
||||||
lines[-1].end = token.end
|
else:
|
||||||
|
if token.text:
|
||||||
|
lines[-1].text += sep + token.text + (f"[{format_time(token.start)} : {format_time(token.end)}]" if DEBUG else "")
|
||||||
|
lines[-1].end = token.end
|
||||||
|
if not lines[-1].detected_language and token.detected_language:
|
||||||
|
lines[-1].detected_language = token.detected_language
|
||||||
|
|
||||||
|
|
||||||
def format_output(state, silence, current_time, args, debug, sep):
|
def format_output(state, silence, current_time, args, sep):
|
||||||
diarization = args.diarization
|
diarization = args.diarization
|
||||||
disable_punctuation_split = args.disable_punctuation_split
|
disable_punctuation_split = args.disable_punctuation_split
|
||||||
tokens = state.tokens
|
tokens = state.tokens
|
||||||
translated_segments = state.translated_segments # Here we will attribute the speakers only based on the timestamps of the segments
|
translated_segments = state.translated_segments # Here we will attribute the speakers only based on the timestamps of the segments
|
||||||
end_attributed_speaker = state.end_attributed_speaker
|
last_validated_token = state.last_validated_token
|
||||||
|
|
||||||
previous_speaker = -1
|
previous_speaker = 1
|
||||||
lines = []
|
|
||||||
undiarized_text = []
|
undiarized_text = []
|
||||||
tokens, end_w_silence = handle_silences(tokens, current_time, silence)
|
tokens = handle_silences(tokens, current_time, silence)
|
||||||
last_punctuation = None
|
last_punctuation = None
|
||||||
for i, token in enumerate(tokens):
|
for i, token in enumerate(tokens[last_validated_token:]):
|
||||||
speaker = token.speaker
|
speaker = int(token.speaker)
|
||||||
if not diarization and speaker == -1: #Speaker -1 means no attributed by diarization. In the frontend, it should appear under 'Speaker 1'
|
token.corrected_speaker = speaker
|
||||||
speaker = 1
|
if not diarization:
|
||||||
if diarization and not tokens[-1].speaker == -2:
|
if speaker == -1: #Speaker -1 means no attributed by diarization. In the frontend, it should appear under 'Speaker 1'
|
||||||
if (speaker in [-1, 0]) and token.end >= end_attributed_speaker:
|
token.corrected_speaker = 1
|
||||||
undiarized_text.append(token.text)
|
token.validated_speaker = True
|
||||||
continue
|
|
||||||
elif (speaker in [-1, 0]) and token.end < end_attributed_speaker:
|
|
||||||
speaker = previous_speaker
|
|
||||||
debug_info = ""
|
|
||||||
if debug:
|
|
||||||
debug_info = f"[{format_time(token.start)} : {format_time(token.end)}]"
|
|
||||||
|
|
||||||
if not lines:
|
|
||||||
lines.append(new_line(token, speaker, debug_info = ""))
|
|
||||||
continue
|
|
||||||
else:
|
else:
|
||||||
previous_speaker = lines[-1].speaker
|
# if token.end > end_attributed_speaker and token.speaker != -2:
|
||||||
|
# if tokens[-1].speaker == -2: #if it finishes by a silence, we want to append the undiarized text to the last speaker.
|
||||||
if is_punctuation(token):
|
# token.corrected_speaker = previous_speaker
|
||||||
last_punctuation = i
|
# else:
|
||||||
|
# undiarized_text.append(token.text)
|
||||||
|
# continue
|
||||||
if last_punctuation == i-1:
|
# else:
|
||||||
if speaker != previous_speaker:
|
if is_punctuation(token):
|
||||||
# perfect, diarization perfectly aligned
|
last_punctuation = i
|
||||||
lines.append(new_line(token, speaker, debug_info = ""))
|
|
||||||
last_punctuation, next_punctuation = None, None
|
if last_punctuation == i-1:
|
||||||
continue
|
if token.speaker != previous_speaker:
|
||||||
|
token.validated_speaker = True
|
||||||
speaker_change_pos, new_speaker = next_speaker_change(i, tokens, speaker)
|
# perfect, diarization perfectly aligned
|
||||||
if speaker_change_pos:
|
last_punctuation = None
|
||||||
# Corrects delay:
|
else:
|
||||||
# That was the idea. Okay haha |SPLIT SPEAKER| that's a good one
|
speaker_change_pos, new_speaker = next_speaker_change(i, tokens, speaker)
|
||||||
# should become:
|
if speaker_change_pos:
|
||||||
# That was the idea. |SPLIT SPEAKER| Okay haha that's a good one
|
# Corrects delay:
|
||||||
lines.append(new_line(token, new_speaker, debug_info = ""))
|
# That was the idea. <Okay> haha |SPLIT SPEAKER| that's a good one
|
||||||
else:
|
# should become:
|
||||||
# No speaker change to come
|
# That was the idea. |SPLIT SPEAKER| <Okay> haha that's a good one
|
||||||
append_token_to_last_line(lines, sep, token, debug_info)
|
token.corrected_speaker = new_speaker
|
||||||
continue
|
token.validated_speaker = True
|
||||||
|
elif speaker != previous_speaker:
|
||||||
|
if not (speaker == -2 or previous_speaker == -2):
|
||||||
|
if next_punctuation_change(i, tokens):
|
||||||
|
# Corrects advance:
|
||||||
|
# Are you |SPLIT SPEAKER| <okay>? yeah, sure. Absolutely
|
||||||
|
# should become:
|
||||||
|
# Are you <okay>? |SPLIT SPEAKER| yeah, sure. Absolutely
|
||||||
|
token.corrected_speaker = previous_speaker
|
||||||
|
token.validated_speaker = True
|
||||||
|
else: #Problematic, except if the language has no punctuation. We append to previous line, except if disable_punctuation_split is set to True.
|
||||||
|
if not disable_punctuation_split:
|
||||||
|
token.corrected_speaker = previous_speaker
|
||||||
|
token.validated_speaker = False
|
||||||
|
if token.validated_speaker:
|
||||||
|
state.last_validated_token = i
|
||||||
|
previous_speaker = token.corrected_speaker
|
||||||
|
|
||||||
if speaker != previous_speaker:
|
previous_speaker = 1
|
||||||
if speaker == -2 or previous_speaker == -2: #silences can happen anytime
|
|
||||||
lines.append(new_line(token, speaker, debug_info = ""))
|
lines = []
|
||||||
continue
|
for token in tokens:
|
||||||
elif next_punctuation_change(i, tokens):
|
if int(token.corrected_speaker) != int(previous_speaker):
|
||||||
# Corrects advance:
|
lines.append(new_line(token))
|
||||||
# Are you |SPLIT SPEAKER| okay? yeah, sure. Absolutely
|
else:
|
||||||
# should become:
|
append_token_to_last_line(lines, sep, token)
|
||||||
# Are you okay? |SPLIT SPEAKER| yeah, sure. Absolutely
|
|
||||||
append_token_to_last_line(lines, sep, token, debug_info)
|
previous_speaker = token.corrected_speaker
|
||||||
continue
|
|
||||||
else: #we create a new speaker, but that's no ideal. We are not sure about the split. We prefer to append to previous line
|
|
||||||
if disable_punctuation_split:
|
|
||||||
lines.append(new_line(token, speaker, debug_info = ""))
|
|
||||||
continue
|
|
||||||
pass
|
|
||||||
|
|
||||||
append_token_to_last_line(lines, sep, token, debug_info)
|
|
||||||
|
|
||||||
if lines and translated_segments:
|
if lines and translated_segments:
|
||||||
unassigned_translated_segments = []
|
unassigned_translated_segments = []
|
||||||
@@ -154,4 +157,4 @@ def format_output(state, silence, current_time, args, debug, sep):
|
|||||||
if state.buffer_transcription and lines:
|
if state.buffer_transcription and lines:
|
||||||
lines[-1].end = max(state.buffer_transcription.end, lines[-1].end)
|
lines[-1].end = max(state.buffer_transcription.end, lines[-1].end)
|
||||||
|
|
||||||
return lines, undiarized_text, end_w_silence
|
return lines, undiarized_text
|
||||||
|
|||||||
@@ -4,7 +4,7 @@ import logging
|
|||||||
from typing import List, Tuple, Optional
|
from typing import List, Tuple, Optional
|
||||||
import logging
|
import logging
|
||||||
import platform
|
import platform
|
||||||
from whisperlivekit.timed_objects import ASRToken, Transcript, SpeakerSegment
|
from whisperlivekit.timed_objects import ASRToken, Transcript, ChangeSpeaker
|
||||||
from whisperlivekit.warmup import load_file
|
from whisperlivekit.warmup import load_file
|
||||||
from .whisper import load_model, tokenizer
|
from .whisper import load_model, tokenizer
|
||||||
from .whisper.audio import TOKENS_PER_SECOND
|
from .whisper.audio import TOKENS_PER_SECOND
|
||||||
@@ -22,11 +22,9 @@ try:
|
|||||||
HAS_MLX_WHISPER = True
|
HAS_MLX_WHISPER = True
|
||||||
except ImportError:
|
except ImportError:
|
||||||
if platform.system() == "Darwin" and platform.machine() == "arm64":
|
if platform.system() == "Darwin" and platform.machine() == "arm64":
|
||||||
print(f"""
|
print(f"""{"="*50}
|
||||||
{"="*50}
|
MLX Whisper not found but you are on Apple Silicon. Consider installing mlx-whisper for better performance: pip install mlx-whisper
|
||||||
MLX Whisper not found but you are on Apple Silicon. Consider installing mlx-whisper for better performance: pip install mlx-whisper
|
{"="*50}""")
|
||||||
{"="*50}
|
|
||||||
""")
|
|
||||||
HAS_MLX_WHISPER = False
|
HAS_MLX_WHISPER = False
|
||||||
if HAS_MLX_WHISPER:
|
if HAS_MLX_WHISPER:
|
||||||
HAS_FASTER_WHISPER = False
|
HAS_FASTER_WHISPER = False
|
||||||
@@ -47,7 +45,6 @@ class SimulStreamingOnlineProcessor:
|
|||||||
self,
|
self,
|
||||||
asr,
|
asr,
|
||||||
logfile=sys.stderr,
|
logfile=sys.stderr,
|
||||||
warmup_file=None
|
|
||||||
):
|
):
|
||||||
self.asr = asr
|
self.asr = asr
|
||||||
self.logfile = logfile
|
self.logfile = logfile
|
||||||
@@ -93,14 +90,16 @@ class SimulStreamingOnlineProcessor:
|
|||||||
self.end = audio_stream_end_time #Only to be aligned with what happens in whisperstreaming backend.
|
self.end = audio_stream_end_time #Only to be aligned with what happens in whisperstreaming backend.
|
||||||
self.model.insert_audio(audio_tensor)
|
self.model.insert_audio(audio_tensor)
|
||||||
|
|
||||||
def on_new_speaker(self, last_segment: SpeakerSegment):
|
def new_speaker(self, change_speaker: ChangeSpeaker):
|
||||||
self.model.on_new_speaker(last_segment)
|
self.process_iter(is_last=True)
|
||||||
self.model.refresh_segment(complete=True)
|
self.model.refresh_segment(complete=True)
|
||||||
|
self.model.speaker = change_speaker.speaker
|
||||||
|
self.global_time_offset = change_speaker.start
|
||||||
|
|
||||||
def get_buffer(self):
|
def get_buffer(self):
|
||||||
concat_buffer = Transcript.from_tokens(tokens= self.buffer, sep='')
|
concat_buffer = Transcript.from_tokens(tokens= self.buffer, sep='')
|
||||||
return concat_buffer
|
return concat_buffer
|
||||||
|
|
||||||
def process_iter(self, is_last=False) -> Tuple[List[ASRToken], float]:
|
def process_iter(self, is_last=False) -> Tuple[List[ASRToken], float]:
|
||||||
"""
|
"""
|
||||||
Process accumulated audio chunks using SimulStreaming.
|
Process accumulated audio chunks using SimulStreaming.
|
||||||
@@ -108,9 +107,13 @@ class SimulStreamingOnlineProcessor:
|
|||||||
Returns a tuple: (list of committed ASRToken objects, float representing the audio processed up to time).
|
Returns a tuple: (list of committed ASRToken objects, float representing the audio processed up to time).
|
||||||
"""
|
"""
|
||||||
try:
|
try:
|
||||||
timestamped_words, timestamped_buffer_language = self.model.infer(is_last=is_last)
|
timestamped_words = self.model.infer(is_last=is_last)
|
||||||
self.buffer = timestamped_buffer_language
|
if self.model.cfg.language == "auto" and timestamped_words and timestamped_words[0].detected_language == None:
|
||||||
|
self.buffer.extend(timestamped_words)
|
||||||
|
return [], self.end
|
||||||
|
|
||||||
self.committed.extend(timestamped_words)
|
self.committed.extend(timestamped_words)
|
||||||
|
self.buffer = []
|
||||||
return timestamped_words, self.end
|
return timestamped_words, self.end
|
||||||
|
|
||||||
|
|
||||||
@@ -140,31 +143,20 @@ class SimulStreamingASR():
|
|||||||
"""SimulStreaming backend with AlignAtt policy."""
|
"""SimulStreaming backend with AlignAtt policy."""
|
||||||
sep = ""
|
sep = ""
|
||||||
|
|
||||||
def __init__(self, lan, modelsize=None, cache_dir=None, model_dir=None, logfile=sys.stderr, **kwargs):
|
def __init__(self, logfile=sys.stderr, **kwargs):
|
||||||
self.logfile = logfile
|
self.logfile = logfile
|
||||||
self.transcribe_kargs = {}
|
self.transcribe_kargs = {}
|
||||||
self.original_language = lan
|
|
||||||
|
|
||||||
self.model_path = kwargs.get('model_path', './large-v3.pt')
|
for key, value in kwargs.items():
|
||||||
self.frame_threshold = kwargs.get('frame_threshold', 25)
|
setattr(self, key, value)
|
||||||
self.audio_max_len = kwargs.get('audio_max_len', 20.0)
|
|
||||||
self.audio_min_len = kwargs.get('audio_min_len', 0.0)
|
if self.decoder_type is None:
|
||||||
self.segment_length = kwargs.get('segment_length', 0.5)
|
self.decoder_type = 'greedy' if self.beams == 1 else 'beam'
|
||||||
self.beams = kwargs.get('beams', 1)
|
|
||||||
self.decoder_type = kwargs.get('decoder_type', 'greedy' if self.beams == 1 else 'beam')
|
|
||||||
self.task = kwargs.get('task', 'transcribe')
|
|
||||||
self.cif_ckpt_path = kwargs.get('cif_ckpt_path', None)
|
|
||||||
self.never_fire = kwargs.get('never_fire', False)
|
|
||||||
self.init_prompt = kwargs.get('init_prompt', None)
|
|
||||||
self.static_init_prompt = kwargs.get('static_init_prompt', None)
|
|
||||||
self.max_context_tokens = kwargs.get('max_context_tokens', None)
|
|
||||||
self.warmup_file = kwargs.get('warmup_file', None)
|
|
||||||
self.preload_model_count = kwargs.get('preload_model_count', 1)
|
|
||||||
self.disable_fast_encoder = kwargs.get('disable_fast_encoder', False)
|
|
||||||
self.fast_encoder = False
|
self.fast_encoder = False
|
||||||
if model_dir is not None:
|
if self.model_dir is not None:
|
||||||
self.model_path = model_dir
|
self.model_path = self.model_dir
|
||||||
elif modelsize is not None:
|
elif self.model_size is not None:
|
||||||
model_mapping = {
|
model_mapping = {
|
||||||
'tiny': './tiny.pt',
|
'tiny': './tiny.pt',
|
||||||
'base': './base.pt',
|
'base': './base.pt',
|
||||||
@@ -179,13 +171,13 @@ class SimulStreamingASR():
|
|||||||
'large-v3': './large-v3.pt',
|
'large-v3': './large-v3.pt',
|
||||||
'large': './large-v3.pt'
|
'large': './large-v3.pt'
|
||||||
}
|
}
|
||||||
self.model_path = model_mapping.get(modelsize, f'./{modelsize}.pt')
|
self.model_path = model_mapping.get(self.model_size, f'./{self.model_size}.pt')
|
||||||
|
|
||||||
self.cfg = AlignAttConfig(
|
self.cfg = AlignAttConfig(
|
||||||
model_path=self.model_path,
|
model_path=self.model_path,
|
||||||
segment_length=self.segment_length,
|
segment_length=self.min_chunk_size,
|
||||||
frame_threshold=self.frame_threshold,
|
frame_threshold=self.frame_threshold,
|
||||||
language=self.original_language,
|
language=self.lan,
|
||||||
audio_max_len=self.audio_max_len,
|
audio_max_len=self.audio_max_len,
|
||||||
audio_min_len=self.audio_min_len,
|
audio_min_len=self.audio_min_len,
|
||||||
cif_ckpt_path=self.cif_ckpt_path,
|
cif_ckpt_path=self.cif_ckpt_path,
|
||||||
@@ -204,8 +196,12 @@ class SimulStreamingASR():
|
|||||||
else:
|
else:
|
||||||
self.tokenizer = None
|
self.tokenizer = None
|
||||||
|
|
||||||
self.model_name = os.path.basename(self.cfg.model_path).replace(".pt", "")
|
if self.model_dir:
|
||||||
self.model_path = os.path.dirname(os.path.abspath(self.cfg.model_path))
|
self.model_name = self.model_dir
|
||||||
|
self.model_path = None
|
||||||
|
else:
|
||||||
|
self.model_name = os.path.basename(self.cfg.model_path).replace(".pt", "")
|
||||||
|
self.model_path = os.path.dirname(os.path.abspath(self.cfg.model_path))
|
||||||
|
|
||||||
self.mlx_encoder, self.fw_encoder = None, None
|
self.mlx_encoder, self.fw_encoder = None, None
|
||||||
if not self.disable_fast_encoder:
|
if not self.disable_fast_encoder:
|
||||||
@@ -227,7 +223,12 @@ class SimulStreamingASR():
|
|||||||
|
|
||||||
|
|
||||||
def load_model(self):
|
def load_model(self):
|
||||||
whisper_model = load_model(name=self.model_name, download_root=self.model_path, decoder_only=self.fast_encoder)
|
whisper_model = load_model(
|
||||||
|
name=self.model_name,
|
||||||
|
download_root=self.model_path,
|
||||||
|
decoder_only=self.fast_encoder,
|
||||||
|
custom_alignment_heads=self.custom_alignment_heads
|
||||||
|
)
|
||||||
warmup_audio = load_file(self.warmup_file)
|
warmup_audio = load_file(self.warmup_file)
|
||||||
if warmup_audio is not None:
|
if warmup_audio is not None:
|
||||||
warmup_audio = torch.from_numpy(warmup_audio).float()
|
warmup_audio = torch.from_numpy(warmup_audio).float()
|
||||||
@@ -243,7 +244,7 @@ class SimulStreamingASR():
|
|||||||
else:
|
else:
|
||||||
# For standard encoder, use the original transcribe warmup
|
# For standard encoder, use the original transcribe warmup
|
||||||
warmup_audio = load_file(self.warmup_file)
|
warmup_audio = load_file(self.warmup_file)
|
||||||
whisper_model.transcribe(warmup_audio, language=self.original_language if self.original_language != 'auto' else None)
|
whisper_model.transcribe(warmup_audio, language=self.lan if self.lan != 'auto' else None)
|
||||||
return whisper_model
|
return whisper_model
|
||||||
|
|
||||||
def get_new_model_instance(self):
|
def get_new_model_instance(self):
|
||||||
|
|||||||
@@ -66,7 +66,7 @@ class PaddedAlignAttWhisper:
|
|||||||
self.fw_feature_extractor = FeatureExtractor(feature_size=self.model.dims.n_mels)
|
self.fw_feature_extractor = FeatureExtractor(feature_size=self.model.dims.n_mels)
|
||||||
|
|
||||||
logger.info(f"Model dimensions: {self.model.dims}")
|
logger.info(f"Model dimensions: {self.model.dims}")
|
||||||
|
self.speaker = -1
|
||||||
self.decode_options = DecodingOptions(
|
self.decode_options = DecodingOptions(
|
||||||
language = cfg.language,
|
language = cfg.language,
|
||||||
without_timestamps = True,
|
without_timestamps = True,
|
||||||
@@ -78,7 +78,6 @@ class PaddedAlignAttWhisper:
|
|||||||
self.detected_language = cfg.language if cfg.language != "auto" else None
|
self.detected_language = cfg.language if cfg.language != "auto" else None
|
||||||
self.global_time_offset = 0.0
|
self.global_time_offset = 0.0
|
||||||
self.reset_tokenizer_to_auto_next_call = False
|
self.reset_tokenizer_to_auto_next_call = False
|
||||||
self.sentence_start_time = 0.0
|
|
||||||
|
|
||||||
self.max_text_len = self.model.dims.n_text_ctx
|
self.max_text_len = self.model.dims.n_text_ctx
|
||||||
self.num_decoder_layers = len(self.model.decoder.blocks)
|
self.num_decoder_layers = len(self.model.decoder.blocks)
|
||||||
@@ -153,7 +152,7 @@ class PaddedAlignAttWhisper:
|
|||||||
|
|
||||||
self.last_attend_frame = -self.cfg.rewind_threshold
|
self.last_attend_frame = -self.cfg.rewind_threshold
|
||||||
self.cumulative_time_offset = 0.0
|
self.cumulative_time_offset = 0.0
|
||||||
self.sentence_start_time = self.cumulative_time_offset + self.segments_len()
|
self.first_timestamp = None
|
||||||
|
|
||||||
if self.cfg.max_context_tokens is None:
|
if self.cfg.max_context_tokens is None:
|
||||||
self.max_context_tokens = self.max_text_len
|
self.max_context_tokens = self.max_text_len
|
||||||
@@ -261,7 +260,6 @@ class PaddedAlignAttWhisper:
|
|||||||
self.init_context()
|
self.init_context()
|
||||||
logger.debug(f"Context: {self.context}")
|
logger.debug(f"Context: {self.context}")
|
||||||
if not complete and len(self.segments) > 2:
|
if not complete and len(self.segments) > 2:
|
||||||
logger.debug("keeping last two segments because they are and it is not complete.")
|
|
||||||
self.segments = self.segments[-2:]
|
self.segments = self.segments[-2:]
|
||||||
else:
|
else:
|
||||||
logger.debug("removing all segments.")
|
logger.debug("removing all segments.")
|
||||||
@@ -434,18 +432,19 @@ class PaddedAlignAttWhisper:
|
|||||||
end_encode = time()
|
end_encode = time()
|
||||||
# print('Encoder duration:', end_encode-beg_encode)
|
# print('Encoder duration:', end_encode-beg_encode)
|
||||||
|
|
||||||
if self.cfg.language == "auto" and self.detected_language is None:
|
if self.cfg.language == "auto" and self.detected_language is None and self.first_timestamp:
|
||||||
seconds_since_start = (self.cumulative_time_offset + self.segments_len()) - self.sentence_start_time
|
seconds_since_start = self.segments_len() - self.first_timestamp
|
||||||
if seconds_since_start >= 3.0:
|
if seconds_since_start >= 2.0:
|
||||||
language_tokens, language_probs = self.lang_id(encoder_feature)
|
language_tokens, language_probs = self.lang_id(encoder_feature)
|
||||||
top_lan, p = max(language_probs[0].items(), key=lambda x: x[1])
|
top_lan, p = max(language_probs[0].items(), key=lambda x: x[1])
|
||||||
print(f"Detected language: {top_lan} with p={p:.4f}")
|
print(f"Detected language: {top_lan} with p={p:.4f}")
|
||||||
self.create_tokenizer(top_lan)
|
self.create_tokenizer(top_lan)
|
||||||
self.refresh_segment(complete=True)
|
self.last_attend_frame = -self.cfg.rewind_threshold
|
||||||
|
self.cumulative_time_offset = 0.0
|
||||||
|
self.init_tokens()
|
||||||
|
self.init_context()
|
||||||
self.detected_language = top_lan
|
self.detected_language = top_lan
|
||||||
logger.info(f"Tokenizer language: {self.tokenizer.language}, {self.tokenizer.sot_sequence_including_notimestamps}")
|
logger.info(f"Tokenizer language: {self.tokenizer.language}, {self.tokenizer.sot_sequence_including_notimestamps}")
|
||||||
else:
|
|
||||||
logger.debug(f"Skipping language detection: {seconds_since_start:.2f}s < 3.0s")
|
|
||||||
|
|
||||||
self.trim_context()
|
self.trim_context()
|
||||||
current_tokens = self._current_tokens()
|
current_tokens = self._current_tokens()
|
||||||
@@ -590,6 +589,10 @@ class PaddedAlignAttWhisper:
|
|||||||
|
|
||||||
self._clean_cache()
|
self._clean_cache()
|
||||||
|
|
||||||
|
if len(l_absolute_timestamps) >=2 and self.first_timestamp is None:
|
||||||
|
self.first_timestamp = l_absolute_timestamps[0]
|
||||||
|
|
||||||
|
|
||||||
timestamped_words = []
|
timestamped_words = []
|
||||||
timestamp_idx = 0
|
timestamp_idx = 0
|
||||||
for word, word_tokens in zip(split_words, split_tokens):
|
for word, word_tokens in zip(split_words, split_tokens):
|
||||||
@@ -604,15 +607,11 @@ class PaddedAlignAttWhisper:
|
|||||||
end=current_timestamp + 0.1,
|
end=current_timestamp + 0.1,
|
||||||
text= word,
|
text= word,
|
||||||
probability=0.95,
|
probability=0.95,
|
||||||
language=self.detected_language
|
speaker=self.speaker,
|
||||||
|
detected_language=self.detected_language
|
||||||
).with_offset(
|
).with_offset(
|
||||||
self.global_time_offset
|
self.global_time_offset
|
||||||
)
|
)
|
||||||
timestamped_words.append(timestamp_entry)
|
timestamped_words.append(timestamp_entry)
|
||||||
|
|
||||||
if self.detected_language is None and self.cfg.language == "auto":
|
return timestamped_words
|
||||||
timestamped_buffer_language, timestamped_words = timestamped_words, []
|
|
||||||
else:
|
|
||||||
timestamped_buffer_language = []
|
|
||||||
|
|
||||||
return timestamped_words, timestamped_buffer_language
|
|
||||||
@@ -105,7 +105,8 @@ def load_model(
|
|||||||
device: Optional[Union[str, torch.device]] = None,
|
device: Optional[Union[str, torch.device]] = None,
|
||||||
download_root: str = None,
|
download_root: str = None,
|
||||||
in_memory: bool = False,
|
in_memory: bool = False,
|
||||||
decoder_only=False
|
decoder_only=False,
|
||||||
|
custom_alignment_heads=None
|
||||||
) -> Whisper:
|
) -> Whisper:
|
||||||
"""
|
"""
|
||||||
Load a Whisper ASR model
|
Load a Whisper ASR model
|
||||||
@@ -135,15 +136,17 @@ def load_model(
|
|||||||
download_root = os.path.join(os.getenv("XDG_CACHE_HOME", default), "whisper")
|
download_root = os.path.join(os.getenv("XDG_CACHE_HOME", default), "whisper")
|
||||||
|
|
||||||
if name in _MODELS:
|
if name in _MODELS:
|
||||||
checkpoint_file = _download(_MODELS[name], download_root, in_memory)
|
checkpoint_file = _download(_MODELS[name], download_root, in_memory)
|
||||||
alignment_heads = _ALIGNMENT_HEADS[name]
|
|
||||||
elif os.path.isfile(name):
|
elif os.path.isfile(name):
|
||||||
checkpoint_file = open(name, "rb").read() if in_memory else name
|
checkpoint_file = open(name, "rb").read() if in_memory else name
|
||||||
alignment_heads = None
|
|
||||||
else:
|
else:
|
||||||
raise RuntimeError(
|
raise RuntimeError(
|
||||||
f"Model {name} not found; available models = {available_models()}"
|
f"Model {name} not found; available models = {available_models()}"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
alignment_heads = _ALIGNMENT_HEADS.get(name, None)
|
||||||
|
if custom_alignment_heads:
|
||||||
|
alignment_heads = custom_alignment_heads.encode()
|
||||||
|
|
||||||
with (
|
with (
|
||||||
io.BytesIO(checkpoint_file) if in_memory else open(checkpoint_file, "rb")
|
io.BytesIO(checkpoint_file) if in_memory else open(checkpoint_file, "rb")
|
||||||
|
|||||||
@@ -17,7 +17,7 @@ class TimedText:
|
|||||||
speaker: Optional[int] = -1
|
speaker: Optional[int] = -1
|
||||||
probability: Optional[float] = None
|
probability: Optional[float] = None
|
||||||
is_dummy: Optional[bool] = False
|
is_dummy: Optional[bool] = False
|
||||||
language: str = None
|
detected_language: Optional[str] = None
|
||||||
|
|
||||||
def is_punctuation(self):
|
def is_punctuation(self):
|
||||||
return self.text.strip() in PUNCTUATION_MARKS
|
return self.text.strip() in PUNCTUATION_MARKS
|
||||||
@@ -41,11 +41,17 @@ class TimedText:
|
|||||||
return bool(self.text)
|
return bool(self.text)
|
||||||
|
|
||||||
|
|
||||||
@dataclass
|
@dataclass()
|
||||||
class ASRToken(TimedText):
|
class ASRToken(TimedText):
|
||||||
|
|
||||||
|
corrected_speaker: Optional[int] = -1
|
||||||
|
validated_speaker: bool = False
|
||||||
|
validated_text: bool = False
|
||||||
|
validated_language: bool = False
|
||||||
|
|
||||||
def with_offset(self, offset: float) -> "ASRToken":
|
def with_offset(self, offset: float) -> "ASRToken":
|
||||||
"""Return a new token with the time offset added."""
|
"""Return a new token with the time offset added."""
|
||||||
return ASRToken(self.start + offset, self.end + offset, self.text, self.speaker, self.probability)
|
return ASRToken(self.start + offset, self.end + offset, self.text, self.speaker, self.probability, detected_language=self.detected_language)
|
||||||
|
|
||||||
@dataclass
|
@dataclass
|
||||||
class Sentence(TimedText):
|
class Sentence(TimedText):
|
||||||
@@ -123,11 +129,10 @@ class Silence():
|
|||||||
@dataclass
|
@dataclass
|
||||||
class Line(TimedText):
|
class Line(TimedText):
|
||||||
translation: str = ''
|
translation: str = ''
|
||||||
detected_language: str = None
|
|
||||||
|
|
||||||
def to_dict(self):
|
def to_dict(self):
|
||||||
_dict = {
|
_dict = {
|
||||||
'speaker': int(self.speaker),
|
'speaker': int(self.speaker) if self.speaker != -1 else 1,
|
||||||
'text': self.text,
|
'text': self.text,
|
||||||
'start': format_time(self.start),
|
'start': format_time(self.start),
|
||||||
'end': format_time(self.end),
|
'end': format_time(self.end),
|
||||||
@@ -152,7 +157,7 @@ class FrontData():
|
|||||||
def to_dict(self):
|
def to_dict(self):
|
||||||
_dict = {
|
_dict = {
|
||||||
'status': self.status,
|
'status': self.status,
|
||||||
'lines': [line.to_dict() for line in self.lines],
|
'lines': [line.to_dict() for line in self.lines if (line.text or line.speaker == -2)],
|
||||||
'buffer_transcription': self.buffer_transcription,
|
'buffer_transcription': self.buffer_transcription,
|
||||||
'buffer_diarization': self.buffer_diarization,
|
'buffer_diarization': self.buffer_diarization,
|
||||||
'remaining_time_transcription': self.remaining_time_transcription,
|
'remaining_time_transcription': self.remaining_time_transcription,
|
||||||
@@ -161,13 +166,18 @@ class FrontData():
|
|||||||
if self.error:
|
if self.error:
|
||||||
_dict['error'] = self.error
|
_dict['error'] = self.error
|
||||||
return _dict
|
return _dict
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class ChangeSpeaker:
|
||||||
|
speaker: int
|
||||||
|
start: int
|
||||||
|
|
||||||
@dataclass
|
@dataclass
|
||||||
class State():
|
class State():
|
||||||
tokens: list
|
tokens: list
|
||||||
|
last_validated_token: int
|
||||||
translated_segments: list
|
translated_segments: list
|
||||||
buffer_transcription: str
|
buffer_transcription: str
|
||||||
buffer_diarization: str
|
|
||||||
end_buffer: float
|
end_buffer: float
|
||||||
end_attributed_speaker: float
|
end_attributed_speaker: float
|
||||||
remaining_time_transcription: float
|
remaining_time_transcription: float
|
||||||
|
|||||||
@@ -3,7 +3,7 @@ import time
|
|||||||
import ctranslate2
|
import ctranslate2
|
||||||
import torch
|
import torch
|
||||||
import transformers
|
import transformers
|
||||||
from dataclasses import dataclass
|
from dataclasses import dataclass, field
|
||||||
import huggingface_hub
|
import huggingface_hub
|
||||||
from whisperlivekit.translation.mapping_languages import get_nllb_code
|
from whisperlivekit.translation.mapping_languages import get_nllb_code
|
||||||
from whisperlivekit.timed_objects import Translation
|
from whisperlivekit.timed_objects import Translation
|
||||||
@@ -18,29 +18,46 @@ MIN_SILENCE_DURATION_DEL_BUFFER = 3 #After a silence of x seconds, we consider t
|
|||||||
@dataclass
|
@dataclass
|
||||||
class TranslationModel():
|
class TranslationModel():
|
||||||
translator: ctranslate2.Translator
|
translator: ctranslate2.Translator
|
||||||
tokenizer: dict
|
|
||||||
device: str
|
device: str
|
||||||
|
tokenizer: dict = field(default_factory=dict)
|
||||||
backend_type: str = 'ctranslate2'
|
backend_type: str = 'ctranslate2'
|
||||||
|
nllb_size: str = '600M'
|
||||||
|
|
||||||
|
def get_tokenizer(self, input_lang):
|
||||||
|
if not self.tokenizer.get(input_lang, False):
|
||||||
|
self.tokenizer[input_lang] = transformers.AutoTokenizer.from_pretrained(
|
||||||
|
f"facebook/nllb-200-distilled-{self.nllb_size}",
|
||||||
|
src_lang=input_lang,
|
||||||
|
clean_up_tokenization_spaces=True
|
||||||
|
)
|
||||||
|
return self.tokenizer[input_lang]
|
||||||
|
|
||||||
|
|
||||||
def load_model(src_langs, backend='ctranslate2', model_size='600M'):
|
def load_model(src_langs, nllb_backend='ctranslate2', nllb_size='600M'):
|
||||||
device = "cuda" if torch.cuda.is_available() else "cpu"
|
device = "cuda" if torch.cuda.is_available() else "cpu"
|
||||||
MODEL = f'nllb-200-distilled-{model_size}-ctranslate2'
|
MODEL = f'nllb-200-distilled-{nllb_size}-ctranslate2'
|
||||||
if backend=='ctranslate2':
|
if nllb_backend=='ctranslate2':
|
||||||
MODEL_GUY = 'entai2965'
|
MODEL_GUY = 'entai2965'
|
||||||
huggingface_hub.snapshot_download(MODEL_GUY + '/' + MODEL,local_dir=MODEL)
|
huggingface_hub.snapshot_download(MODEL_GUY + '/' + MODEL,local_dir=MODEL)
|
||||||
translator = ctranslate2.Translator(MODEL,device=device)
|
translator = ctranslate2.Translator(MODEL,device=device)
|
||||||
elif backend=='transformers':
|
elif nllb_backend=='transformers':
|
||||||
translator = transformers.AutoModelForSeq2SeqLM.from_pretrained(f"facebook/nllb-200-distilled-{model_size}")
|
translator = transformers.AutoModelForSeq2SeqLM.from_pretrained(f"facebook/nllb-200-distilled-{nllb_size}")
|
||||||
tokenizer = dict()
|
tokenizer = dict()
|
||||||
for src_lang in src_langs:
|
for src_lang in src_langs:
|
||||||
tokenizer[src_lang] = transformers.AutoTokenizer.from_pretrained(MODEL, src_lang=src_lang, clean_up_tokenization_spaces=True)
|
if src_lang != 'auto':
|
||||||
|
tokenizer[src_lang] = transformers.AutoTokenizer.from_pretrained(MODEL, src_lang=src_lang, clean_up_tokenization_spaces=True)
|
||||||
|
|
||||||
return TranslationModel(
|
translation_model = TranslationModel(
|
||||||
translator=translator,
|
translator=translator,
|
||||||
tokenizer=tokenizer,
|
tokenizer=tokenizer,
|
||||||
backend_type=backend,
|
backend_type=nllb_backend,
|
||||||
device = device
|
device = device,
|
||||||
|
nllb_size = nllb_size
|
||||||
)
|
)
|
||||||
|
for src_lang in src_langs:
|
||||||
|
if src_lang != 'auto':
|
||||||
|
translation_model.get_tokenizer(src_lang)
|
||||||
|
return translation_model
|
||||||
|
|
||||||
class OnlineTranslation:
|
class OnlineTranslation:
|
||||||
def __init__(self, translation_model: TranslationModel, input_languages: list, output_languages: list):
|
def __init__(self, translation_model: TranslationModel, input_languages: list, output_languages: list):
|
||||||
@@ -63,16 +80,12 @@ class OnlineTranslation:
|
|||||||
self.commited.extend(self.buffer[:i])
|
self.commited.extend(self.buffer[:i])
|
||||||
self.buffer = results[i:]
|
self.buffer = results[i:]
|
||||||
|
|
||||||
def translate(self, input, input_lang=None, output_lang=None):
|
def translate(self, input, input_lang, output_lang):
|
||||||
if not input:
|
if not input:
|
||||||
return ""
|
return ""
|
||||||
if input_lang is None:
|
|
||||||
input_lang = self.input_languages[0]
|
|
||||||
if output_lang is None:
|
|
||||||
output_lang = self.output_languages[0]
|
|
||||||
nllb_output_lang = get_nllb_code(output_lang)
|
nllb_output_lang = get_nllb_code(output_lang)
|
||||||
|
|
||||||
tokenizer = self.translation_model.tokenizer[input_lang]
|
tokenizer = self.translation_model.get_tokenizer(input_lang)
|
||||||
tokenizer_output = tokenizer(input, return_tensors="pt").to(self.translation_model.device)
|
tokenizer_output = tokenizer(input, return_tensors="pt").to(self.translation_model.device)
|
||||||
|
|
||||||
if self.translation_model.backend_type == 'ctranslate2':
|
if self.translation_model.backend_type == 'ctranslate2':
|
||||||
@@ -90,7 +103,15 @@ class OnlineTranslation:
|
|||||||
text = ' '.join([token.text for token in tokens])
|
text = ' '.join([token.text for token in tokens])
|
||||||
start = tokens[0].start
|
start = tokens[0].start
|
||||||
end = tokens[-1].end
|
end = tokens[-1].end
|
||||||
translated_text = self.translate(text)
|
if self.input_languages[0] == 'auto':
|
||||||
|
input_lang = tokens[0].detected_language
|
||||||
|
else:
|
||||||
|
input_lang = self.input_languages[0]
|
||||||
|
|
||||||
|
translated_text = self.translate(text,
|
||||||
|
input_lang,
|
||||||
|
self.output_languages[0]
|
||||||
|
)
|
||||||
translation = Translation(
|
translation = Translation(
|
||||||
text=translated_text,
|
text=translated_text,
|
||||||
start=start,
|
start=start,
|
||||||
@@ -136,7 +157,7 @@ if __name__ == '__main__':
|
|||||||
test = test_string.split(' ')
|
test = test_string.split(' ')
|
||||||
step = len(test) // 3
|
step = len(test) // 3
|
||||||
|
|
||||||
shared_model = load_model([input_lang], backend='ctranslate2')
|
shared_model = load_model([input_lang], nllb_backend='ctranslate2')
|
||||||
online_translation = OnlineTranslation(shared_model, input_languages=[input_lang], output_languages=[output_lang])
|
online_translation = OnlineTranslation(shared_model, input_languages=[input_lang], output_languages=[output_lang])
|
||||||
|
|
||||||
beg_inference = time.time()
|
beg_inference = time.time()
|
||||||
|
|||||||
@@ -72,6 +72,12 @@
|
|||||||
--label-trans-text: #111111;
|
--label-trans-text: #111111;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
html.is-extension
|
||||||
|
{
|
||||||
|
width: 350px;
|
||||||
|
height: 500px;
|
||||||
|
}
|
||||||
|
|
||||||
body {
|
body {
|
||||||
font-family: ui-sans-serif, system-ui, sans-serif, 'Apple Color Emoji', 'Segoe UI Emoji', 'Segoe UI Symbol', 'Noto Color Emoji';
|
font-family: ui-sans-serif, system-ui, sans-serif, 'Apple Color Emoji', 'Segoe UI Emoji', 'Segoe UI Symbol', 'Noto Color Emoji';
|
||||||
margin: 0;
|
margin: 0;
|
||||||
@@ -191,6 +197,14 @@ body {
|
|||||||
justify-content: center;
|
justify-content: center;
|
||||||
align-items: center;
|
align-items: center;
|
||||||
gap: 15px;
|
gap: 15px;
|
||||||
|
position: relative;
|
||||||
|
flex-wrap: wrap;
|
||||||
|
}
|
||||||
|
|
||||||
|
.buttons-container {
|
||||||
|
display: flex;
|
||||||
|
align-items: center;
|
||||||
|
gap: 15px;
|
||||||
}
|
}
|
||||||
|
|
||||||
.settings {
|
.settings {
|
||||||
@@ -200,6 +214,66 @@ body {
|
|||||||
gap: 12px;
|
gap: 12px;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
.settings-toggle {
|
||||||
|
width: 40px;
|
||||||
|
height: 40px;
|
||||||
|
border: none;
|
||||||
|
border-radius: 50%;
|
||||||
|
background-color: var(--button-bg);
|
||||||
|
border: 1px solid var(--button-border);
|
||||||
|
cursor: pointer;
|
||||||
|
display: none;
|
||||||
|
align-items: center;
|
||||||
|
justify-content: center;
|
||||||
|
transition: all 0.2s ease;
|
||||||
|
}
|
||||||
|
|
||||||
|
.settings-toggle:hover {
|
||||||
|
background-color: var(--chip-bg);
|
||||||
|
}
|
||||||
|
|
||||||
|
.settings-toggle.active {
|
||||||
|
background-color: var(--chip-bg);
|
||||||
|
}
|
||||||
|
|
||||||
|
.settings-toggle img {
|
||||||
|
width: 20px;
|
||||||
|
height: 20px;
|
||||||
|
}
|
||||||
|
|
||||||
|
@media (max-width: 10000px) {
|
||||||
|
.settings-toggle {
|
||||||
|
display: flex;
|
||||||
|
}
|
||||||
|
|
||||||
|
.settings {
|
||||||
|
display: none;
|
||||||
|
background: var(--bg);
|
||||||
|
border: 1px solid var(--border);
|
||||||
|
border-radius: 18px;
|
||||||
|
padding: 12px;
|
||||||
|
}
|
||||||
|
|
||||||
|
.settings.visible {
|
||||||
|
display: flex;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@media (max-width: 600px) {
|
||||||
|
.settings-container {
|
||||||
|
flex-direction: column;
|
||||||
|
align-items: center;
|
||||||
|
gap: 10px;
|
||||||
|
}
|
||||||
|
|
||||||
|
.buttons-container {
|
||||||
|
display: flex;
|
||||||
|
justify-content: center;
|
||||||
|
align-items: center;
|
||||||
|
gap: 15px;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
.field {
|
.field {
|
||||||
display: flex;
|
display: flex;
|
||||||
flex-direction: column;
|
flex-direction: column;
|
||||||
@@ -409,7 +483,6 @@ label {
|
|||||||
|
|
||||||
.buffer_diarization {
|
.buffer_diarization {
|
||||||
color: var(--label-dia-text);
|
color: var(--label-dia-text);
|
||||||
margin-left: 4px;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
.buffer_transcription {
|
.buffer_transcription {
|
||||||
@@ -454,7 +527,7 @@ label {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* for smaller screens */
|
/* for smaller screens */
|
||||||
@media (max-width: 768px) {
|
@media (max-width: 200px) {
|
||||||
.header-container {
|
.header-container {
|
||||||
padding: 15px;
|
padding: 15px;
|
||||||
}
|
}
|
||||||
@@ -464,6 +537,10 @@ label {
|
|||||||
gap: 10px;
|
gap: 10px;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
.buttons-container {
|
||||||
|
gap: 10px;
|
||||||
|
}
|
||||||
|
|
||||||
.settings {
|
.settings {
|
||||||
justify-content: center;
|
justify-content: center;
|
||||||
gap: 8px;
|
gap: 8px;
|
||||||
@@ -522,8 +599,6 @@ label {
|
|||||||
.label_language {
|
.label_language {
|
||||||
background-color: var(--chip-bg);
|
background-color: var(--chip-bg);
|
||||||
margin-bottom: 0px;
|
margin-bottom: 0px;
|
||||||
margin-top: 5px;
|
|
||||||
height: 18.5px;
|
|
||||||
border-radius: 100px;
|
border-radius: 100px;
|
||||||
padding: 2px 8px;
|
padding: 2px 8px;
|
||||||
margin-left: 10px;
|
margin-left: 10px;
|
||||||
@@ -534,22 +609,6 @@ label {
|
|||||||
color: var(--muted);
|
color: var(--muted);
|
||||||
}
|
}
|
||||||
|
|
||||||
.label_language img {
|
|
||||||
width: 12px;
|
|
||||||
height: 12px;
|
|
||||||
}
|
|
||||||
|
|
||||||
.silence-icon {
|
|
||||||
width: 14px;
|
|
||||||
height: 14px;
|
|
||||||
vertical-align: text-bottom;
|
|
||||||
}
|
|
||||||
|
|
||||||
.speaker-icon {
|
|
||||||
width: 16px;
|
|
||||||
height: 16px;
|
|
||||||
vertical-align: text-bottom;
|
|
||||||
}
|
|
||||||
|
|
||||||
.speaker-badge {
|
.speaker-badge {
|
||||||
display: inline-flex;
|
display: inline-flex;
|
||||||
|
|||||||
@@ -5,23 +5,29 @@
|
|||||||
<meta charset="UTF-8" />
|
<meta charset="UTF-8" />
|
||||||
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
|
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
|
||||||
<title>WhisperLiveKit</title>
|
<title>WhisperLiveKit</title>
|
||||||
<link rel="stylesheet" href="/web/live_transcription.css" />
|
<link rel="stylesheet" href="live_transcription.css" />
|
||||||
</head>
|
</head>
|
||||||
|
|
||||||
<body>
|
<body>
|
||||||
<div class="header-container">
|
<div class="header-container">
|
||||||
<div class="settings-container">
|
<div class="settings-container">
|
||||||
<button id="recordButton">
|
<div class="buttons-container">
|
||||||
<div class="shape-container">
|
<button id="recordButton">
|
||||||
<div class="shape"></div>
|
<div class="shape-container">
|
||||||
</div>
|
<div class="shape"></div>
|
||||||
<div class="recording-info">
|
|
||||||
<div class="wave-container">
|
|
||||||
<canvas id="waveCanvas"></canvas>
|
|
||||||
</div>
|
</div>
|
||||||
<div class="timer">00:00</div>
|
<div class="recording-info">
|
||||||
</div>
|
<div class="wave-container">
|
||||||
</button>
|
<canvas id="waveCanvas"></canvas>
|
||||||
|
</div>
|
||||||
|
<div class="timer">00:00</div>
|
||||||
|
</div>
|
||||||
|
</button>
|
||||||
|
|
||||||
|
<button id="settingsToggle" class="settings-toggle" title="Show/hide settings">
|
||||||
|
<img src="web/src/settings.svg" alt="Settings" />
|
||||||
|
</button>
|
||||||
|
</div>
|
||||||
|
|
||||||
<div class="settings">
|
<div class="settings">
|
||||||
<div class="field">
|
<div class="field">
|
||||||
@@ -67,7 +73,7 @@
|
|||||||
<div id="linesTranscript"></div>
|
<div id="linesTranscript"></div>
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
<script src="/web/live_transcription.js"></script>
|
<script src="live_transcription.js"></script>
|
||||||
</body>
|
</body>
|
||||||
|
|
||||||
</html>
|
</html>
|
||||||
|
|||||||
@@ -1,4 +1,8 @@
|
|||||||
/* Theme, WebSocket, recording, rendering logic extracted from inline script and adapted for segmented theme control and WS caption */
|
const isExtension = typeof chrome !== 'undefined' && chrome.runtime && chrome.runtime.getURL;
|
||||||
|
if (isExtension) {
|
||||||
|
document.documentElement.classList.add('is-extension');
|
||||||
|
}
|
||||||
|
const isWebContext = !isExtension;
|
||||||
|
|
||||||
let isRecording = false;
|
let isRecording = false;
|
||||||
let websocket = null;
|
let websocket = null;
|
||||||
@@ -25,6 +29,8 @@ let selectedMicrophoneId = null;
|
|||||||
let serverUseAudioWorklet = null;
|
let serverUseAudioWorklet = null;
|
||||||
let configReadyResolve;
|
let configReadyResolve;
|
||||||
const configReady = new Promise((r) => (configReadyResolve = r));
|
const configReady = new Promise((r) => (configReadyResolve = r));
|
||||||
|
let outputAudioContext = null;
|
||||||
|
let audioSource = null;
|
||||||
|
|
||||||
waveCanvas.width = 60 * (window.devicePixelRatio || 1);
|
waveCanvas.width = 60 * (window.devicePixelRatio || 1);
|
||||||
waveCanvas.height = 30 * (window.devicePixelRatio || 1);
|
waveCanvas.height = 30 * (window.devicePixelRatio || 1);
|
||||||
@@ -40,6 +46,26 @@ const timerElement = document.querySelector(".timer");
|
|||||||
const themeRadios = document.querySelectorAll('input[name="theme"]');
|
const themeRadios = document.querySelectorAll('input[name="theme"]');
|
||||||
const microphoneSelect = document.getElementById("microphoneSelect");
|
const microphoneSelect = document.getElementById("microphoneSelect");
|
||||||
|
|
||||||
|
const settingsToggle = document.getElementById("settingsToggle");
|
||||||
|
const settingsDiv = document.querySelector(".settings");
|
||||||
|
|
||||||
|
// if (isExtension) {
|
||||||
|
// chrome.runtime.onInstalled.addListener((details) => {
|
||||||
|
// if (details.reason.search(/install/g) === -1) {
|
||||||
|
// return;
|
||||||
|
// }
|
||||||
|
// chrome.tabs.create({
|
||||||
|
// url: chrome.runtime.getURL("welcome.html"),
|
||||||
|
// active: true
|
||||||
|
// });
|
||||||
|
// });
|
||||||
|
// }
|
||||||
|
|
||||||
|
const translationIcon = `<svg xmlns="http://www.w3.org/2000/svg" height="12px" viewBox="0 -960 960 960" width="12px" fill="#5f6368"><path d="m603-202-34 97q-4 11-14 18t-22 7q-20 0-32.5-16.5T496-133l152-402q5-11 15-18t22-7h30q12 0 22 7t15 18l152 403q8 19-4 35.5T868-80q-13 0-22.5-7T831-106l-34-96H603ZM362-401 188-228q-11 11-27.5 11.5T132-228q-11-11-11-28t11-28l174-174q-35-35-63.5-80T190-640h84q20 39 40 68t48 58q33-33 68.5-92.5T484-720H80q-17 0-28.5-11.5T40-760q0-17 11.5-28.5T80-800h240v-40q0-17 11.5-28.5T360-880q17 0 28.5 11.5T400-840v40h240q17 0 28.5 11.5T680-760q0 17-11.5 28.5T640-720h-76q-21 72-63 148t-83 116l96 98-30 82-122-125Zm266 129h144l-72-204-72 204Z"/></svg>`
|
||||||
|
const silenceIcon = `<svg xmlns="http://www.w3.org/2000/svg" style="vertical-align: text-bottom;" height="14px" viewBox="0 -960 960 960" width="14px" fill="#5f6368"><path d="M514-556 320-752q9-3 19-5.5t21-2.5q66 0 113 47t47 113q0 11-1.5 22t-4.5 22ZM40-200v-32q0-33 17-62t47-44q51-26 115-44t141-18q26 0 49.5 2.5T456-392l-56-54q-9 3-19 4.5t-21 1.5q-66 0-113-47t-47-113q0-11 1.5-21t4.5-19L84-764q-11-11-11-28t11-28q12-12 28.5-12t27.5 12l675 685q11 11 11.5 27.5T816-80q-11 13-28 12.5T759-80L641-200h39q0 33-23.5 56.5T600-120H120q-33 0-56.5-23.5T40-200Zm80 0h480v-32q0-14-4.5-19.5T580-266q-36-18-92.5-36T360-320q-71 0-127.5 18T140-266q-9 5-14.5 14t-5.5 20v32Zm240 0Zm560-400q0 69-24.5 131.5T829-355q-12 14-30 15t-32-13q-13-13-12-31t12-33q30-38 46.5-85t16.5-98q0-51-16.5-97T767-781q-12-15-12.5-33t12.5-32q13-14 31.5-13.5T829-845q42 51 66.5 113.5T920-600Zm-182 0q0 32-10 61.5T700-484q-11 15-29.5 15.5T638-482q-13-13-13.5-31.5T633-549q6-11 9.5-24t3.5-27q0-14-3.5-27t-9.5-25q-9-17-8.5-35t13.5-31q14-14 32.5-13.5T700-716q18 25 28 54.5t10 61.5Z"/></svg>`;
|
||||||
|
const languageIcon = `<svg xmlns="http://www.w3.org/2000/svg" height="12" viewBox="0 -960 960 960" width="12" fill="#5f6368"><path d="M480-80q-82 0-155-31.5t-127.5-86Q143-252 111.5-325T80-480q0-83 31.5-155.5t86-127Q252-817 325-848.5T480-880q83 0 155.5 31.5t127 86q54.5 54.5 86 127T880-480q0 82-31.5 155t-86 127.5q-54.5 54.5-127 86T480-80Zm0-82q26-36 45-75t31-83H404q12 44 31 83t45 75Zm-104-16q-18-33-31.5-68.5T322-320H204q29 50 72.5 87t99.5 55Zm208 0q56-18 99.5-55t72.5-87H638q-9 38-22.5 73.5T584-178ZM170-400h136q-3-20-4.5-39.5T300-480q0-21 1.5-40.5T306-560H170q-5 20-7.5 39.5T160-480q0 21 2.5 40.5T170-400Zm216 0h188q3-20 4.5-39.5T580-480q0-21-1.5-40.5T574-560H386q-3 20-4.5 39.5T380-480q0 21 1.5 40.5T386-400Zm268 0h136q5-20 7.5-39.5T800-480q0-21-2.5-40.5T790-560H654q3 20 4.5 39.5T660-480q0 21-1.5 40.5T654-400Zm-16-240h118q-29-50-72.5-87T584-782q18 33 31.5 68.5T638-640Zm-234 0h152q-12-44-31-83t-45-75q-26 36-45 75t-31 83Zm-200 0h118q9-38 22.5-73.5T376-782q-56 18-99.5 55T204-640Z"/></svg>`
|
||||||
|
const speakerIcon = `<svg xmlns="http://www.w3.org/2000/svg" height="16px" style="vertical-align: text-bottom;" viewBox="0 -960 960 960" width="16px" fill="#5f6368"><path d="M480-480q-66 0-113-47t-47-113q0-66 47-113t113-47q66 0 113 47t47 113q0 66-47 113t-113 47ZM160-240v-32q0-34 17.5-62.5T224-378q62-31 126-46.5T480-440q66 0 130 15.5T736-378q29 15 46.5 43.5T800-272v32q0 33-23.5 56.5T720-160H240q-33 0-56.5-23.5T160-240Zm80 0h480v-32q0-11-5.5-20T700-306q-54-27-109-40.5T480-360q-56 0-111 13.5T260-306q-9 5-14.5 14t-5.5 20v32Zm240-320q33 0 56.5-23.5T560-640q0-33-23.5-56.5T480-720q-33 0-56.5 23.5T400-640q0 33 23.5 56.5T480-560Zm0-80Zm0 400Z"/></svg>`;
|
||||||
|
|
||||||
function getWaveStroke() {
|
function getWaveStroke() {
|
||||||
const styles = getComputedStyle(document.documentElement);
|
const styles = getComputedStyle(document.documentElement);
|
||||||
const v = styles.getPropertyValue("--wave-stroke").trim();
|
const v = styles.getPropertyValue("--wave-stroke").trim();
|
||||||
@@ -151,10 +177,16 @@ function fmt1(x) {
|
|||||||
return Number.isFinite(n) ? n.toFixed(1) : x;
|
return Number.isFinite(n) ? n.toFixed(1) : x;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Default WebSocket URL computation
|
let host, port, protocol;
|
||||||
const host = window.location.hostname || "localhost";
|
port = 8000;
|
||||||
const port = window.location.port;
|
if (isExtension) {
|
||||||
const protocol = window.location.protocol === "https:" ? "wss" : "ws";
|
host = "localhost";
|
||||||
|
protocol = "ws";
|
||||||
|
} else {
|
||||||
|
host = window.location.hostname || "localhost";
|
||||||
|
port = window.location.port;
|
||||||
|
protocol = window.location.protocol === "https:" ? "wss" : "ws";
|
||||||
|
}
|
||||||
const defaultWebSocketUrl = `${protocol}://${host}${port ? ":" + port : ""}/asr`;
|
const defaultWebSocketUrl = `${protocol}://${host}${port ? ":" + port : ""}/asr`;
|
||||||
|
|
||||||
// Populate default caption and input
|
// Populate default caption and input
|
||||||
@@ -335,19 +367,17 @@ function renderLinesWithBuffer(
|
|||||||
|
|
||||||
let speakerLabel = "";
|
let speakerLabel = "";
|
||||||
if (item.speaker === -2) {
|
if (item.speaker === -2) {
|
||||||
const silenceIcon = `<img class="silence-icon" src="/web/src/silence.svg" alt="Silence" />`;
|
|
||||||
speakerLabel = `<span class="silence">${silenceIcon}<span id='timeInfo'>${timeInfo}</span></span>`;
|
speakerLabel = `<span class="silence">${silenceIcon}<span id='timeInfo'>${timeInfo}</span></span>`;
|
||||||
} else if (item.speaker == 0 && !isFinalizing) {
|
} else if (item.speaker == 0 && !isFinalizing) {
|
||||||
speakerLabel = `<span class='loading'><span class="spinner"></span><span id='timeInfo'><span class="loading-diarization-value">${fmt1(
|
speakerLabel = `<span class='loading'><span class="spinner"></span><span id='timeInfo'><span class="loading-diarization-value">${fmt1(
|
||||||
remaining_time_diarization
|
remaining_time_diarization
|
||||||
)}</span> second(s) of audio are undergoing diarization</span></span>`;
|
)}</span> second(s) of audio are undergoing diarization</span></span>`;
|
||||||
} else if (item.speaker !== 0) {
|
} else if (item.speaker !== 0) {
|
||||||
const speakerIcon = `<img class="speaker-icon" src="/web/src/speaker.svg" alt="Speaker ${item.speaker}" />`;
|
|
||||||
const speakerNum = `<span class="speaker-badge">${item.speaker}</span>`;
|
const speakerNum = `<span class="speaker-badge">${item.speaker}</span>`;
|
||||||
speakerLabel = `<span id="speaker">${speakerIcon}${speakerNum}<span id='timeInfo'>${timeInfo}</span></span>`;
|
speakerLabel = `<span id="speaker">${speakerIcon}${speakerNum}<span id='timeInfo'>${timeInfo}</span></span>`;
|
||||||
|
|
||||||
if (item.detected_language) {
|
if (item.detected_language) {
|
||||||
speakerLabel += `<span class="label_language"><img src="/web/src/language.svg" alt="Detected language" width="12" height="12" /><span>${item.detected_language}</span></span>`;
|
speakerLabel += `<span class="label_language">${languageIcon}<span>${item.detected_language}</span></span>`;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -387,10 +417,13 @@ function renderLinesWithBuffer(
|
|||||||
}
|
}
|
||||||
|
|
||||||
if (item.translation) {
|
if (item.translation) {
|
||||||
currentLineText += `<div class="label_translation">
|
currentLineText += `
|
||||||
<img src="/web/src/translate.svg" alt="Translation" width="12" height="12" />
|
<div>
|
||||||
<span>${item.translation}</span>
|
<div class="label_translation">
|
||||||
</div>`;
|
${translationIcon}
|
||||||
|
<span>${item.translation}</span>
|
||||||
|
</div>
|
||||||
|
</div>`;
|
||||||
}
|
}
|
||||||
|
|
||||||
return currentLineText.trim().length > 0 || speakerLabel.length > 0
|
return currentLineText.trim().length > 0 || speakerLabel.length > 0
|
||||||
@@ -465,11 +498,44 @@ async function startRecording() {
|
|||||||
console.log("Error acquiring wake lock.");
|
console.log("Error acquiring wake lock.");
|
||||||
}
|
}
|
||||||
|
|
||||||
const audioConstraints = selectedMicrophoneId
|
let stream;
|
||||||
? { audio: { deviceId: { exact: selectedMicrophoneId } } }
|
|
||||||
: { audio: true };
|
// chromium extension. in the future, both chrome page audio and mic will be used
|
||||||
|
if (isExtension) {
|
||||||
const stream = await navigator.mediaDevices.getUserMedia(audioConstraints);
|
try {
|
||||||
|
stream = await new Promise((resolve, reject) => {
|
||||||
|
chrome.tabCapture.capture({audio: true}, (s) => {
|
||||||
|
if (s) {
|
||||||
|
resolve(s);
|
||||||
|
} else {
|
||||||
|
reject(new Error('Tab capture failed or not available'));
|
||||||
|
}
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
try {
|
||||||
|
outputAudioContext = new (window.AudioContext || window.webkitAudioContext)();
|
||||||
|
audioSource = outputAudioContext.createMediaStreamSource(stream);
|
||||||
|
audioSource.connect(outputAudioContext.destination);
|
||||||
|
} catch (audioError) {
|
||||||
|
console.warn('could not preserve system audio:', audioError);
|
||||||
|
}
|
||||||
|
|
||||||
|
statusText.textContent = "Using tab audio capture.";
|
||||||
|
} catch (tabError) {
|
||||||
|
console.log('Tab capture not available, falling back to microphone', tabError);
|
||||||
|
const audioConstraints = selectedMicrophoneId
|
||||||
|
? { audio: { deviceId: { exact: selectedMicrophoneId } } }
|
||||||
|
: { audio: true };
|
||||||
|
stream = await navigator.mediaDevices.getUserMedia(audioConstraints);
|
||||||
|
statusText.textContent = "Using microphone audio.";
|
||||||
|
}
|
||||||
|
} else if (isWebContext) {
|
||||||
|
const audioConstraints = selectedMicrophoneId
|
||||||
|
? { audio: { deviceId: { exact: selectedMicrophoneId } } }
|
||||||
|
: { audio: true };
|
||||||
|
stream = await navigator.mediaDevices.getUserMedia(audioConstraints);
|
||||||
|
}
|
||||||
|
|
||||||
audioContext = new (window.AudioContext || window.webkitAudioContext)();
|
audioContext = new (window.AudioContext || window.webkitAudioContext)();
|
||||||
analyser = audioContext.createAnalyser();
|
analyser = audioContext.createAnalyser();
|
||||||
@@ -603,6 +669,16 @@ async function stopRecording() {
|
|||||||
audioContext = null;
|
audioContext = null;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (audioSource) {
|
||||||
|
audioSource.disconnect();
|
||||||
|
audioSource = null;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (outputAudioContext && outputAudioContext.state !== "closed") {
|
||||||
|
outputAudioContext.close()
|
||||||
|
outputAudioContext = null;
|
||||||
|
}
|
||||||
|
|
||||||
if (animationFrame) {
|
if (animationFrame) {
|
||||||
cancelAnimationFrame(animationFrame);
|
cancelAnimationFrame(animationFrame);
|
||||||
animationFrame = null;
|
animationFrame = null;
|
||||||
@@ -654,7 +730,7 @@ function updateUI() {
|
|||||||
statusText.textContent = "Please wait for processing to complete...";
|
statusText.textContent = "Please wait for processing to complete...";
|
||||||
}
|
}
|
||||||
} else if (isRecording) {
|
} else if (isRecording) {
|
||||||
statusText.textContent = "Recording...";
|
statusText.textContent = "";
|
||||||
} else {
|
} else {
|
||||||
if (
|
if (
|
||||||
statusText.textContent !== "Finished processing audio! Ready to record again." &&
|
statusText.textContent !== "Finished processing audio! Ready to record again." &&
|
||||||
@@ -688,3 +764,40 @@ navigator.mediaDevices.addEventListener('devicechange', async () => {
|
|||||||
console.log("Error re-enumerating microphones:", error);
|
console.log("Error re-enumerating microphones:", error);
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
|
|
||||||
|
|
||||||
|
settingsToggle.addEventListener("click", () => {
|
||||||
|
settingsDiv.classList.toggle("visible");
|
||||||
|
settingsToggle.classList.toggle("active");
|
||||||
|
});
|
||||||
|
|
||||||
|
if (isExtension) {
|
||||||
|
async function checkAndRequestPermissions() {
|
||||||
|
const micPermission = await navigator.permissions.query({
|
||||||
|
name: "microphone",
|
||||||
|
});
|
||||||
|
|
||||||
|
const permissionDisplay = document.getElementById("audioPermission");
|
||||||
|
if (permissionDisplay) {
|
||||||
|
permissionDisplay.innerText = `MICROPHONE: ${micPermission.state}`;
|
||||||
|
}
|
||||||
|
|
||||||
|
// if (micPermission.state !== "granted") {
|
||||||
|
// chrome.tabs.create({ url: "welcome.html" });
|
||||||
|
// }
|
||||||
|
|
||||||
|
const intervalId = setInterval(async () => {
|
||||||
|
const micPermission = await navigator.permissions.query({
|
||||||
|
name: "microphone",
|
||||||
|
});
|
||||||
|
if (micPermission.state === "granted") {
|
||||||
|
if (permissionDisplay) {
|
||||||
|
permissionDisplay.innerText = `MICROPHONE: ${micPermission.state}`;
|
||||||
|
}
|
||||||
|
clearInterval(intervalId);
|
||||||
|
}
|
||||||
|
}, 100);
|
||||||
|
}
|
||||||
|
|
||||||
|
void checkAndRequestPermissions();
|
||||||
|
}
|
||||||
|
|||||||
@@ -23,6 +23,24 @@ def get_inline_ui_html():
|
|||||||
with resources.files('whisperlivekit.web').joinpath('live_transcription.js').open('r', encoding='utf-8') as f:
|
with resources.files('whisperlivekit.web').joinpath('live_transcription.js').open('r', encoding='utf-8') as f:
|
||||||
js_content = f.read()
|
js_content = f.read()
|
||||||
|
|
||||||
|
with resources.files('whisperlivekit.web').joinpath('pcm_worklet.js').open('r', encoding='utf-8') as f:
|
||||||
|
worklet_code = f.read()
|
||||||
|
with resources.files('whisperlivekit.web').joinpath('recorder_worker.js').open('r', encoding='utf-8') as f:
|
||||||
|
worker_code = f.read()
|
||||||
|
|
||||||
|
js_content = js_content.replace(
|
||||||
|
'await audioContext.audioWorklet.addModule("/web/pcm_worklet.js");',
|
||||||
|
'const workletBlob = new Blob([`' + worklet_code + '`], { type: "application/javascript" });\n' +
|
||||||
|
'const workletUrl = URL.createObjectURL(workletBlob);\n' +
|
||||||
|
'await audioContext.audioWorklet.addModule(workletUrl);'
|
||||||
|
)
|
||||||
|
js_content = js_content.replace(
|
||||||
|
'recorderWorker = new Worker("/web/recorder_worker.js");',
|
||||||
|
'const workerBlob = new Blob([`' + worker_code + '`], { type: "application/javascript" });\n' +
|
||||||
|
'const workerUrl = URL.createObjectURL(workerBlob);\n' +
|
||||||
|
'recorderWorker = new Worker(workerUrl);'
|
||||||
|
)
|
||||||
|
|
||||||
# SVG files
|
# SVG files
|
||||||
with resources.files('whisperlivekit.web').joinpath('src', 'system_mode.svg').open('r', encoding='utf-8') as f:
|
with resources.files('whisperlivekit.web').joinpath('src', 'system_mode.svg').open('r', encoding='utf-8') as f:
|
||||||
system_svg = f.read()
|
system_svg = f.read()
|
||||||
@@ -33,15 +51,18 @@ def get_inline_ui_html():
|
|||||||
with resources.files('whisperlivekit.web').joinpath('src', 'dark_mode.svg').open('r', encoding='utf-8') as f:
|
with resources.files('whisperlivekit.web').joinpath('src', 'dark_mode.svg').open('r', encoding='utf-8') as f:
|
||||||
dark_svg = f.read()
|
dark_svg = f.read()
|
||||||
dark_data_uri = f"data:image/svg+xml;base64,{base64.b64encode(dark_svg.encode('utf-8')).decode('utf-8')}"
|
dark_data_uri = f"data:image/svg+xml;base64,{base64.b64encode(dark_svg.encode('utf-8')).decode('utf-8')}"
|
||||||
|
with resources.files('whisperlivekit.web').joinpath('src', 'settings.svg').open('r', encoding='utf-8') as f:
|
||||||
|
settings = f.read()
|
||||||
|
settings_uri = f"data:image/svg+xml;base64,{base64.b64encode(settings.encode('utf-8')).decode('utf-8')}"
|
||||||
|
|
||||||
# Replace external references
|
# Replace external references
|
||||||
html_content = html_content.replace(
|
html_content = html_content.replace(
|
||||||
'<link rel="stylesheet" href="/web/live_transcription.css" />',
|
'<link rel="stylesheet" href="live_transcription.css" />',
|
||||||
f'<style>\n{css_content}\n</style>'
|
f'<style>\n{css_content}\n</style>'
|
||||||
)
|
)
|
||||||
|
|
||||||
html_content = html_content.replace(
|
html_content = html_content.replace(
|
||||||
'<script src="/web/live_transcription.js"></script>',
|
'<script src="live_transcription.js"></script>',
|
||||||
f'<script>\n{js_content}\n</script>'
|
f'<script>\n{js_content}\n</script>'
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -61,6 +82,11 @@ def get_inline_ui_html():
|
|||||||
f'<img src="{dark_data_uri}" alt="" />'
|
f'<img src="{dark_data_uri}" alt="" />'
|
||||||
)
|
)
|
||||||
|
|
||||||
|
html_content = html_content.replace(
|
||||||
|
'<img src="web/src/settings.svg" alt="Settings" />',
|
||||||
|
f'<img src="{settings_uri}" alt="" />'
|
||||||
|
)
|
||||||
|
|
||||||
return html_content
|
return html_content
|
||||||
|
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
|
|||||||
@@ -11,14 +11,14 @@ class ASRBase:
|
|||||||
sep = " " # join transcribe words with this character (" " for whisper_timestamped,
|
sep = " " # join transcribe words with this character (" " for whisper_timestamped,
|
||||||
# "" for faster-whisper because it emits the spaces when needed)
|
# "" for faster-whisper because it emits the spaces when needed)
|
||||||
|
|
||||||
def __init__(self, lan, modelsize=None, cache_dir=None, model_dir=None, logfile=sys.stderr):
|
def __init__(self, lan, model_size=None, cache_dir=None, model_dir=None, logfile=sys.stderr):
|
||||||
self.logfile = logfile
|
self.logfile = logfile
|
||||||
self.transcribe_kargs = {}
|
self.transcribe_kargs = {}
|
||||||
if lan == "auto":
|
if lan == "auto":
|
||||||
self.original_language = None
|
self.original_language = None
|
||||||
else:
|
else:
|
||||||
self.original_language = lan
|
self.original_language = lan
|
||||||
self.model = self.load_model(modelsize, cache_dir, model_dir)
|
self.model = self.load_model(model_size, cache_dir, model_dir)
|
||||||
|
|
||||||
def with_offset(self, offset: float) -> ASRToken:
|
def with_offset(self, offset: float) -> ASRToken:
|
||||||
# This method is kept for compatibility (typically you will use ASRToken.with_offset)
|
# This method is kept for compatibility (typically you will use ASRToken.with_offset)
|
||||||
@@ -27,7 +27,7 @@ class ASRBase:
|
|||||||
def __repr__(self):
|
def __repr__(self):
|
||||||
return f"ASRToken(start={self.start:.2f}, end={self.end:.2f}, text={self.text!r})"
|
return f"ASRToken(start={self.start:.2f}, end={self.end:.2f}, text={self.text!r})"
|
||||||
|
|
||||||
def load_model(self, modelsize, cache_dir, model_dir):
|
def load_model(self, model_size, cache_dir, model_dir):
|
||||||
raise NotImplementedError("must be implemented in the child class")
|
raise NotImplementedError("must be implemented in the child class")
|
||||||
|
|
||||||
def transcribe(self, audio, init_prompt=""):
|
def transcribe(self, audio, init_prompt=""):
|
||||||
@@ -41,7 +41,7 @@ class WhisperTimestampedASR(ASRBase):
|
|||||||
"""Uses whisper_timestamped as the backend."""
|
"""Uses whisper_timestamped as the backend."""
|
||||||
sep = " "
|
sep = " "
|
||||||
|
|
||||||
def load_model(self, modelsize=None, cache_dir=None, model_dir=None):
|
def load_model(self, model_size=None, cache_dir=None, model_dir=None):
|
||||||
import whisper
|
import whisper
|
||||||
import whisper_timestamped
|
import whisper_timestamped
|
||||||
from whisper_timestamped import transcribe_timestamped
|
from whisper_timestamped import transcribe_timestamped
|
||||||
@@ -49,7 +49,7 @@ class WhisperTimestampedASR(ASRBase):
|
|||||||
self.transcribe_timestamped = transcribe_timestamped
|
self.transcribe_timestamped = transcribe_timestamped
|
||||||
if model_dir is not None:
|
if model_dir is not None:
|
||||||
logger.debug("ignoring model_dir, not implemented")
|
logger.debug("ignoring model_dir, not implemented")
|
||||||
return whisper.load_model(modelsize, download_root=cache_dir)
|
return whisper.load_model(model_size, download_root=cache_dir)
|
||||||
|
|
||||||
def transcribe(self, audio, init_prompt=""):
|
def transcribe(self, audio, init_prompt=""):
|
||||||
result = self.transcribe_timestamped(
|
result = self.transcribe_timestamped(
|
||||||
@@ -88,17 +88,17 @@ class FasterWhisperASR(ASRBase):
|
|||||||
"""Uses faster-whisper as the backend."""
|
"""Uses faster-whisper as the backend."""
|
||||||
sep = ""
|
sep = ""
|
||||||
|
|
||||||
def load_model(self, modelsize=None, cache_dir=None, model_dir=None):
|
def load_model(self, model_size=None, cache_dir=None, model_dir=None):
|
||||||
from faster_whisper import WhisperModel
|
from faster_whisper import WhisperModel
|
||||||
|
|
||||||
if model_dir is not None:
|
if model_dir is not None:
|
||||||
logger.debug(f"Loading whisper model from model_dir {model_dir}. "
|
logger.debug(f"Loading whisper model from model_dir {model_dir}. "
|
||||||
f"modelsize and cache_dir parameters are not used.")
|
f"model_size and cache_dir parameters are not used.")
|
||||||
model_size_or_path = model_dir
|
model_size_or_path = model_dir
|
||||||
elif modelsize is not None:
|
elif model_size is not None:
|
||||||
model_size_or_path = modelsize
|
model_size_or_path = model_size
|
||||||
else:
|
else:
|
||||||
raise ValueError("Either modelsize or model_dir must be set")
|
raise ValueError("Either model_size or model_dir must be set")
|
||||||
device = "auto" # Allow CTranslate2 to decide available device
|
device = "auto" # Allow CTranslate2 to decide available device
|
||||||
compute_type = "auto" # Allow CTranslate2 to decide faster compute type
|
compute_type = "auto" # Allow CTranslate2 to decide faster compute type
|
||||||
|
|
||||||
@@ -149,18 +149,18 @@ class MLXWhisper(ASRBase):
|
|||||||
"""
|
"""
|
||||||
sep = ""
|
sep = ""
|
||||||
|
|
||||||
def load_model(self, modelsize=None, cache_dir=None, model_dir=None):
|
def load_model(self, model_size=None, cache_dir=None, model_dir=None):
|
||||||
from mlx_whisper.transcribe import ModelHolder, transcribe
|
from mlx_whisper.transcribe import ModelHolder, transcribe
|
||||||
import mlx.core as mx
|
import mlx.core as mx
|
||||||
|
|
||||||
if model_dir is not None:
|
if model_dir is not None:
|
||||||
logger.debug(f"Loading whisper model from model_dir {model_dir}. modelsize parameter is not used.")
|
logger.debug(f"Loading whisper model from model_dir {model_dir}. model_size parameter is not used.")
|
||||||
model_size_or_path = model_dir
|
model_size_or_path = model_dir
|
||||||
elif modelsize is not None:
|
elif model_size is not None:
|
||||||
model_size_or_path = self.translate_model_name(modelsize)
|
model_size_or_path = self.translate_model_name(model_size)
|
||||||
logger.debug(f"Loading whisper model {modelsize}. You use mlx whisper, so {model_size_or_path} will be used.")
|
logger.debug(f"Loading whisper model {model_size}. You use mlx whisper, so {model_size_or_path} will be used.")
|
||||||
else:
|
else:
|
||||||
raise ValueError("Either modelsize or model_dir must be set")
|
raise ValueError("Either model_size or model_dir must be set")
|
||||||
|
|
||||||
self.model_size_or_path = model_size_or_path
|
self.model_size_or_path = model_size_or_path
|
||||||
dtype = mx.float16
|
dtype = mx.float16
|
||||||
|
|||||||
@@ -106,9 +106,6 @@ class OnlineASRProcessor:
|
|||||||
def __init__(
|
def __init__(
|
||||||
self,
|
self,
|
||||||
asr,
|
asr,
|
||||||
tokenize_method: Optional[callable] = None,
|
|
||||||
buffer_trimming: Tuple[str, float] = ("segment", 15),
|
|
||||||
confidence_validation = False,
|
|
||||||
logfile=sys.stderr,
|
logfile=sys.stderr,
|
||||||
):
|
):
|
||||||
"""
|
"""
|
||||||
@@ -119,13 +116,14 @@ class OnlineASRProcessor:
|
|||||||
buffer_trimming: A tuple (option, seconds), where option is either "sentence" or "segment".
|
buffer_trimming: A tuple (option, seconds), where option is either "sentence" or "segment".
|
||||||
"""
|
"""
|
||||||
self.asr = asr
|
self.asr = asr
|
||||||
self.tokenize = tokenize_method
|
self.tokenize = asr.tokenizer
|
||||||
self.logfile = logfile
|
self.logfile = logfile
|
||||||
self.confidence_validation = confidence_validation
|
self.confidence_validation = asr.confidence_validation
|
||||||
self.global_time_offset = 0.0
|
self.global_time_offset = 0.0
|
||||||
self.init()
|
self.init()
|
||||||
|
|
||||||
self.buffer_trimming_way, self.buffer_trimming_sec = buffer_trimming
|
self.buffer_trimming_way = asr.buffer_trimming
|
||||||
|
self.buffer_trimming_sec = asr.buffer_trimming_sec
|
||||||
|
|
||||||
if self.buffer_trimming_way not in ["sentence", "segment"]:
|
if self.buffer_trimming_way not in ["sentence", "segment"]:
|
||||||
raise ValueError("buffer_trimming must be either 'sentence' or 'segment'")
|
raise ValueError("buffer_trimming must be either 'sentence' or 'segment'")
|
||||||
|
|||||||
@@ -6,6 +6,7 @@ from functools import lru_cache
|
|||||||
import time
|
import time
|
||||||
import logging
|
import logging
|
||||||
from .backends import FasterWhisperASR, MLXWhisper, WhisperTimestampedASR, OpenaiApiASR
|
from .backends import FasterWhisperASR, MLXWhisper, WhisperTimestampedASR, OpenaiApiASR
|
||||||
|
from whisperlivekit.warmup import warmup_asr
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
@@ -63,11 +64,23 @@ def create_tokenizer(lan):
|
|||||||
return WtPtok()
|
return WtPtok()
|
||||||
|
|
||||||
|
|
||||||
def backend_factory(args):
|
def backend_factory(
|
||||||
backend = args.backend
|
backend,
|
||||||
|
lan,
|
||||||
|
model_size,
|
||||||
|
model_cache_dir,
|
||||||
|
model_dir,
|
||||||
|
task,
|
||||||
|
buffer_trimming,
|
||||||
|
buffer_trimming_sec,
|
||||||
|
confidence_validation,
|
||||||
|
warmup_file=None,
|
||||||
|
min_chunk_size=None,
|
||||||
|
):
|
||||||
|
backend = backend
|
||||||
if backend == "openai-api":
|
if backend == "openai-api":
|
||||||
logger.debug("Using OpenAI API.")
|
logger.debug("Using OpenAI API.")
|
||||||
asr = OpenaiApiASR(lan=args.lan)
|
asr = OpenaiApiASR(lan=lan)
|
||||||
else:
|
else:
|
||||||
if backend == "faster-whisper":
|
if backend == "faster-whisper":
|
||||||
asr_cls = FasterWhisperASR
|
asr_cls = FasterWhisperASR
|
||||||
@@ -77,34 +90,33 @@ def backend_factory(args):
|
|||||||
asr_cls = WhisperTimestampedASR
|
asr_cls = WhisperTimestampedASR
|
||||||
|
|
||||||
# Only for FasterWhisperASR and WhisperTimestampedASR
|
# Only for FasterWhisperASR and WhisperTimestampedASR
|
||||||
size = args.model
|
|
||||||
t = time.time()
|
t = time.time()
|
||||||
logger.info(f"Loading Whisper {size} model for language {args.lan}...")
|
logger.info(f"Loading Whisper {model_size} model for language {lan}...")
|
||||||
asr = asr_cls(
|
asr = asr_cls(
|
||||||
modelsize=size,
|
model_size=model_size,
|
||||||
lan=args.lan,
|
lan=lan,
|
||||||
cache_dir=getattr(args, 'model_cache_dir', None),
|
cache_dir=model_cache_dir,
|
||||||
model_dir=getattr(args, 'model_dir', None),
|
model_dir=model_dir,
|
||||||
)
|
)
|
||||||
e = time.time()
|
e = time.time()
|
||||||
logger.info(f"done. It took {round(e-t,2)} seconds.")
|
logger.info(f"done. It took {round(e-t,2)} seconds.")
|
||||||
|
|
||||||
# Apply common configurations
|
if task == "translate":
|
||||||
if getattr(args, "vad", False): # Checks if VAD argument is present and True
|
|
||||||
logger.info("Setting VAD filter")
|
|
||||||
asr.use_vad()
|
|
||||||
|
|
||||||
language = args.lan
|
|
||||||
if args.task == "translate":
|
|
||||||
if backend != "simulstreaming":
|
|
||||||
asr.set_translate_task()
|
|
||||||
tgt_language = "en" # Whisper translates into English
|
tgt_language = "en" # Whisper translates into English
|
||||||
else:
|
else:
|
||||||
tgt_language = language # Whisper transcribes in this language
|
tgt_language = lan # Whisper transcribes in this language
|
||||||
|
|
||||||
# Create the tokenizer
|
# Create the tokenizer
|
||||||
if args.buffer_trimming == "sentence":
|
if buffer_trimming == "sentence":
|
||||||
tokenizer = create_tokenizer(tgt_language)
|
tokenizer = create_tokenizer(tgt_language)
|
||||||
else:
|
else:
|
||||||
tokenizer = None
|
tokenizer = None
|
||||||
return asr, tokenizer
|
|
||||||
|
warmup_asr(asr, warmup_file)
|
||||||
|
|
||||||
|
asr.confidence_validation = confidence_validation
|
||||||
|
asr.tokenizer = tokenizer
|
||||||
|
asr.buffer_trimming = buffer_trimming
|
||||||
|
asr.buffer_trimming_sec = buffer_trimming_sec
|
||||||
|
return asr
|
||||||