/* Theme, WebSocket, recording, rendering logic extracted from inline script and adapted for segmented theme control and WS caption */

// --- Recording / connection state (shared by handlers defined below) ---
let isRecording = false;
let websocket = null;
let recorder = null;
let chunkDuration = 100; // ms of audio per recorder chunk
let websocketUrl = "ws://localhost:8000/asr";
let userClosing = false; // true when the user initiated the WS close
let wakeLock = null;
let startTime = null;
let timerInterval = null;

// --- Audio graph / waveform / transcript state ---
let audioContext = null;
let analyser = null;
let microphone = null;
let waveCanvas = document.getElementById("waveCanvas");
let waveCtx = waveCanvas.getContext("2d");
let animationFrame = null;
let waitingForStop = false; // waiting for the server's "ready_to_stop" message
let lastReceivedData = null; // last transcript payload, re-rendered as final on close
let lastSignature = null; // render memoization key
let availableMicrophones = [];
let selectedMicrophoneId = null;

// Size the 60x30 CSS-pixel waveform canvas for high-DPI displays.
waveCanvas.width = 60 * (window.devicePixelRatio || 1);
waveCanvas.height = 30 * (window.devicePixelRatio || 1);
waveCtx.scale(window.devicePixelRatio || 1, window.devicePixelRatio || 1);

// --- DOM references ---
const statusText = document.getElementById("status");
const recordButton = document.getElementById("recordButton");
const chunkSelector = document.getElementById("chunkSelector");
const websocketInput = document.getElementById("websocketInput");
const websocketDefaultSpan = document.getElementById("wsDefaultUrl");
const linesTranscriptDiv = document.getElementById("linesTranscript");
const timerElement = document.querySelector(".timer");
const themeRadios = document.querySelectorAll('input[name="theme"]');
const microphoneSelect = document.getElementById("microphoneSelect");
const settingsToggle = document.getElementById("settingsToggle");
const settingsDiv = document.querySelector(".settings");

// Open the welcome page once, on first install only (not on extension or
// Chrome updates). `details.reason` is one of "install" | "update" |
// "chrome_update" | "shared_module_update"; strict equality replaces the
// original `details.reason.search(/install/g) === -1` regex probe, which
// matched exactly the same case since only "install" contains "install".
chrome.runtime.onInstalled.addListener((details) => {
  if (details.reason !== "install") {
    return;
  }
  chrome.tabs.create({ url: chrome.runtime.getURL("welcome.html"), active: true });
});

/**
 * Read the waveform stroke color from the `--wave-stroke` CSS custom
 * property on :root, falling back to black when the variable is unset.
 * @returns {string} CSS color value.
 */
function getWaveStroke() {
  const styles = getComputedStyle(document.documentElement);
  const v = styles.getPropertyValue("--wave-stroke").trim();
  return v || "#000";
}

// Cached stroke color; refreshed on theme changes via updateWaveStroke().
let waveStroke = getWaveStroke();
/** Refresh the cached waveform stroke color (re-read from CSS variables). */
function updateWaveStroke() { waveStroke = getWaveStroke(); }

/**
 * Apply a theme preference to the document.
 * @param {"light"|"dark"|string} pref - "light"/"dark" force a theme via the
 *   `data-theme` attribute; any other value ("system") removes the override
 *   so the OS/browser color scheme applies.
 */
function applyTheme(pref) {
  if (pref === "light") {
    document.documentElement.setAttribute("data-theme", "light");
  } else if (pref === "dark") {
    document.documentElement.setAttribute("data-theme", "dark");
  } else {
    document.documentElement.removeAttribute("data-theme");
  }
  // The stroke color derives from theme CSS variables, so re-read it.
  updateWaveStroke();
}

// Persisted theme preference
const savedThemePref = localStorage.getItem("themePreference") || "system";
applyTheme(savedThemePref);
// Sync the segmented radio control with the saved preference and persist changes.
if (themeRadios.length) {
  themeRadios.forEach((r) => {
    r.checked = r.value === savedThemePref;
    r.addEventListener("change", () => {
      if (r.checked) {
        localStorage.setItem("themePreference", r.value);
        applyTheme(r.value);
      }
    });
  });
}

// React to OS theme changes when in "system" mode
const darkMq = window.matchMedia && window.matchMedia("(prefers-color-scheme: dark)");
const handleOsThemeChange = () => {
  const pref = localStorage.getItem("themePreference") || "system";
  if (pref === "system") updateWaveStroke();
};
if (darkMq && darkMq.addEventListener) {
  darkMq.addEventListener("change", handleOsThemeChange);
} else if (darkMq && darkMq.addListener) {
  // deprecated, but included for Safari compatibility
  darkMq.addListener(handleOsThemeChange);
}

/**
 * Request microphone access, enumerate audio-input devices into
 * `availableMicrophones`, and rebuild the device <select>.
 * On failure, logs and shows an error in `statusText`.
 */
async function enumerateMicrophones() {
  try {
    // NOTE(review): result is unused — presumably queried to trigger/inspect
    // the permission prompt state; confirm or remove.
    const micPermission = await navigator.permissions.query({ name: "microphone", });
    // A granted getUserMedia call is needed before enumerateDevices() will
    // expose device labels; the stream itself is discarded immediately.
    const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
    stream.getTracks().forEach(track => track.stop());
    const devices = await navigator.mediaDevices.enumerateDevices();
    availableMicrophones = devices.filter(device => device.kind === 'audioinput');
    populateMicrophoneSelect();
    console.log(`Found ${availableMicrophones.length} microphone(s)`);
  } catch (error) {
    console.error('Error enumerating microphones:', error);
    statusText.textContent = "Error accessing microphones. Please grant permission.";
  }
}

/**
 * Rebuild the microphone <select> from `availableMicrophones`, restoring the
 * persisted choice from localStorage when that device is still present.
 */
function populateMicrophoneSelect() {
  if (!microphoneSelect) return;
  microphoneSelect.innerHTML = '';
  availableMicrophones.forEach((device, index) => {
    const option = document.createElement('option');
    option.value = device.deviceId;
    // Labels are empty until mic permission is granted; fall back to an index.
    option.textContent = device.label || `Microphone ${index + 1}`;
    microphoneSelect.appendChild(option);
  });
  const savedMicId = localStorage.getItem('selectedMicrophone');
  if (savedMicId && availableMicrophones.some(mic => mic.deviceId === savedMicId)) {
    microphoneSelect.value = savedMicId;
    selectedMicrophoneId = savedMicId;
  }
}

/**
 * Handle a change of the microphone <select>: persist the choice and, if a
 * recording is in progress, stop and restart it on the new device.
 */
function handleMicrophoneChange() {
  selectedMicrophoneId = microphoneSelect.value || null;
  localStorage.setItem('selectedMicrophone', selectedMicrophoneId || '');
  const selectedDevice = availableMicrophones.find(mic => mic.deviceId === selectedMicrophoneId);
  const deviceName = selectedDevice ? selectedDevice.label : 'Default Microphone';
  console.log(`Selected microphone: ${deviceName}`);
  statusText.textContent = `Microphone changed to: ${deviceName}`;
  if (isRecording) {
    statusText.textContent = "Switching microphone... Please wait.";
    // stopRecording/toggleRecording are defined elsewhere in this file; the
    // 1 s delay presumably lets the old stream tear down — TODO confirm.
    stopRecording().then(() => { setTimeout(() => { toggleRecording(); }, 1000); });
  }
}

// Helpers
// Format a value to one decimal place; non-finite/non-numeric input is returned as-is.
function fmt1(x) { const n = Number(x); return Number.isFinite(n) ? n.toFixed(1) : x; }

// Default WebSocket URL computation
const host = window.location.hostname || "localhost";
const port = window.location.port;
const protocol = window.location.protocol === "https:" ? "wss" : "ws";
// NOTE(review): host/port/protocol above are unused — the default remains the
// hard-coded ws://localhost:8000/asr from the top of the file. Confirm whether
// the default was meant to be derived from the page origin instead.
const defaultWebSocketUrl = websocketUrl;
// Populate default caption and input
if (websocketDefaultSpan) websocketDefaultSpan.textContent = defaultWebSocketUrl;
websocketInput.value = defaultWebSocketUrl;
websocketUrl = defaultWebSocketUrl;
// Optional chunk selector (guard for presence)
if (chunkSelector) {
  chunkSelector.addEventListener("change", () => {
    // NOTE(review): parseInt without an explicit radix 10.
    chunkDuration = parseInt(chunkSelector.value);
  });
}
// WebSocket input change handling
websocketInput.addEventListener("change", () => {
  const urlValue = websocketInput.value.trim();
  if (!urlValue.startsWith("ws://") && !urlValue.startsWith("wss://")) {
    statusText.textContent = "Invalid WebSocket URL (must start with ws:// or wss://)";
    return;
  }
  websocketUrl = urlValue;
  statusText.textContent = "WebSocket URL updated. Ready to connect.";
});

/**
 * Open the transcription WebSocket at `websocketUrl`.
 * @returns {Promise<void>} resolves on open; rejects if construction throws
 *   or the socket errors. The message/close handlers installed here drive
 *   transcript rendering and reset the shared recording-state flags.
 */
function setupWebSocket() {
  return new Promise((resolve, reject) => {
    try {
      websocket = new WebSocket(websocketUrl);
    } catch (error) {
      statusText.textContent = "Invalid WebSocket URL. Please check and try again.";
      reject(error);
      return;
    }
    websocket.onopen = () => {
      statusText.textContent = "Connected to server.";
      resolve();
    };
    websocket.onclose = () => {
      if (userClosing) {
        // Expected close (user stopped): render the last payload as final.
        if (waitingForStop) {
          statusText.textContent = "Processing finalized or connection closed.";
          if (lastReceivedData) {
            renderLinesWithBuffer(
              lastReceivedData.lines || [],
              lastReceivedData.buffer_diarization || "",
              lastReceivedData.buffer_transcription || "",
              0,
              0,
              true
            );
          }
        }
      } else {
        // Unexpected close: surface it and stop capturing audio.
        statusText.textContent = "Disconnected from the WebSocket server. (Check logs if model is loading.)";
        if (isRecording) {
          stopRecording();
        }
      }
      // Reset shared connection/recording state; updateUI is defined elsewhere.
      isRecording = false;
      waitingForStop = false;
      userClosing = false;
      lastReceivedData = null;
      websocket = null;
      updateUI();
    };
    websocket.onerror = () => {
      statusText.textContent = "Error connecting to WebSocket.";
      reject(new Error("Error connecting to WebSocket"));
    };
    websocket.onmessage = (event) => {
      const data = JSON.parse(event.data);
      // Server signals all audio has been processed: render final state, close.
      if (data.type === "ready_to_stop") {
        console.log("Ready to stop received, finalizing display and closing WebSocket.");
        waitingForStop = false;
        if (lastReceivedData) {
          renderLinesWithBuffer(
            lastReceivedData.lines || [],
            lastReceivedData.buffer_diarization || "",
            lastReceivedData.buffer_transcription || "",
            0,
            0,
            true
          );
        }
        statusText.textContent = "Finished processing audio! Ready to record again.";
        recordButton.disabled = false;
        if (websocket) {
          websocket.close();
        }
        return;
      }
      // Regular transcript update: keep a copy for final re-rendering on close.
      lastReceivedData = data;
      const {
        lines = [],
        buffer_transcription = "",
        buffer_diarization = "",
        remaining_time_transcription = 0,
        remaining_time_diarization = 0,
        status = "active_transcription",
      } = data;
      renderLinesWithBuffer(
        lines,
        buffer_diarization,
        buffer_transcription,
        remaining_time_diarization,
        remaining_time_transcription,
        false,
        status
      );
    };
  });
}

/**
 * Render transcript lines plus pending transcription/diarization buffers into
 * #linesTranscript.
 * NOTE(review): this definition continues beyond the end of this chunk; only
 * the visible portion is annotated here, and several strings/templates appear
 * to have had HTML markup stripped by extraction — verify against the
 * original file.
 * @param {Array<{speaker:number,text?:string,start?:number,end?:number}>} lines
 * @param {string} buffer_diarization - text awaiting diarization.
 * @param {string} buffer_transcription - text awaiting transcription commit.
 * @param {number} remaining_time_diarization - diarization lag (seconds).
 * @param {number} remaining_time_transcription - transcription lag (seconds).
 * @param {boolean} [isFinalizing=false] - true on the final render after stop.
 * @param {string} [current_status="active_transcription"]
 */
function renderLinesWithBuffer(
  lines,
  buffer_diarization,
  buffer_transcription,
  remaining_time_diarization,
  remaining_time_transcription,
  isFinalizing = false,
  current_status = "active_transcription"
) {
  if (current_status === "no_audio_detected") {
    // NOTE(review): extraction likely dropped markup/whitespace around this
    // string; normalized to a single-line literal — confirm against original.
    linesTranscriptDiv.innerHTML = "No audio detected...";
    return;
  }
  // Flags participate only in the change-detection signature below.
  const showLoading = !isFinalizing && (lines || []).some((it) => it.speaker == 0);
  const showTransLag = !isFinalizing && remaining_time_transcription > 0;
  const showDiaLag = !isFinalizing && !!buffer_diarization && remaining_time_diarization > 0;
  // Cheap memoization: skip the full re-render when nothing visible changed.
  const signature = JSON.stringify({
    lines: (lines || []).map((it) => ({ speaker: it.speaker, text: it.text, start: it.start, end: it.end })),
    buffer_transcription: buffer_transcription || "",
    buffer_diarization: buffer_diarization || "",
    status: current_status,
    showLoading,
    showTransLag,
    showDiaLag,
    isFinalizing: !!isFinalizing,
  });
  if (lastSignature === signature) {
    // Same content: only refresh the live lag counters in place.
    const t = document.querySelector(".lag-transcription-value");
    if (t) t.textContent = fmt1(remaining_time_transcription);
    const d = document.querySelector(".lag-diarization-value");
    if (d) d.textContent = fmt1(remaining_time_diarization);
    const ld = document.querySelector(".loading-diarization-value");
    if (ld) ld.textContent = fmt1(remaining_time_diarization);
    return;
  }
  lastSignature = signature;
  const linesHtml = (lines || [])
    .map((item, idx) => {
      let timeInfo = "";
      if (item.start !== undefined && item.end !== undefined) {
        timeInfo = ` ${item.start} - ${item.end}`;
      }
      let speakerLabel = "";
      if (item.speaker === -2) {
        // speaker -2 marks a silence segment.
        speakerLabel = `Silence${timeInfo}`;
      } else if (item.speaker == 0 && !isFinalizing) {
        // speaker 0 marks audio still undergoing diarization.
        speakerLabel = `${fmt1(
          remaining_time_diarization
        )} second(s) of audio are undergoing diarization`;
      } else if (item.speaker !== 0) {
        speakerLabel = `Speaker ${item.speaker}${timeInfo}`;
      }
      let currentLineText = item.text || "";
      // Only the last line carries lag badges and the pending buffers.
      if (idx === lines.length - 1) {
        if (!isFinalizing && item.speaker !== -2) {
          if (remaining_time_transcription > 0) {
            speakerLabel += `Lag ${fmt1(
              remaining_time_transcription
            )}s`;
          }
          if (buffer_diarization && remaining_time_diarization > 0) {
            speakerLabel += `Lag${fmt1(
              remaining_time_diarization
            )}s`;
          }
        }
        if (buffer_diarization) {
          if (isFinalizing) {
            // When finalizing, commit the buffer with a single joining space.
            currentLineText += (currentLineText.length > 0 && buffer_diarization.trim().length > 0 ? " " : "") + buffer_diarization.trim();
          } else {
            currentLineText += `${buffer_diarization}`;
          }
        }
        if (buffer_transcription) {
          if (isFinalizing) {
            currentLineText += (currentLineText.length > 0 && buffer_transcription.trim().length > 0 ? " " : "") + buffer_transcription.trim();
          } else {
            currentLineText += `${buffer_transcription}`;
          }
        }
      }
      // NOTE(review): definition truncated here in this chunk.
      return currentLineText.trim().length > 0 || speakerLabel.length > 0 ? `${speakerLabel}
${speakerLabel}