mirror of
https://github.com/QuentinFuxa/WhisperLiveKit.git
synced 2026-03-07 14:23:18 +00:00
Merge pull request #341 from AymurAI/feat/uv-deps-resolution
deps/docker: align python support, deterministic deps resolution & docker images releases
This commit is contained in:
13
.dockerignore
Normal file
13
.dockerignore
Normal file
@@ -0,0 +1,13 @@
|
||||
.git
|
||||
.github
|
||||
.venv
|
||||
__pycache__
|
||||
*.pyc
|
||||
.pytest_cache
|
||||
.mypy_cache
|
||||
.ruff_cache
|
||||
.cache
|
||||
.tmp
|
||||
.secrets
|
||||
dist
|
||||
build
|
||||
61
.github/workflows/publish-docker.yml
vendored
Normal file
61
.github/workflows/publish-docker.yml
vendored
Normal file
@@ -0,0 +1,61 @@
|
||||
name: Publish Docker Images
|
||||
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- "v*"
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
tag:
|
||||
description: "Image tag to publish (without image suffix)"
|
||||
required: true
|
||||
type: string
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
|
||||
jobs:
|
||||
docker:
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
IMAGE_TAG: ${{ github.event_name == 'workflow_dispatch' && github.event.inputs.tag || github.ref_name }}
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
include:
|
||||
- image_suffix: cpu-diarization-sortformer
|
||||
dockerfile: Dockerfile.cpu
|
||||
extras: cpu,diarization-sortformer
|
||||
- image_suffix: cu129-diarization-sortformer
|
||||
dockerfile: Dockerfile
|
||||
extras: cu129,diarization-sortformer
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set lowercase owner
|
||||
id: owner
|
||||
run: echo "value=${GITHUB_REPOSITORY_OWNER,,}" >> "${GITHUB_OUTPUT}"
|
||||
|
||||
- name: Login to GHCR
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Setup Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Build and push image
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
context: .
|
||||
file: ./${{ matrix.dockerfile }}
|
||||
push: true
|
||||
build-args: |
|
||||
EXTRAS=${{ matrix.extras }}
|
||||
tags: |
|
||||
ghcr.io/${{ steps.owner.outputs.value }}/whisperlivekit:${{ env.IMAGE_TAG }}-${{ matrix.image_suffix }}
|
||||
ghcr.io/${{ steps.owner.outputs.value }}/whisperlivekit:latest-${{ matrix.image_suffix }}
|
||||
124
Dockerfile
124
Dockerfile
@@ -1,86 +1,74 @@
|
||||
FROM nvidia/cuda:12.9.1-cudnn-devel-ubuntu24.04
|
||||
FROM ghcr.io/astral-sh/uv:0.10.4 AS uvbin
|
||||
|
||||
# --- MARK: Builder Stage
|
||||
FROM nvidia/cuda:12.9.1-cudnn-devel-ubuntu24.04 AS builder-gpu
|
||||
ENV DEBIAN_FRONTEND=noninteractive
|
||||
ENV PYTHONUNBUFFERED=1
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
ARG EXTRAS
|
||||
ARG HF_PRECACHE_DIR
|
||||
ARG HF_TKN_FILE
|
||||
RUN apt-get update && \
|
||||
apt-get install -y --no-install-recommends \
|
||||
build-essential \
|
||||
python3-dev && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Install UV and set up the environment
|
||||
COPY --from=uvbin /uv /uvx /bin/
|
||||
|
||||
ENV UV_COMPILE_BYTECODE=1 UV_LINK_MODE=copy UV_NO_DEV=1
|
||||
ENV UV_PYTHON_PREFERENCE=only-managed
|
||||
ENV UV_PYTHON_INSTALL_DIR=/python
|
||||
|
||||
RUN uv python install 3.12
|
||||
|
||||
# Install dependencies first to leverage caching
|
||||
ARG EXTRAS=cu129
|
||||
COPY pyproject.toml uv.lock /app/
|
||||
RUN set -eux; \
|
||||
set --; \
|
||||
for extra in $(echo "${EXTRAS:-}" | tr ',' ' '); do \
|
||||
set -- "$@" --extra "$extra"; \
|
||||
done; \
|
||||
uv sync --frozen --no-install-project --no-editable --no-cache "$@"
|
||||
|
||||
# Copy the source code and install the package only
|
||||
COPY whisperlivekit /app/whisperlivekit
|
||||
RUN set -eux; \
|
||||
set --; \
|
||||
for extra in $(echo "${EXTRAS:-}" | tr ',' ' '); do \
|
||||
set -- "$@" --extra "$extra"; \
|
||||
done; \
|
||||
uv sync --frozen --no-editable --no-cache "$@"
|
||||
|
||||
# --- MARK: Runtime Stage
|
||||
FROM nvidia/cuda:12.9.1-cudnn-runtime-ubuntu24.04
|
||||
|
||||
ENV DEBIAN_FRONTEND=noninteractive
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
RUN apt-get update && \
|
||||
apt-get install -y --no-install-recommends \
|
||||
python3 \
|
||||
python3-pip \
|
||||
python3-venv \
|
||||
ffmpeg \
|
||||
git \
|
||||
build-essential \
|
||||
python3-dev \
|
||||
ca-certificates && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
apt-get install -y --no-install-recommends \
|
||||
ffmpeg &&\
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
RUN python3 -m venv /opt/venv
|
||||
ENV PATH="/opt/venv/bin:$PATH"
|
||||
# Copy UV binaries
|
||||
COPY --from=uvbin /uv /uvx /bin/
|
||||
|
||||
# timeout/retries for large torch wheels
|
||||
RUN pip3 install --upgrade pip setuptools wheel && \
|
||||
pip3 --disable-pip-version-check install --timeout=120 --retries=5 \
|
||||
--index-url https://download.pytorch.org/whl/cu129 \
|
||||
torch torchaudio \
|
||||
|| (echo "Initial install failed — retrying with extended timeout..." && \
|
||||
pip3 --disable-pip-version-check install --timeout=300 --retries=3 \
|
||||
--index-url https://download.pytorch.org/whl/cu129 \
|
||||
torch torchvision torchaudio)
|
||||
# Copy the Python version
|
||||
COPY --from=builder-gpu --chown=python:python /python /python
|
||||
|
||||
COPY . .
|
||||
|
||||
# Install WhisperLiveKit directly, allowing for optional dependencies
|
||||
# Example: --build-arg EXTRAS="translation"
|
||||
RUN if [ -n "$EXTRAS" ]; then \
|
||||
echo "Installing with extras: [$EXTRAS]"; \
|
||||
pip install --no-cache-dir "whisperlivekit[$EXTRAS]"; \
|
||||
else \
|
||||
echo "Installing base package only"; \
|
||||
pip install --no-cache-dir whisperlivekit; \
|
||||
fi
|
||||
|
||||
# In-container caching for Hugging Face models by:
|
||||
# A) Make the cache directory persistent via an anonymous volume.
|
||||
# Note: This only persists for a single, named container. This is
|
||||
# only for convenience at dev/test stage.
|
||||
# For prod, it is better to use a named volume via host mount/k8s.
|
||||
VOLUME ["/root/.cache/huggingface/hub"]
|
||||
|
||||
|
||||
# or
|
||||
# B) Conditionally copy a local pre-cache from the build context to the
|
||||
# container's cache via the HF_PRECACHE_DIR build-arg.
|
||||
# WARNING: This will copy ALL files in the pre-cache location.
|
||||
|
||||
# Conditionally copy a cache directory if provided
|
||||
RUN if [ -n "$HF_PRECACHE_DIR" ]; then \
|
||||
echo "Copying Hugging Face cache from $HF_PRECACHE_DIR"; \
|
||||
mkdir -p /root/.cache/huggingface/hub && \
|
||||
cp -r $HF_PRECACHE_DIR/* /root/.cache/huggingface/hub; \
|
||||
else \
|
||||
echo "No local Hugging Face cache specified, skipping copy"; \
|
||||
fi
|
||||
|
||||
# Conditionally copy a Hugging Face token if provided. Useful for Diart backend (pyannote audio models)
|
||||
RUN if [ -n "$HF_TKN_FILE" ]; then \
|
||||
echo "Copying Hugging Face token from $HF_TKN_FILE"; \
|
||||
mkdir -p /root/.cache/huggingface && \
|
||||
cp $HF_TKN_FILE /root/.cache/huggingface/token; \
|
||||
else \
|
||||
echo "No Hugging Face token file specified, skipping token setup"; \
|
||||
fi
|
||||
# Copy the virtual environment with all dependencies installed
|
||||
COPY --from=builder-gpu /app/.venv /app/.venv
|
||||
|
||||
EXPOSE 8000
|
||||
|
||||
ENV PATH="/app/.venv/bin:$PATH"
|
||||
ENV UV_PYTHON_DOWNLOADS=0
|
||||
|
||||
HEALTHCHECK --interval=30s --timeout=5s --start-period=120s --retries=3 \
|
||||
CMD python3 -c "import urllib.request; urllib.request.urlopen('http://localhost:8000/')" || exit 1
|
||||
CMD python -c "import urllib.request; urllib.request.urlopen('http://localhost:8000/')" || exit 1
|
||||
|
||||
ENTRYPOINT ["whisperlivekit-server", "--host", "0.0.0.0"]
|
||||
|
||||
|
||||
102
Dockerfile.cpu
102
Dockerfile.cpu
@@ -1,64 +1,76 @@
|
||||
FROM python:3.13-slim
|
||||
FROM ghcr.io/astral-sh/uv:0.10.4 AS uvbin
|
||||
|
||||
# --- MARK: Builder Stage
|
||||
FROM debian:bookworm-slim AS builder-cpu
|
||||
ENV DEBIAN_FRONTEND=noninteractive
|
||||
ENV PYTHONUNBUFFERED=1
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
ARG EXTRAS
|
||||
ARG HF_PRECACHE_DIR
|
||||
ARG HF_TKN_FILE
|
||||
RUN apt-get update && \
|
||||
apt-get install -y --no-install-recommends \
|
||||
build-essential \
|
||||
python3-dev && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Install UV and set up the environment
|
||||
COPY --from=uvbin /uv /uvx /bin/
|
||||
|
||||
ENV UV_COMPILE_BYTECODE=1 UV_LINK_MODE=copy UV_NO_DEV=1
|
||||
ENV UV_PYTHON_PREFERENCE=only-managed
|
||||
ENV UV_PYTHON_INSTALL_DIR=/python
|
||||
|
||||
RUN uv python install 3.12
|
||||
|
||||
# Install dependencies first to leverage caching
|
||||
ARG EXTRAS=cpu
|
||||
COPY pyproject.toml uv.lock /app/
|
||||
RUN set -eux; \
|
||||
set --; \
|
||||
for extra in $(echo "${EXTRAS:-}" | tr ',' ' '); do \
|
||||
set -- "$@" --extra "$extra"; \
|
||||
done; \
|
||||
uv sync --frozen --no-install-project --no-editable --no-cache "$@"
|
||||
|
||||
# Copy the source code and install the package only
|
||||
COPY whisperlivekit /app/whisperlivekit
|
||||
RUN set -eux; \
|
||||
set --; \
|
||||
for extra in $(echo "${EXTRAS:-}" | tr ',' ' '); do \
|
||||
set -- "$@" --extra "$extra"; \
|
||||
done; \
|
||||
uv sync --frozen --no-editable --no-cache "$@"
|
||||
|
||||
# --- MARK: Runtime Stage
|
||||
FROM debian:bookworm-slim
|
||||
|
||||
ENV DEBIAN_FRONTEND=noninteractive
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
RUN apt-get update && \
|
||||
apt-get install -y --no-install-recommends \
|
||||
ffmpeg \
|
||||
git \
|
||||
build-essential \
|
||||
python3-dev && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
apt-get install -y --no-install-recommends \
|
||||
ffmpeg &&\
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Install CPU-only PyTorch
|
||||
RUN pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu
|
||||
# Copy UV binaries
|
||||
COPY --from=uvbin /uv /uvx /bin/
|
||||
|
||||
COPY . .
|
||||
# Copy the Python version
|
||||
COPY --from=builder-cpu --chown=python:python /python /python
|
||||
|
||||
# Install WhisperLiveKit directly, allowing for optional dependencies
|
||||
RUN if [ -n "$EXTRAS" ]; then \
|
||||
echo "Installing with extras: [$EXTRAS]"; \
|
||||
pip install --no-cache-dir whisperlivekit[$EXTRAS]; \
|
||||
else \
|
||||
echo "Installing base package only"; \
|
||||
pip install --no-cache-dir whisperlivekit; \
|
||||
fi
|
||||
# Copy the virtual environment with all dependencies installed
|
||||
COPY --from=builder-cpu /app/.venv /app/.venv
|
||||
|
||||
# Enable in-container caching for Hugging Face models
|
||||
VOLUME ["/root/.cache/huggingface/hub"]
|
||||
|
||||
# Conditionally copy a local pre-cache from the build context
|
||||
RUN if [ -n "$HF_PRECACHE_DIR" ]; then \
|
||||
echo "Copying Hugging Face cache from $HF_PRECACHE_DIR"; \
|
||||
mkdir -p /root/.cache/huggingface/hub && \
|
||||
cp -r $HF_PRECACHE_DIR/* /root/.cache/huggingface/hub; \
|
||||
else \
|
||||
echo "No local Hugging Face cache specified, skipping copy"; \
|
||||
fi
|
||||
|
||||
# Conditionally copy a Hugging Face token if provided
|
||||
RUN if [ -n "$HF_TKN_FILE" ]; then \
|
||||
echo "Copying Hugging Face token from $HF_TKN_FILE"; \
|
||||
mkdir -p /root/.cache/huggingface && \
|
||||
cp $HF_TKN_FILE /root/.cache/huggingface/token; \
|
||||
else \
|
||||
echo "No Hugging Face token file specified, skipping token setup"; \
|
||||
fi
|
||||
|
||||
# Expose port for the transcription server
|
||||
EXPOSE 8000
|
||||
|
||||
ENV PATH="/app/.venv/bin:$PATH"
|
||||
ENV UV_PYTHON_DOWNLOADS=0
|
||||
|
||||
HEALTHCHECK --interval=30s --timeout=5s --start-period=120s --retries=3 \
|
||||
CMD python3 -c "import urllib.request; urllib.request.urlopen('http://localhost:8000/')" || exit 1
|
||||
CMD python -c "import urllib.request; urllib.request.urlopen('http://localhost:8000/')" || exit 1
|
||||
|
||||
ENTRYPOINT ["whisperlivekit-server", "--host", "0.0.0.0"]
|
||||
|
||||
# Default args - you might want to use a smaller model for CPU
|
||||
CMD ["--model", "tiny"]
|
||||
CMD ["--model", "tiny"]
|
||||
|
||||
54
README.md
54
README.md
@@ -72,15 +72,29 @@ Go to `chrome-extension` for instructions.
|
||||
|
||||
#### Optional Dependencies
|
||||
|
||||
| Optional | `pip install` |
|
||||
|-----------|-------------|
|
||||
| **Windows/Linux optimizations** | `faster-whisper` |
|
||||
| **Apple Silicon optimizations** | `mlx-whisper` |
|
||||
| **Voxtral (multilingual, auto-detect)** | `transformers torch` (or use built-in `voxtral-mlx` on Apple Silicon) |
|
||||
| **Translation** | `nllw` |
|
||||
| **Speaker diarization** | `git+https://github.com/NVIDIA/NeMo.git@main#egg=nemo_toolkit[asr]` |
|
||||
| OpenAI API | `openai` |
|
||||
| *[Not recommended]* Speaker diarization with Diart | `diart` |
|
||||
| Feature | `uv sync` | `pip install -e` |
|
||||
|-----------|-------------|-------------|
|
||||
| **Apple Silicon MLX Whisper backend** | `uv sync --extra mlx-whisper` | `pip install -e ".[mlx-whisper]"` |
|
||||
| **Voxtral (MLX backend, Apple Silicon)** | `uv sync --extra voxtral-mlx` | `pip install -e ".[voxtral-mlx]"` |
|
||||
| **CPU PyTorch stack** | `uv sync --extra cpu` | `pip install -e ".[cpu]"` |
|
||||
| **CUDA 12.9 PyTorch stack** | `uv sync --extra cu129` | `pip install -e ".[cu129]"` |
|
||||
| **Translation** | `uv sync --extra translation` | `pip install -e ".[translation]"` |
|
||||
| **Sentence tokenizer** | `uv sync --extra sentence_tokenizer` | `pip install -e ".[sentence_tokenizer]"` |
|
||||
| **Voxtral (HF backend)** | `uv sync --extra voxtral-hf` | `pip install -e ".[voxtral-hf]"` |
|
||||
| **Speaker diarization (Sortformer / NeMo)** | `uv sync --extra diarization-sortformer` | `pip install -e ".[diarization-sortformer]"` |
|
||||
| *[Not recommended]* Speaker diarization with Diart | `uv sync --extra diarization-diart` | `pip install -e ".[diarization-diart]"` |
|
||||
|
||||
Supported GPU profiles:
|
||||
|
||||
```bash
|
||||
# Profile A: Sortformer diarization
|
||||
uv sync --extra cu129 --extra diarization-sortformer
|
||||
|
||||
# Profile B: Voxtral HF + translation
|
||||
uv sync --extra cu129 --extra voxtral-hf --extra translation
|
||||
```
|
||||
|
||||
`voxtral-hf` and `diarization-sortformer` are intentionally incompatible extras and must be installed in separate environments.
|
||||
|
||||
See **Parameters & Configuration** below on how to use them.
|
||||
|
||||
@@ -102,6 +116,7 @@ detection is more reliable and does not bias towards English.
|
||||
|
||||
```bash
|
||||
# Apple Silicon (native MLX, recommended)
|
||||
pip install -e ".[voxtral-mlx]"
|
||||
wlk --backend voxtral-mlx
|
||||
|
||||
# Linux/GPU (HuggingFace transformers)
|
||||
@@ -279,7 +294,7 @@ docker run --gpus all -p 8000:8000 --name wlk wlk
|
||||
|
||||
**CPU only:**
|
||||
```bash
|
||||
docker build -f Dockerfile.cpu -t wlk .
|
||||
docker build -f Dockerfile.cpu -t wlk --build-arg EXTRAS="cpu" .
|
||||
docker run -p 8000:8000 --name wlk wlk
|
||||
```
|
||||
|
||||
@@ -291,6 +306,18 @@ docker run -p 8000:8000 --name wlk wlk
|
||||
docker run --gpus all -p 8000:8000 --name wlk wlk --model large-v3 --language fr
|
||||
```
|
||||
|
||||
**Compose (recommended for cache + token wiring):**
|
||||
```bash
|
||||
# GPU Sortformer profile
|
||||
docker compose up --build wlk-gpu-sortformer
|
||||
|
||||
# GPU Voxtral profile
|
||||
docker compose up --build wlk-gpu-voxtral
|
||||
|
||||
# CPU service
|
||||
docker compose up --build wlk-cpu
|
||||
```
|
||||
|
||||
### Memory Requirements
|
||||
- **Large models**: Ensure your Docker runtime has sufficient memory allocated
|
||||
|
||||
@@ -298,9 +325,10 @@ docker run --gpus all -p 8000:8000 --name wlk wlk --model large-v3 --language fr
|
||||
#### Customization
|
||||
|
||||
- `--build-arg` Options:
|
||||
- `EXTRAS="translation"` - Add extras to the image's installation (no spaces). Remember to set necessary container options!
|
||||
- `HF_PRECACHE_DIR="./.cache/"` - Pre-load a model cache for faster first-time start
|
||||
- `HF_TKN_FILE="./token"` - Add your Hugging Face Hub access token to download gated models
|
||||
- `EXTRAS="cu129,diarization-sortformer"` - GPU Sortformer profile extras.
|
||||
- `EXTRAS="cu129,voxtral-hf,translation"` - GPU Voxtral profile extras.
|
||||
- `EXTRAS="cpu,diarization-diart,translation"` - CPU profile extras.
|
||||
- Hugging Face cache + token are configured in `compose.yml` using a named volume and `HF_TKN_FILE` (default: `./token`).
|
||||
|
||||
## Testing & Benchmarks
|
||||
|
||||
|
||||
52
compose.yml
Normal file
52
compose.yml
Normal file
@@ -0,0 +1,52 @@
|
||||
services:
|
||||
wlk-gpu-sortformer:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: Dockerfile
|
||||
args:
|
||||
EXTRAS: ${GPU_SORTFORMER_EXTRAS:-cu129,diarization-sortformer}
|
||||
image: wlk:gpu-sortformer
|
||||
gpus: all
|
||||
ports:
|
||||
- "8000:8000"
|
||||
volumes:
|
||||
- hf-cache:/root/.cache/huggingface/hub
|
||||
# - ${HF_TKN_FILE:-./token}:/root/.cache/huggingface/token:ro
|
||||
environment:
|
||||
- HF_TOKEN
|
||||
command: ["--model", "medium", "--diarization", "--pcm-input"]
|
||||
|
||||
wlk-gpu-voxtral:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: Dockerfile
|
||||
args:
|
||||
EXTRAS: ${GPU_VOXTRAL_EXTRAS:-cu129,voxtral-hf,translation}
|
||||
image: wlk:gpu-voxtral
|
||||
gpus: all
|
||||
ports:
|
||||
- "8001:8000"
|
||||
volumes:
|
||||
- hf-cache:/root/.cache/huggingface/hub
|
||||
# - ${HF_TKN_FILE:-./token}:/root/.cache/huggingface/token:ro
|
||||
environment:
|
||||
- HF_TOKEN
|
||||
command: ["--backend", "voxtral", "--pcm-input"]
|
||||
|
||||
wlk-cpu:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: Dockerfile.cpu
|
||||
args:
|
||||
EXTRAS: ${CPU_EXTRAS:-cpu,diarization-diart,translation}
|
||||
image: wlk:cpu
|
||||
ports:
|
||||
- "8000:8000"
|
||||
volumes:
|
||||
- hf-cache:/root/.cache/huggingface/hub
|
||||
# - ${HF_TKN_FILE:-./token}:/root/.cache/huggingface/token:ro
|
||||
environment:
|
||||
- HF_TOKEN
|
||||
|
||||
volumes:
|
||||
hf-cache:
|
||||
@@ -7,24 +7,18 @@ name = "whisperlivekit"
|
||||
version = "0.2.19"
|
||||
description = "Real-time speech-to-text with speaker diarization using Whisper"
|
||||
readme = "README.md"
|
||||
authors = [
|
||||
{ name = "Quentin Fuxa" }
|
||||
]
|
||||
authors = [{ name = "Quentin Fuxa" }]
|
||||
license = { file = "LICENSE" }
|
||||
requires-python = ">=3.9"
|
||||
requires-python = ">=3.11, <3.14"
|
||||
classifiers = [
|
||||
"Development Status :: 4 - Beta",
|
||||
"Intended Audience :: Developers",
|
||||
"License :: OSI Approved :: MIT License",
|
||||
"Programming Language :: Python :: 3.9",
|
||||
"Programming Language :: Python :: 3.10",
|
||||
"Programming Language :: Python :: 3.11",
|
||||
"Programming Language :: Python :: 3.12",
|
||||
"Programming Language :: Python :: 3.13",
|
||||
"Programming Language :: Python :: 3.14",
|
||||
"Programming Language :: Python :: 3.15",
|
||||
"Topic :: Scientific/Engineering :: Artificial Intelligence",
|
||||
"Topic :: Multimedia :: Sound/Audio :: Speech"
|
||||
"Topic :: Multimedia :: Sound/Audio :: Speech",
|
||||
]
|
||||
dependencies = [
|
||||
"fastapi",
|
||||
@@ -32,20 +26,91 @@ dependencies = [
|
||||
"soundfile",
|
||||
"uvicorn",
|
||||
"websockets",
|
||||
"torchaudio>=2.0.0",
|
||||
"torch>=2.0.0",
|
||||
"huggingface-hub>=0.25.0",
|
||||
"faster-whisper>=1.2.0",
|
||||
"torch>=2.0.0",
|
||||
"torchaudio>=2.0.0",
|
||||
"tqdm",
|
||||
"tiktoken",
|
||||
'triton>=2.0.0; platform_machine == "x86_64" and (sys_platform == "linux" or sys_platform == "linux2")'
|
||||
]
|
||||
|
||||
[project.optional-dependencies]
|
||||
test = ["pytest>=7.0", "pytest-asyncio>=0.21"]
|
||||
translation = ["nllw"]
|
||||
sentence_tokenizer = ["mosestokenizer", "wtpsplit"]
|
||||
voxtral-hf = ["transformers>=5.2.0", "mistral-common[audio]"]
|
||||
mlx-whisper = [
|
||||
'mlx>=0.11.0; sys_platform == "darwin" and platform_machine == "arm64"',
|
||||
'mlx-whisper>=0.4.0; sys_platform == "darwin" and platform_machine == "arm64"',
|
||||
]
|
||||
voxtral-mlx = [
|
||||
'mlx>=0.11.0; sys_platform == "darwin" and platform_machine == "arm64"',
|
||||
'mlx-whisper>=0.4.0; sys_platform == "darwin" and platform_machine == "arm64"',
|
||||
"mistral-common[audio]",
|
||||
]
|
||||
voxtral-hf = [
|
||||
"transformers>=5.2.0; python_version >= '3.10'",
|
||||
"mistral-common[audio]",
|
||||
"accelerate>=0.12",
|
||||
]
|
||||
cpu = ["torch>=2.0.0", "torchaudio>=2.0.0"]
|
||||
cu129 = [
|
||||
"torch>=2.0.0",
|
||||
"torchaudio>=2.0.0",
|
||||
'triton>=2.0.0; platform_machine == "x86_64" and (sys_platform == "linux" or sys_platform == "linux2")',
|
||||
]
|
||||
diarization-sortformer = [
|
||||
"nemo-toolkit[asr]>2.4; python_version >= '3.10' and python_version < '3.13'",
|
||||
]
|
||||
diarization-diart = [
|
||||
"diart",
|
||||
"torch<2.9.0",
|
||||
"torchaudio<2.9.0",
|
||||
"torchvision<0.24.0",
|
||||
]
|
||||
|
||||
[dependency-groups]
|
||||
dev = ["rich>=14.3.3"]
|
||||
|
||||
[tool.uv]
|
||||
conflicts = [
|
||||
[
|
||||
{ extra = "cpu" },
|
||||
{ extra = "cu129" },
|
||||
],
|
||||
[
|
||||
{ extra = "diarization-diart" },
|
||||
{ extra = "cu129" },
|
||||
],
|
||||
[
|
||||
{ extra = "voxtral-hf" },
|
||||
{ extra = "diarization-sortformer" },
|
||||
],
|
||||
]
|
||||
|
||||
[tool.uv.sources]
|
||||
torch = [
|
||||
{ index = "pytorch-cpu", extra = "cpu", marker = "platform_system != 'Darwin'" },
|
||||
{ index = "pytorch-cpu", extra = "diarization-diart", marker = "platform_system != 'Darwin'" },
|
||||
{ index = "pytorch-cu129", extra = "cu129", marker = "platform_system == 'Linux' and platform_machine == 'x86_64'" },
|
||||
]
|
||||
torchaudio = [
|
||||
{ index = "pytorch-cpu", extra = "cpu", marker = "platform_system != 'Darwin'" },
|
||||
{ index = "pytorch-cpu", extra = "diarization-diart", marker = "platform_system != 'Darwin'" },
|
||||
{ index = "pytorch-cu129", extra = "cu129", marker = "platform_system == 'Linux' and platform_machine == 'x86_64'" },
|
||||
]
|
||||
torchvision = [
|
||||
{ index = "pytorch-cpu", extra = "diarization-diart", marker = "platform_system != 'Darwin'" },
|
||||
]
|
||||
|
||||
[[tool.uv.index]]
|
||||
name = "pytorch-cpu"
|
||||
url = "https://download.pytorch.org/whl/cpu"
|
||||
explicit = true
|
||||
|
||||
[[tool.uv.index]]
|
||||
name = "pytorch-cu129"
|
||||
url = "https://download.pytorch.org/whl/cu129"
|
||||
explicit = true
|
||||
|
||||
[project.urls]
|
||||
Homepage = "https://github.com/QuentinFuxa/WhisperLiveKit"
|
||||
@@ -66,7 +131,7 @@ packages = [
|
||||
"whisperlivekit.web",
|
||||
"whisperlivekit.local_agreement",
|
||||
"whisperlivekit.voxtral_mlx",
|
||||
"whisperlivekit.silero_vad_models"
|
||||
"whisperlivekit.silero_vad_models",
|
||||
]
|
||||
|
||||
[tool.setuptools.package-data]
|
||||
|
||||
580
scripts/python_support_matrix.py
Normal file
580
scripts/python_support_matrix.py
Normal file
@@ -0,0 +1,580 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Offline Python support matrix runner for WhisperLiveKit."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
import os
|
||||
import shlex
|
||||
import shutil
|
||||
import subprocess
|
||||
import sys
|
||||
import time
|
||||
from dataclasses import dataclass
|
||||
from pathlib import Path
|
||||
from typing import Literal
|
||||
|
||||
try:
|
||||
from rich.console import Console
|
||||
from rich.table import Table
|
||||
|
||||
HAS_RICH = True
|
||||
except Exception:
|
||||
HAS_RICH = False
|
||||
|
||||
# Public sample clip used for the offline diarization runs.
SAMPLE_URL = (
    "https://github.com/pyannote/pyannote-audio/raw/develop/tutorials/assets/sample.wav"
)
SAMPLE_PATH = Path("audio_tests/support-matrix-sample.wav")
# Default location for per-case transcripts (overridable via --logs-dir).
DEFAULT_LOGS_DIR = Path("outputs/python-matrix/logs")
# CPython versions exercised by the matrix.
PYTHON_VERSIONS = ("3.11", "3.12", "3.13")
# Shared rich console; stays None when rich is unavailable.
CONSOLE = Console() if HAS_RICH else None
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class MatrixRow:
    """One support-matrix case: a backend/diarization combination to exercise.

    Attributes:
        row_id: Stable identifier used in logs and result tables.
        extras: uv extras that must be synced before running the case.
        backend: ASR backend name passed to the offline test script.
        policy: Streaming policy passed to the offline test script.
        diarization_backend: Diarization backend passed to the offline test script.
        requires_gpu: When True the case is skipped if no GPU is detected.
    """

    row_id: str
    extras: tuple[str, ...]
    backend: str
    policy: str
    diarization_backend: str
    requires_gpu: bool = False
|
||||
|
||||
|
||||
# All matrix rows exercised by the runner; order is the execution order.
CASES = (
    MatrixRow(
        row_id="fw-diart-cpu",
        extras=("test", "cpu", "diarization-diart"),
        backend="faster-whisper",
        policy="simulstreaming",
        diarization_backend="diart",
    ),
    MatrixRow(
        row_id="fw-sortformer-cpu",
        extras=("test", "cpu", "diarization-sortformer"),
        backend="faster-whisper",
        policy="simulstreaming",
        diarization_backend="sortformer",
    ),
    MatrixRow(
        row_id="fw-sortformer-gpu",
        extras=("test", "cu129", "diarization-sortformer"),
        backend="faster-whisper",
        policy="simulstreaming",
        diarization_backend="sortformer",
        requires_gpu=True,
    ),
    MatrixRow(
        row_id="voxtral-diart-cpu",
        extras=("test", "cpu", "voxtral-hf", "diarization-diart"),
        backend="voxtral",
        policy="voxtral",
        diarization_backend="diart",
    ),
)
|
||||
|
||||
# FAIL results for these (python_version, row_id) pairs are downgraded to N/A
# by apply_expected_failure_policy — known-flaky combinations, not regressions.
EXPECTED_FAILURE_CASES = {
    ("3.11", "voxtral-diart-cpu"): "known_unstable_voxtral_diart_cpu",
    ("3.12", "voxtral-diart-cpu"): "known_unstable_voxtral_diart_cpu",
}
# These pairs are short-circuited to N/A before any sync/run work happens.
UNSUPPORTED_CASES = {
    ("3.13", "fw-sortformer-cpu"): "unsupported_py313_sortformer_protobuf",
    ("3.13", "fw-sortformer-gpu"): "unsupported_py313_sortformer_protobuf",
}
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class CaseResult:
    """Outcome of one (python_version, row_id) matrix case."""

    python_version: str
    row_id: str
    status: Literal["PASS", "FAIL", "N/A"]
    reason: str
    duration_sec: float
    # Optional diagnostic context (e.g. the tail of stderr); empty when unused.
    hint: str = ""
    # Per-case log file location; empty when no log was written.
    log_path: str = ""
|
||||
|
||||
|
||||
def parse_args() -> argparse.Namespace:
    """Parse the runner's command-line options (timeout and log directory)."""
    cli = argparse.ArgumentParser(
        description="Minimal WhisperLiveKit offline support matrix"
    )
    cli.add_argument(
        "--timeout-sec",
        type=int,
        default=300,
        help="Per-case timeout in seconds (default: 300)",
    )
    cli.add_argument(
        "--logs-dir",
        default=str(DEFAULT_LOGS_DIR),
        help="Directory where per-case logs are written (default: outputs/python-matrix/logs)",
    )
    return cli.parse_args()
|
||||
|
||||
|
||||
def safe_slug(text: str) -> str:
    """Make *text* filename-safe by mapping separator characters in one pass."""
    # Equivalent to chaining str.replace for "=", "|", "/" and " ".
    return text.translate(str.maketrans({"=": "-", "|": "__", "/": "-", " ": "-"}))
|
||||
|
||||
|
||||
def status_style(status: str) -> str:
    """Map a case status to a rich style name; unknown statuses render white."""
    styles = {"PASS": "green", "FAIL": "bold red", "N/A": "yellow"}
    return styles.get(status, "white")
|
||||
|
||||
|
||||
def print_line(message: str, style: str | None = None) -> None:
    """Emit one line via the shared rich console when available, else plain print()."""
    if CONSOLE is None:
        print(message)
    elif style:
        CONSOLE.print(message, style=style, highlight=False)
    else:
        CONSOLE.print(message, highlight=False)
|
||||
|
||||
|
||||
def tail_text(text: str | None, max_chars: int = 220) -> str:
    """Collapse all whitespace in *text* and keep at most the LAST *max_chars* chars.

    Returns "" for None or empty input; used to turn process output into a
    short one-line hint.
    """
    if not text:
        return ""
    collapsed = " ".join(text.split())
    return collapsed if len(collapsed) <= max_chars else collapsed[-max_chars:]
|
||||
|
||||
|
||||
def run_command(
    cmd: list[str],
    cwd: Path,
    env: dict[str, str],
    timeout: int | None = None,
    log_path: Path | None = None,
    log_section: str | None = None,
) -> subprocess.CompletedProcess[str]:
    """Run *cmd* with captured output, optionally appending a transcript to *log_path*.

    Each invocation appends a "=== section ===" block with the shell-quoted
    command, exit status (or "timeout"), and any captured stdout/stderr.

    Raises:
        subprocess.TimeoutExpired: re-raised after being logged when *timeout*
            elapses before the process exits.
    """
    section = log_section or "command"

    def _record(
        returncode: int | None,
        stdout: str | None,
        stderr: str | None,
        timed_out: bool = False,
    ) -> None:
        # Logging is a no-op unless the caller supplied a log file.
        if log_path is None:
            return
        log_path.parent.mkdir(parents=True, exist_ok=True)
        parts = [f"\n=== {section} ===\n", f"$ {shlex.join(cmd)}\n"]
        if timed_out:
            parts.append("status: timeout\n")
        else:
            parts.append(f"status: exit_code={returncode}\n")
        for label, stream in (("stdout", stdout), ("stderr", stderr)):
            if stream:
                parts.append(f"--- {label} ---\n")
                # Keep sections newline-terminated without doubling newlines.
                parts.append(stream if stream.endswith("\n") else stream + "\n")
        with log_path.open("a", encoding="utf-8") as handle:
            handle.writelines(parts)

    try:
        completed = subprocess.run(
            cmd,
            cwd=str(cwd),
            env=env,
            text=True,
            capture_output=True,
            check=False,
            timeout=timeout,
        )
    except subprocess.TimeoutExpired as exc:
        # TimeoutExpired may carry bytes or None; only log real text.
        _record(
            None,
            exc.stdout if isinstance(exc.stdout, str) else None,
            exc.stderr if isinstance(exc.stderr, str) else None,
            timed_out=True,
        )
        raise

    _record(completed.returncode, completed.stdout, completed.stderr)
    return completed
|
||||
|
||||
|
||||
def detect_gpu_available() -> bool:
    """Return True when `nvidia-smi -L` runs and exits 0 (a GPU is visible)."""
    try:
        listing = subprocess.run(
            ["nvidia-smi", "-L"],
            text=True,
            capture_output=True,
            check=False,
            timeout=10,
        )
    except (FileNotFoundError, subprocess.TimeoutExpired):
        # Missing binary or a hung driver query both count as "no GPU".
        return False
    return listing.returncode == 0
|
||||
|
||||
|
||||
def download_sample(repo_root: Path) -> Path:
    """Fetch the diarization sample WAV into the repo and return its path.

    Raises:
        RuntimeError: when curl exits non-zero; the message carries a short
            tail of the command output as a hint.
    """
    target = repo_root / SAMPLE_PATH
    target.parent.mkdir(parents=True, exist_ok=True)
    curl_cmd = [
        "curl",
        "--fail",
        "--location",
        "--silent",
        "--show-error",
        SAMPLE_URL,
        "--output",
        str(target),
    ]
    fetched = run_command(curl_cmd, cwd=repo_root, env=os.environ.copy())
    if fetched.returncode == 0:
        return target
    hint = tail_text(fetched.stderr or fetched.stdout)
    raise RuntimeError(f"sample_download_failed: {hint}")
|
||||
|
||||
|
||||
def sync_case_environment(
    repo_root: Path,
    python_version: str,
    row: MatrixRow,
    env_dir: Path,
    log_path: Path,
) -> tuple[bool, str]:
    """Run `uv sync` for one matrix case; return (ok, failure_hint).

    The hint is empty on success, otherwise a short tail of the sync output.
    """
    sync_cmd = ["uv", "sync", "--python", python_version, "--no-dev"]
    for extra in row.extras:
        sync_cmd += ["--extra", extra]
    # Point uv at a dedicated venv so concurrent cases don't clobber each other.
    case_env = os.environ.copy()
    case_env["UV_PROJECT_ENVIRONMENT"] = str(env_dir)
    synced = run_command(
        sync_cmd,
        cwd=repo_root,
        env=case_env,
        log_path=log_path,
        log_section="sync",
    )
    if synced.returncode == 0:
        return True, ""
    return False, tail_text(synced.stderr or synced.stdout)
|
||||
|
||||
|
||||
def apply_expected_failure_policy(result: CaseResult) -> CaseResult:
    """Downgrade a known-flaky FAIL to N/A, keeping the original reason in the hint.

    Results that did not fail, or are not listed in EXPECTED_FAILURE_CASES,
    pass through unchanged.
    """
    expected = EXPECTED_FAILURE_CASES.get((result.python_version, result.row_id))
    if not expected or result.status != "FAIL":
        return result
    hint = result.hint
    if result.reason:
        # Preserve the original failure reason so the override stays auditable.
        prefix = f"expected_failure_override original_reason={result.reason}"
        hint = f"{prefix}; {hint}" if hint else prefix
    return CaseResult(
        python_version=result.python_version,
        row_id=result.row_id,
        status="N/A",
        reason=expected,
        duration_sec=result.duration_sec,
        hint=hint,
        log_path=result.log_path,
    )
|
||||
|
||||
|
||||
def build_offline_command(
    python_version: str,
    row: MatrixRow,
    sample_audio: Path,
    timeout_sec: int,
) -> tuple[list[str], int | None]:
    """Assemble the offline smoke-test command for one matrix case.

    Returns ``(cmd, python_timeout)``: when the GNU ``timeout`` binary is on
    PATH, the limit is enforced by wrapping the command and the second element
    is None; otherwise the caller receives ``timeout_sec`` to enforce via the
    subprocess layer.
    """
    offline_cmd = [
        "uv", "run", "--python", python_version, "--no-sync",
        "python", "test_backend_offline.py",
        "--backend", row.backend,
        "--policy", row.policy,
        "--audio", str(sample_audio),
        "--model", "tiny",
        "--diarization",
        "--diarization-backend", row.diarization_backend,
        "--lan", "en",
        "--no-realtime",
    ]
    if shutil.which("timeout") is None:
        return offline_cmd, timeout_sec
    return ["timeout", str(timeout_sec), *offline_cmd], None
|
||||
|
||||
|
||||
def run_case(
    repo_root: Path,
    python_version: str,
    row: MatrixRow,
    sample_audio: Path,
    timeout_sec: int,
    gpu_available: bool,
    logs_dir: Path,
) -> CaseResult:
    """Prepare and execute one ``(python_version, matrix row)`` smoke test.

    Steps: short-circuit statically-unsupported or GPU-requiring cases,
    ``uv sync`` a dedicated environment, then run the offline transcription
    test under a timeout.  Subprocess output goes to a per-case log file
    whose path is carried on the returned CaseResult.

    Status of the result:
    - "N/A"  for unsupported cases or when a required GPU is absent,
    - "FAIL" when dependency sync fails, the run fails, or it times out,
    - "PASS" when the offline command exits 0.
    """
    start = time.monotonic()
    case_slug = safe_slug(f"py{python_version}-{row.row_id}")
    log_path = logs_dir / f"run-{case_slug}.log"
    log_path.parent.mkdir(parents=True, exist_ok=True)
    # Truncate any log left behind by a previous run of the same case.
    log_path.write_text("", encoding="utf-8")

    unsupported_reason = UNSUPPORTED_CASES.get((python_version, row.row_id))
    if unsupported_reason:
        log_path.write_text(
            f"[matrix] precheck_short_circuit status=N/A reason={unsupported_reason}\n",
            encoding="utf-8",
        )
        return CaseResult(
            python_version=python_version,
            row_id=row.row_id,
            status="N/A",
            reason=unsupported_reason,
            duration_sec=0.0,
            hint="unsupported_case_precheck",
            log_path=str(log_path),
        )

    if row.requires_gpu and not gpu_available:
        # Fix: this branch previously left the case log empty; record the
        # short-circuit the same way the unsupported precheck does so every
        # skipped case has a self-explanatory log.
        log_path.write_text(
            "[matrix] precheck_short_circuit status=N/A reason=gpu_unavailable\n",
            encoding="utf-8",
        )
        return CaseResult(
            python_version=python_version,
            row_id=row.row_id,
            status="N/A",
            reason="gpu_unavailable",
            duration_sec=0.0,
            hint="nvidia-smi unavailable or failed",
            log_path=str(log_path),
        )

    env_dir = repo_root / ".matrix-envs" / safe_slug(f"py{python_version}-{row.row_id}")
    sync_ok, sync_hint = sync_case_environment(
        repo_root,
        python_version,
        row,
        env_dir,
        log_path=log_path,
    )
    if not sync_ok:
        return CaseResult(
            python_version=python_version,
            row_id=row.row_id,
            status="FAIL",
            reason="dependency_sync_failed",
            duration_sec=round(time.monotonic() - start, 3),
            hint=sync_hint,
            log_path=str(log_path),
        )

    cmd, process_timeout = build_offline_command(
        python_version, row, sample_audio, timeout_sec
    )
    env = os.environ.copy()
    env["UV_PROJECT_ENVIRONMENT"] = str(env_dir)
    if row.requires_gpu:
        # Let the child process see every GPU on the host.
        env.pop("CUDA_VISIBLE_DEVICES", None)
    else:
        # Force CPU execution so CPU rows behave the same on GPU hosts.
        env["CUDA_VISIBLE_DEVICES"] = ""
    try:
        proc = run_command(
            cmd,
            cwd=repo_root,
            env=env,
            timeout=process_timeout,
            log_path=log_path,
            log_section="offline",
        )
    except subprocess.TimeoutExpired as exc:
        # Raised only when Python enforces the timeout (no `timeout` binary);
        # exc.stderr may be bytes or None, so only use it when it is a str.
        return CaseResult(
            python_version=python_version,
            row_id=row.row_id,
            status="FAIL",
            reason="offline_timeout",
            duration_sec=round(time.monotonic() - start, 3),
            hint=tail_text((exc.stderr or "") if isinstance(exc.stderr, str) else ""),
            log_path=str(log_path),
        )

    hint = tail_text(proc.stderr or proc.stdout)
    if proc.returncode == 0:
        return CaseResult(
            python_version=python_version,
            row_id=row.row_id,
            status="PASS",
            reason="ok",
            duration_sec=round(time.monotonic() - start, 3),
            hint=hint,
            log_path=str(log_path),
        )

    # Exit code 124 is how GNU `timeout` reports that it killed the child.
    reason = "offline_timeout" if proc.returncode == 124 else "offline_run_failed"
    return CaseResult(
        python_version=python_version,
        row_id=row.row_id,
        status="FAIL",
        reason=reason,
        duration_sec=round(time.monotonic() - start, 3),
        hint=hint,
        log_path=str(log_path),
    )
|
||||
|
||||
|
||||
def print_summary(results: list[CaseResult]) -> None:
    """Print per-case results, a PASS/FAIL/N-A summary, and diagnostics.

    Renders rich tables when the optional console (CONSOLE) is available,
    otherwise falls back to plain markdown-style text output.
    """
    pass_count = sum(1 for row in results if row.status == "PASS")
    fail_count = sum(1 for row in results if row.status == "FAIL")
    na_count = sum(1 for row in results if row.status == "N/A")
    if CONSOLE is None:
        # Plain-text fallback: markdown-style table plus a one-line summary.
        print("\n[matrix] results")
        print("python | row | status | reason | duration_s")
        print("---|---|---|---|---")
        for result in results:
            print(
                f"{result.python_version} | {result.row_id} | {result.status} | "
                f"{result.reason} | {result.duration_sec:.3f}"
            )
        print(
            f"\n[matrix] summary pass={pass_count} fail={fail_count} "
            f"na={na_count} total={len(results)}"
        )
    else:
        table = Table(title="Support Matrix Results")
        table.add_column("Python", style="cyan", no_wrap=True)
        table.add_column("Row", style="white")
        table.add_column("Status", no_wrap=True)
        table.add_column("Reason")
        table.add_column("Duration (s)", justify="right", no_wrap=True)
        for result in results:
            table.add_row(
                result.python_version,
                result.row_id,
                # Color the status cell with rich markup, e.g. [green]PASS[/green].
                f"[{status_style(result.status)}]{result.status}[/{status_style(result.status)}]",
                result.reason,
                f"{result.duration_sec:.3f}",
            )
        CONSOLE.print()
        CONSOLE.print(table)
        CONSOLE.print(
            f"[bold]Summary[/bold] "
            f"pass=[green]{pass_count}[/green] "
            f"fail=[bold red]{fail_count}[/bold red] "
            f"na=[yellow]{na_count}[/yellow] "
            f"total={len(results)}"
        )

    # Second pass: list failing / skipped cases that carry a diagnostic hint.
    diagnostics = [row for row in results if row.status in {"FAIL", "N/A"} and row.hint]
    if diagnostics:
        if CONSOLE is None:
            print("\n[matrix] diagnostics (failed/n-a cases)")
            for row in diagnostics:
                print(
                    f"- py={row.python_version} row={row.row_id} "
                    f"status={row.status} reason={row.reason}"
                )
                print(f" hint: {row.hint}")
                if row.log_path:
                    print(f" log: {row.log_path}")
        else:
            diagnostics_table = Table(title="Diagnostics (FAIL / N/A)")
            diagnostics_table.add_column("Case", style="cyan")
            diagnostics_table.add_column("Status", no_wrap=True)
            diagnostics_table.add_column("Reason")
            diagnostics_table.add_column("Hint")
            diagnostics_table.add_column("Log")
            for row in diagnostics:
                diagnostics_table.add_row(
                    f"py={row.python_version} {row.row_id}",
                    f"[{status_style(row.status)}]{row.status}[/{status_style(row.status)}]",
                    row.reason,
                    row.hint,
                    row.log_path,
                )
            CONSOLE.print()
            CONSOLE.print(diagnostics_table)
|
||||
|
||||
|
||||
def main() -> int:
    """Entry point: run every (python version, matrix case) combination.

    Returns 1 when any case FAILs (or on setup errors), otherwise 0.
    """
    args = parse_args()
    if args.timeout_sec <= 0:
        print("[matrix] error: --timeout-sec must be > 0", file=sys.stderr)
        return 1

    repo_root = Path(__file__).resolve().parents[1]
    logs_dir = (repo_root / args.logs_dir).resolve()
    logs_dir.mkdir(parents=True, exist_ok=True)
    for message in (
        f"[matrix] repo_root={repo_root}",
        f"[matrix] timeout_sec={args.timeout_sec}",
        f"[matrix] logs_dir={logs_dir}",
    ):
        print_line(message, style="cyan")

    try:
        sample_audio = download_sample(repo_root)
    except Exception as exc:  # pragma: no cover - straightforward failure path
        if CONSOLE is None:
            print(f"[matrix] sample_download_failed: {exc}", file=sys.stderr)
        else:
            CONSOLE.print(
                f"[matrix] sample_download_failed: {exc}",
                style="bold red",
                highlight=False,
            )
        return 1
    print_line(f"[matrix] sample_audio={sample_audio}", style="cyan")

    gpu_available = detect_gpu_available()
    print_line(f"[matrix] gpu_available={gpu_available}", style="cyan")

    results: list[CaseResult] = []
    for python_version in PYTHON_VERSIONS:
        for case in CASES:
            print_line(
                f"\n[matrix] running py={python_version} row={case.row_id}", style="blue"
            )
            # Run the case, then map curated expected failures to N/A.
            outcome = apply_expected_failure_policy(
                run_case(
                    repo_root=repo_root,
                    python_version=python_version,
                    row=case,
                    sample_audio=sample_audio,
                    timeout_sec=args.timeout_sec,
                    gpu_available=gpu_available,
                    logs_dir=logs_dir,
                )
            )
            results.append(outcome)
            print_line(
                f"[matrix] {outcome.status} py={outcome.python_version} "
                f"row={outcome.row_id} reason={outcome.reason} duration={outcome.duration_sec:.3f}s",
                style=status_style(outcome.status),
            )
            if outcome.log_path:
                print_line(f"[matrix] log={outcome.log_path}", style="dim")

    print_summary(results)
    has_failures = any(row.status == "FAIL" for row in results)
    return 1 if has_failures else 0
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Script entry point: exit with main()'s status code.
    sys.exit(main())
|
||||
@@ -150,7 +150,10 @@ def float32_to_s16le_bytes(audio: np.ndarray) -> bytes:
|
||||
|
||||
def create_engine(
|
||||
backend: str, model_size: str, lan: str,
|
||||
diarization: bool = False, vac: bool = True, policy: str = "",
|
||||
diarization: bool = False,
|
||||
diarization_backend: str = "",
|
||||
vac: bool = True,
|
||||
policy: str = "",
|
||||
):
|
||||
"""Create a TranscriptionEngine with the given backend config."""
|
||||
import gc
|
||||
@@ -169,6 +172,8 @@ def create_engine(
|
||||
transcription=True,
|
||||
diarization=diarization,
|
||||
)
|
||||
if diarization_backend:
|
||||
kwargs["diarization_backend"] = diarization_backend
|
||||
if model_size:
|
||||
kwargs["model_size"] = model_size
|
||||
if policy:
|
||||
@@ -179,13 +184,18 @@ def create_engine(
|
||||
|
||||
def _extract_text_from_response(response_dict: dict) -> str:
|
||||
"""Extract full transcription text from a FrontData dict."""
|
||||
def _strip_or_empty(value: object) -> str:
|
||||
return value.strip() if isinstance(value, str) else ""
|
||||
|
||||
segments = response_dict.get("lines", [])
|
||||
full_text = " ".join(
|
||||
seg.get("text", "").strip()
|
||||
text
|
||||
for seg in segments
|
||||
if seg.get("text", "").strip()
|
||||
if isinstance(seg, dict)
|
||||
for text in [_strip_or_empty(seg.get("text"))]
|
||||
if text
|
||||
)
|
||||
buf = response_dict.get("buffer_transcription", "").strip()
|
||||
buf = _strip_or_empty(response_dict.get("buffer_transcription"))
|
||||
if buf:
|
||||
full_text = f"{full_text} {buf}".strip() if full_text else buf
|
||||
return full_text
|
||||
@@ -236,7 +246,8 @@ async def run_test(
|
||||
# Only print when transcription text actually changes
|
||||
current_text = _extract_text_from_response(d)
|
||||
if current_text and current_text != last_printed_text:
|
||||
buf = d.get("buffer_transcription", "").strip()
|
||||
buf = d.get("buffer_transcription")
|
||||
buf = buf.strip() if isinstance(buf, str) else ""
|
||||
committed = current_text
|
||||
if buf and committed.endswith(buf):
|
||||
committed = committed[:-len(buf)].strip()
|
||||
@@ -686,6 +697,12 @@ def main():
|
||||
"--diarization", action="store_true",
|
||||
help="Enable speaker diarization.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--diarization-backend",
|
||||
default="",
|
||||
choices=["diart", "sortformer"],
|
||||
help="Diarization backend when --diarization is enabled.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--benchmark", action="store_true",
|
||||
help="Run benchmark across all detected backend+policy combinations.",
|
||||
@@ -748,7 +765,10 @@ def main():
|
||||
logger.info(f"Creating {args.backend} engine...")
|
||||
engine = create_engine(
|
||||
args.backend, args.model_size, args.lan,
|
||||
diarization=args.diarization, vac=vac, policy=policy,
|
||||
diarization=args.diarization,
|
||||
diarization_backend=args.diarization_backend,
|
||||
vac=vac,
|
||||
policy=policy,
|
||||
)
|
||||
logger.info("Engine ready.")
|
||||
|
||||
|
||||
Reference in New Issue
Block a user